1 /*
2  * lzx_compress.c
3  *
4  * A compressor for the LZX compression format, as used in WIM files.
5  */
6
7 /*
8  * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
9  *
10  * This file is free software; you can redistribute it and/or modify it under
11  * the terms of the GNU Lesser General Public License as published by the Free
12  * Software Foundation; either version 3 of the License, or (at your option) any
13  * later version.
14  *
15  * This file is distributed in the hope that it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
18  * details.
19  *
20  * You should have received a copy of the GNU Lesser General Public License
21  * along with this file; if not, see http://www.gnu.org/licenses/.
22  */
23
24
25 /*
26  * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27  * compression format, as used in the WIM (Windows IMaging) file format.
28  *
29  * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30  * "Near-optimal" is significantly slower than "lazy", but results in a better
31  * compression ratio.  The "near-optimal" algorithm is used at the default
32  * compression level.
33  *
34  * This file may need some slight modifications to be used outside of the WIM
35  * format.  In particular, in other situations the LZX block header might be
36  * slightly different, and sliding window support might be required.
37  *
38  * Note: LZX is a compression format derived from DEFLATE, the format used by
39  * zlib and gzip.  Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40  * Certain details are quite similar, such as the method for storing Huffman
41  * codes.  However, the main differences are:
42  *
43  * - LZX preprocesses the data to attempt to make x86 machine code slightly more
44  *   compressible before attempting to compress it further.
45  *
46  * - LZX uses a "main" alphabet which combines literals and matches, with the
47  *   match symbols containing a "length header" (giving all or part of the match
48  *   length) and an "offset slot" (giving, roughly speaking, the order of
49  *   magnitude of the match offset).
50  *
51  * - LZX does not have static Huffman blocks (that is, the kind with preset
52  *   Huffman codes); however it does have two types of dynamic Huffman blocks
53  *   ("verbatim" and "aligned").
54  *
55  * - LZX has a minimum match length of 2 rather than 3.  Length 2 matches can be
56  *   useful, but generally only if the parser is smart about choosing them.
57  *
58  * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59  *   of match offsets.  This is very useful for certain types of files, such as
60  *   binary files that have repeating records.
61  */
62
63 #ifdef HAVE_CONFIG_H
64 #  include "config.h"
65 #endif
66
67 /*
68  * Start a new LZX block (with new Huffman codes) after this many bytes.
69  *
70  * Note: actual block sizes may slightly exceed this value.
71  *
72  * TODO: recursive splitting and cost evaluation might be good for an extremely
73  * high compression mode, but otherwise it is almost always far too slow for how
74  * much it helps.  Perhaps some sort of heuristic would be useful?
75  */
76 #define LZX_DIV_BLOCK_SIZE      32768
77
78 /*
79  * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the
80  * match cache for each byte position.  This value should be high enough so that
81  * nearly the time, all matches found in a given block can fit in the match
82  * cache.  However, fallback behavior on cache overflow is still required.
83  */
84 #define LZX_CACHE_PER_POS       6
85
86 #define LZX_CACHE_LEN           (LZX_DIV_BLOCK_SIZE * (LZX_CACHE_PER_POS + 1))
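
/*
 * Illustrative arithmetic (not part of the original code): with
 * LZX_DIV_BLOCK_SIZE == 32768 and LZX_CACHE_PER_POS == 6, the match cache
 * holds LZX_CACHE_LEN == 32768 * 7 == 229376 'struct lz_match' entries.
 */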
87
88 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
89
90 /*
91  * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
92  * This makes it possible to consider fractional bit costs.
93  *
94  * Note: this is only useful as a statistical trick for when the true costs are
95  * unknown.  In reality, each token in LZX requires a whole number of bits to
96  * output.
97  */
98 #define LZX_BIT_COST            16
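
/*
 * Illustrative arithmetic (hypothetical costs, not part of the original
 * code): with LZX_BIT_COST == 16, a symbol estimated to cost 5.25 bits is
 * stored as the scaled integer 5.25 * 16 == 84, and one estimated at 7 bits
 * as 7 * 16 == 112.  Path costs are sums of these scaled values, so
 * fractional estimates still compare correctly.
 */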
99
100 /*
101  * Consideration of aligned offset costs is disabled for now, due to
102  * insufficient benefit gained from the time spent.
103  */
104 #define LZX_CONSIDER_ALIGNED_COSTS      0
105
106 /*
107  * The maximum compression level at which we use the faster algorithm.
108  */
109 #define LZX_MAX_FAST_LEVEL      34
110
111 /*
112  * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
113  * for finding length 2 matches.  This can be as high as 16 (in which case the
114  * hash function is trivial), but using a smaller hash table actually speeds up
115  * compression due to reduced cache pressure.
116  */
117 #define LZX_HASH2_ORDER         12
118 #define LZX_HASH2_LENGTH        (1UL << LZX_HASH2_ORDER)
119
120 #include "wimlib/lzx_common.h"
121
122 /*
123  * The maximum allowed window order for the matchfinder.
124  */
125 #define MATCHFINDER_MAX_WINDOW_ORDER    LZX_MAX_WINDOW_ORDER
126
127 #include <string.h>
128
129 #include "wimlib/bt_matchfinder.h"
130 #include "wimlib/compress_common.h"
131 #include "wimlib/compressor_ops.h"
132 #include "wimlib/endianness.h"
133 #include "wimlib/error.h"
134 #include "wimlib/hc_matchfinder.h"
135 #include "wimlib/lz_extend.h"
136 #include "wimlib/unaligned.h"
137 #include "wimlib/util.h"
138
139 struct lzx_output_bitstream;
140
141 /* Codewords for the LZX Huffman codes.  */
142 struct lzx_codewords {
143         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
144         u32 len[LZX_LENCODE_NUM_SYMBOLS];
145         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
146 };
147
148 /* Codeword lengths (in bits) for the LZX Huffman codes.
149  * A zero length means the corresponding codeword has zero frequency.  */
150 struct lzx_lens {
151         u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
152         u8 len[LZX_LENCODE_NUM_SYMBOLS];
153         u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
154 };
155
156 /* Cost model for near-optimal parsing  */
157 struct lzx_costs {
158
159         /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
160          * length 'len' match that has an offset belonging to 'offset_slot'.  */
161         u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
162
163         /* Cost for each symbol in the main code  */
164         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
165
166         /* Cost for each symbol in the length code  */
167         u32 len[LZX_LENCODE_NUM_SYMBOLS];
168
169 #if LZX_CONSIDER_ALIGNED_COSTS
170         /* Cost for each symbol in the aligned code  */
171         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
172 #endif
173 };
174
175 /* Codewords and lengths for the LZX Huffman codes.  */
176 struct lzx_codes {
177         struct lzx_codewords codewords;
178         struct lzx_lens lens;
179 };
180
181 /* Symbol frequency counters for the LZX Huffman codes.  */
182 struct lzx_freqs {
183         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
184         u32 len[LZX_LENCODE_NUM_SYMBOLS];
185         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
186 };
187
188 /* Intermediate LZX match/literal format  */
189 struct lzx_item {
190
191         /* Bits 0  -  9: Main symbol
192          * Bits 10 - 17: Length symbol
193          * Bits 18 - 22: Number of extra offset bits
194          * Bits 23+    : Extra offset bits  */
195         u64 data;
196 };
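
/*
 * Illustrative example (hypothetical values, not part of the original code):
 * a match with main symbol 300, length symbol 5, and 7 extra offset bits
 * whose value is 0x51 would be packed as
 *
 *      item.data = (u64)300 | ((u64)5 << 10) | ((u64)7 << 18) | ((u64)0x51 << 23);
 *
 * A literal stores only its main symbol; see lzx_declare_literal() below.
 */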
197
198 /*
199  * This structure represents a byte position in the input buffer and a node in
200  * the graph of possible match/literal choices.
201  *
202  * Logically, each incoming edge to this node is labeled with a literal or a
203  * match that can be taken to reach this position from an earlier position; and
204  * each outgoing edge from this node is labeled with a literal or a match that
205  * can be taken to advance from this position to a later position.
206  */
207 struct lzx_optimum_node {
208
209         /* The cost, in bits, of the lowest-cost path that has been found to
210          * reach this position.  This can change as progressively lower cost
211          * paths are found to reach this position.  */
212         u32 cost;
213
214         /*
215          * The match or literal that was taken to reach this position.  This can
216          * change as progressively lower cost paths are found to reach this
217          * position.
218          *
219          * This variable is divided into two bitfields.
220          *
221          * Literals:
222          *      Low bits are 1, high bits are the literal.
223          *
224          * Explicit offset matches:
225          *      Low bits are the match length, high bits are the offset plus 2.
226          *
227          * Repeat offset matches:
228          *      Low bits are the match length, high bits are the queue index.
229          */
230         u32 item;
231 #define OPTIMUM_OFFSET_SHIFT 9
232 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
233 } _aligned_attribute(8);
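
/*
 * Illustrative encodings of 'item' (hypothetical values, not part of the
 * original code):
 *
 *      literal 'A' (0x41):                   (0x41 << OPTIMUM_OFFSET_SHIFT) | 1
 *      length 20 match at offset 1000:       ((1000 + 2) << OPTIMUM_OFFSET_SHIFT) | 20
 *      length 5 repeat offset match via R1:  (1 << OPTIMUM_OFFSET_SHIFT) | 5
 */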
234
235 /*
236  * Least-recently-used queue for match offsets.
237  *
238  * This is represented as a 64-bit integer for efficiency.  There are three
239  * offsets of 21 bits each.  The remaining high bit is unused.
240  */
241 struct lzx_lru_queue {
242         u64 R;
243 };
244
245 #define LZX_QUEUE64_OFFSET_SHIFT 21
246 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
247
248 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
249 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
250 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
251
252 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
253 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
254 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
255
256 static inline void
257 lzx_lru_queue_init(struct lzx_lru_queue *queue)
258 {
259         queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
260                    ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
261                    ((u64)1 << LZX_QUEUE64_R2_SHIFT);
262 }
263
264 static inline u64
265 lzx_lru_queue_R0(struct lzx_lru_queue queue)
266 {
267         return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
268 }
269
270 static inline u64
271 lzx_lru_queue_R1(struct lzx_lru_queue queue)
272 {
273         return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
274 }
275
276 static inline u64
277 lzx_lru_queue_R2(struct lzx_lru_queue queue)
278 {
279         return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
280 }
281
282 /* Push a match offset onto the front (most recently used) end of the queue.  */
283 static inline struct lzx_lru_queue
284 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
285 {
286         return (struct lzx_lru_queue) {
287                 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
288         };
289 }
290
291 /* Pop a match offset off the front (most recently used) end of the queue.  */
292 static inline u32
293 lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
294 {
295         u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
296         queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
297         return offset;
298 }
299
300 /* Swap a match offset to the front of the queue.  */
301 static inline struct lzx_lru_queue
302 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
303 {
304         if (idx == 0)
305                 return queue;
306
307         if (idx == 1)
308                 return (struct lzx_lru_queue) {
309                         .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
310                              (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
311                              (queue.R & LZX_QUEUE64_R2_MASK),
312                 };
313
314         return (struct lzx_lru_queue) {
315                 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
316                      (queue.R & LZX_QUEUE64_R1_MASK) |
317                      (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
318         };
319 }
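
/*
 * Illustrative example (hypothetical offsets, not part of the original code):
 * if the queue currently holds R0 = 100, R1 = 30, R2 = 5, then
 *
 *      lzx_lru_queue_push(queue, 77);  // yields R0 = 77, R1 = 100, R2 = 30
 *      lzx_lru_queue_swap(queue, 2);   // yields R0 = 5,  R1 = 30,  R2 = 100
 *
 * Each call returns a new queue value; the argument itself is not modified.
 */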
320
321 /* The main LZX compressor structure  */
322 struct lzx_compressor {
323
324         /* The "nice" match length: if a match of this length is found, then
325          * choose it immediately without further consideration.  */
326         unsigned nice_match_length;
327
328         /* The maximum search depth: consider at most this many potential
329          * matches at each position.  */
330         unsigned max_search_depth;
331
332         /* The log base 2 of the LZX window size for LZ match offset encoding
333          * purposes.  This will be >= LZX_MIN_WINDOW_ORDER and <=
334          * LZX_MAX_WINDOW_ORDER.  */
335         unsigned window_order;
336
337         /* The number of symbols in the main alphabet.  This depends on
338          * @window_order, since @window_order determines the maximum possible
339          * offset.  */
340         unsigned num_main_syms;
341
342         /* Number of optimization passes per block  */
343         unsigned num_optim_passes;
344
345         /* The preprocessed buffer of data being compressed  */
346         u8 *in_buffer;
347
348         /* The number of bytes of data to be compressed, which is the number of
349          * bytes of data in @in_buffer that are actually valid.  */
350         size_t in_nbytes;
351
352         /* Pointer to the compress() implementation chosen at allocation time */
353         void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
354
355         /* The Huffman symbol frequency counters for the current block.  */
356         struct lzx_freqs freqs;
357
358         /* The Huffman codes for the current and previous blocks.  The one with
359          * index 'codes_index' is for the current block, and the other one is
360          * for the previous block.  */
361         struct lzx_codes codes[2];
362         unsigned codes_index;
363
364         /* The match/literal sequence the algorithm chose for the current block.
365          */
366         struct lzx_item chosen_items[LZX_DIV_BLOCK_SIZE + LZX_MAX_MATCH_LEN + 1];
367
368         /* Table mapping match offset => offset slot for small offsets  */
369 #define LZX_NUM_FAST_OFFSETS 32768
370         u8 offset_slot_fast[LZX_NUM_FAST_OFFSETS];
371
372         union {
373                 /* Data for greedy or lazy parsing  */
374                 struct {
375                         /* Hash chains matchfinder (MUST BE LAST!!!)  */
376                         struct hc_matchfinder hc_mf;
377                 };
378
379                 /* Data for near-optimal parsing  */
380                 struct {
381                         /* The graph nodes for the current block  */
382                         struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE +
383                                                               LZX_MAX_MATCH_LEN + 1];
384
385                         /* The cost model for the current block  */
386                         struct lzx_costs costs;
387
388                         /* Cached matches for the current block  */
389                         struct lz_match match_cache[LZX_CACHE_LEN + 1 +
390                                                     LZX_MAX_MATCHES_PER_POS];
391                         struct lz_match *cache_overflow_mark;
392
393                         /* Hash table for finding length 2 matches  */
394                         pos_t hash2_tab[LZX_HASH2_LENGTH]
395                                 _aligned_attribute(MATCHFINDER_ALIGNMENT);
396
397                         /* Binary trees matchfinder (MUST BE LAST!!!)  */
398                         struct bt_matchfinder bt_mf;
399                 };
400         };
401 };
402
403 /* Compute a hash value for the next 2 bytes of uncompressed data.  */
404 static inline u32
405 lz_hash_2_bytes(const u8 *in_next)
406 {
407         u16 next_2_bytes = load_u16_unaligned(in_next);
408         if (LZX_HASH2_ORDER == 16)
409                 return next_2_bytes;
410         else
411                 return lz_hash(next_2_bytes, LZX_HASH2_ORDER);
412 }
413
414 /*
415  * Structure to keep track of the current state of sending bits to the
416  * compressed output buffer.
417  *
418  * The LZX bitstream is encoded as a sequence of 16-bit coding units.
419  */
420 struct lzx_output_bitstream {
421
422         /* Bits that haven't yet been written to the output buffer.  */
423         u32 bitbuf;
424
425         /* Number of bits currently held in @bitbuf.  */
426         u32 bitcount;
427
428         /* Pointer to the start of the output buffer.  */
429         le16 *start;
430
431         /* Pointer to the position in the output buffer at which the next coding
432          * unit should be written.  */
433         le16 *next;
434
435         /* Pointer past the end of the output buffer.  */
436         le16 *end;
437 };
438
439 /*
440  * Initialize the output bitstream.
441  *
442  * @os
443  *      The output bitstream structure to initialize.
444  * @buffer
445  *      The buffer being written to.
446  * @size
447  *      Size of @buffer, in bytes.
448  */
449 static void
450 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
451 {
452         os->bitbuf = 0;
453         os->bitcount = 0;
454         os->start = buffer;
455         os->next = os->start;
456         os->end = os->start + size / sizeof(le16);
457 }
458
459 /*
460  * Write some bits to the output bitstream.
461  *
462  * The bits are given by the low-order @num_bits bits of @bits.  Higher-order
463  * bits in @bits cannot be set.  At most 17 bits can be written at once.
464  *
465  * @max_num_bits is a compile-time constant that specifies the maximum number of
466  * bits that can ever be written at the call site.  Currently, it is used to
467  * optimize away the conditional code for writing a second 16-bit coding unit
468  * when writing fewer than 17 bits.
469  *
470  * If the output buffer space is exhausted, then the bits will be ignored, and
471  * lzx_flush_output() will return 0 when it gets called.
472  */
473 static inline void
474 lzx_write_varbits(struct lzx_output_bitstream *os,
475                   const u32 bits, const unsigned num_bits,
476                   const unsigned max_num_bits)
477 {
478         /* This code is optimized for LZX, which never needs to write more than
479          * 17 bits at once.  */
480         LZX_ASSERT(num_bits <= 17);
481         LZX_ASSERT(num_bits <= max_num_bits);
482         LZX_ASSERT(os->bitcount <= 15);
483
484         /* Add the bits to the bit buffer variable.  @bitcount will be at most
485          * 15, so there will be just enough space for the maximum possible
486          * @num_bits of 17.  */
487         os->bitcount += num_bits;
488         os->bitbuf = (os->bitbuf << num_bits) | bits;
489
490         /* Check whether any coding units need to be written.  */
491         if (os->bitcount >= 16) {
492
493                 os->bitcount -= 16;
494
495                 /* Write a coding unit, unless it would overflow the buffer.  */
496                 if (os->next != os->end)
497                         put_unaligned_u16_le(os->bitbuf >> os->bitcount, os->next++);
498
499                 /* If writing 17 bits, a second coding unit might need to be
500                  * written.  But because 'max_num_bits' is a compile-time
501                  * constant, the compiler will optimize away this code at most
502                  * call sites.  */
503                 if (max_num_bits == 17 && os->bitcount == 16) {
504                         if (os->next != os->end)
505                                 put_unaligned_u16_le(os->bitbuf, os->next++);
506                         os->bitcount = 0;
507                 }
508         }
509 }
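
/*
 * Illustrative example (hypothetical state, not part of the original code):
 * if os->bitcount is 10, then lzx_write_varbits(os, bits, 7, 17) raises the
 * count to 17, emits one 16-bit coding unit containing the 16 high-order
 * buffered bits, and leaves 1 bit buffered.  The second coding unit is only
 * needed when a 17-bit write finds 15 bits already buffered, leaving a full
 * 16 bits still pending after the first unit is written.
 */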
510
511 /* Use when @num_bits is a compile-time constant.  Otherwise use
512  * lzx_write_varbits().  */
513 static inline void
514 lzx_write_bits(struct lzx_output_bitstream *os,
515                const u32 bits, const unsigned num_bits)
516 {
517         lzx_write_varbits(os, bits, num_bits, num_bits);
518 }
519
520 /*
521  * Flush the last coding unit to the output buffer if needed.  Return the total
522  * number of bytes written to the output buffer, or 0 if an overflow occurred.
523  */
524 static u32
525 lzx_flush_output(struct lzx_output_bitstream *os)
526 {
527         if (os->next == os->end)
528                 return 0;
529
530         if (os->bitcount != 0)
531                 put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next++);
532
533         return (const u8 *)os->next - (const u8 *)os->start;
534 }
535
536 /* Build the main, length, and aligned offset Huffman codes used in LZX.
537  *
538  * This takes as input the frequency tables for each code and produces as output
539  * a set of tables that map symbols to codewords and codeword lengths.  */
540 static void
541 lzx_make_huffman_codes(struct lzx_compressor *c)
542 {
543         const struct lzx_freqs *freqs = &c->freqs;
544         struct lzx_codes *codes = &c->codes[c->codes_index];
545
546         make_canonical_huffman_code(c->num_main_syms,
547                                     LZX_MAX_MAIN_CODEWORD_LEN,
548                                     freqs->main,
549                                     codes->lens.main,
550                                     codes->codewords.main);
551
552         make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
553                                     LZX_MAX_LEN_CODEWORD_LEN,
554                                     freqs->len,
555                                     codes->lens.len,
556                                     codes->codewords.len);
557
558         make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
559                                     LZX_MAX_ALIGNED_CODEWORD_LEN,
560                                     freqs->aligned,
561                                     codes->lens.aligned,
562                                     codes->codewords.aligned);
563 }
564
565 /* Reset the symbol frequencies for the LZX Huffman codes.  */
566 static void
567 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
568 {
569         memset(&c->freqs, 0, sizeof(c->freqs));
570 }
571
572 static unsigned
573 lzx_compute_precode_items(const u8 lens[restrict],
574                           const u8 prev_lens[restrict],
575                           const unsigned num_lens,
576                           u32 precode_freqs[restrict],
577                           unsigned precode_items[restrict])
578 {
579         unsigned *itemptr;
580         unsigned run_start;
581         unsigned run_end;
582         unsigned extra_bits;
583         int delta;
584         u8 len;
585
586         itemptr = precode_items;
587         run_start = 0;
588         do {
589                 /* Find the next run of codeword lengths.  */
590
591                 /* len = the length being repeated  */
592                 len = lens[run_start];
593
594                 run_end = run_start + 1;
595
596                 /* Fast case for a single length.  */
597                 if (likely(run_end == num_lens || len != lens[run_end])) {
598                         delta = prev_lens[run_start] - len;
599                         if (delta < 0)
600                                 delta += 17;
601                         precode_freqs[delta]++;
602                         *itemptr++ = delta;
603                         run_start++;
604                         continue;
605                 }
606
607                 /* Extend the run.  */
608                 do {
609                         run_end++;
610                 } while (run_end != num_lens && len == lens[run_end]);
611
612                 if (len == 0) {
613                         /* Run of zeroes.  */
614
615                         /* Symbol 18: RLE 20 to 51 zeroes at a time.  */
616                         while ((run_end - run_start) >= 20) {
617                                 extra_bits = min((run_end - run_start) - 20, 0x1f);
618                                 precode_freqs[18]++;
619                                 *itemptr++ = 18 | (extra_bits << 5);
620                                 run_start += 20 + extra_bits;
621                         }
622
623                         /* Symbol 17: RLE 4 to 19 zeroes at a time.  */
624                         if ((run_end - run_start) >= 4) {
625                                 extra_bits = min((run_end - run_start) - 4, 0xf);
626                                 precode_freqs[17]++;
627                                 *itemptr++ = 17 | (extra_bits << 5);
628                                 run_start += 4 + extra_bits;
629                         }
630                 } else {
631
632                         /* A run of nonzero lengths. */
633
634                         /* Symbol 19: RLE 4 to 5 of any length at a time.  */
635                         while ((run_end - run_start) >= 4) {
636                                 extra_bits = (run_end - run_start) > 4;
637                                 delta = prev_lens[run_start] - len;
638                                 if (delta < 0)
639                                         delta += 17;
640                                 precode_freqs[19]++;
641                                 precode_freqs[delta]++;
642                                 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
643                                 run_start += 4 + extra_bits;
644                         }
645                 }
646
647                 /* Output any remaining lengths without RLE.  */
648                 while (run_start != run_end) {
649                         delta = prev_lens[run_start] - len;
650                         if (delta < 0)
651                                 delta += 17;
652                         precode_freqs[delta]++;
653                         *itemptr++ = delta;
654                         run_start++;
655                 }
656         } while (run_start != num_lens);
657
658         return itemptr - precode_items;
659 }
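
/*
 * Illustrative example (hypothetical lengths, not part of the original code):
 * a run of 25 zero lengths becomes a single symbol-18 item:
 *
 *      extra_bits = min(25 - 20, 0x1f);        // == 5
 *      *itemptr++ = 18 | (extra_bits << 5);    // consumes 20 + 5 == 25 lengths
 *
 * A run of 6 identical nonzero lengths becomes one symbol-19 item covering 5
 * of them (with the delta from the previous code stored in bits 6+), followed
 * by one plain delta item for the remaining length.
 */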
660
661 /*
662  * Output a Huffman code in the compressed form used in LZX.
663  *
664  * The Huffman code is represented in the output as a logical series of codeword
665  * lengths from which the Huffman code, which must be in canonical form, can be
666  * reconstructed.
667  *
668  * The codeword lengths are themselves compressed using a separate Huffman code,
669  * the "precode", which contains a symbol for each possible codeword length in
670  * the larger code as well as several special symbols to represent repeated
671  * codeword lengths (a form of run-length encoding).  The precode is itself
672  * constructed in canonical form, and its codeword lengths are represented
673  * literally in 20 4-bit fields that immediately precede the compressed codeword
674  * lengths of the larger code.
675  *
676  * Furthermore, the codeword lengths of the larger code are actually represented
677  * as deltas from the codeword lengths of the corresponding code in the previous
678  * block.
679  *
680  * @os:
681  *      Bitstream to which to write the compressed Huffman code.
682  * @lens:
683  *      The codeword lengths, indexed by symbol, in the Huffman code.
684  * @prev_lens:
685  *      The codeword lengths, indexed by symbol, in the corresponding Huffman
686  *      code in the previous block, or all zeroes if this is the first block.
687  * @num_lens:
688  *      The number of symbols in the Huffman code.
689  */
690 static void
691 lzx_write_compressed_code(struct lzx_output_bitstream *os,
692                           const u8 lens[restrict],
693                           const u8 prev_lens[restrict],
694                           unsigned num_lens)
695 {
696         u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
697         u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
698         u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
699         unsigned precode_items[num_lens];
700         unsigned num_precode_items;
701         unsigned precode_item;
702         unsigned precode_sym;
703         unsigned i;
704
705         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
706                 precode_freqs[i] = 0;
707
708         /* Compute the "items" (RLE / literal tokens and extra bits) with which
709          * the codeword lengths in the larger code will be output.  */
710         num_precode_items = lzx_compute_precode_items(lens,
711                                                       prev_lens,
712                                                       num_lens,
713                                                       precode_freqs,
714                                                       precode_items);
715
716         /* Build the precode.  */
717         make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
718                                     LZX_MAX_PRE_CODEWORD_LEN,
719                                     precode_freqs, precode_lens,
720                                     precode_codewords);
721
722         /* Output the lengths of the codewords in the precode.  */
723         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
724                 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
725
726         /* Output the encoded lengths of the codewords in the larger code.  */
727         for (i = 0; i < num_precode_items; i++) {
728                 precode_item = precode_items[i];
729                 precode_sym = precode_item & 0x1F;
730                 lzx_write_varbits(os, precode_codewords[precode_sym],
731                                   precode_lens[precode_sym],
732                                   LZX_MAX_PRE_CODEWORD_LEN);
733                 if (precode_sym >= 17) {
734                         if (precode_sym == 17) {
735                                 lzx_write_bits(os, precode_item >> 5, 4);
736                         } else if (precode_sym == 18) {
737                                 lzx_write_bits(os, precode_item >> 5, 5);
738                         } else {
739                                 lzx_write_bits(os, (precode_item >> 5) & 1, 1);
740                                 precode_sym = precode_item >> 6;
741                                 lzx_write_varbits(os, precode_codewords[precode_sym],
742                                                   precode_lens[precode_sym],
743                                                   LZX_MAX_PRE_CODEWORD_LEN);
744                         }
745                 }
746         }
747 }
748
749 /* Output a match or literal.  */
750 static inline void
751 lzx_write_item(struct lzx_output_bitstream *os, struct lzx_item item,
752                unsigned ones_if_aligned, const struct lzx_codes *codes)
753 {
754         u64 data = item.data;
755         unsigned main_symbol;
756         unsigned len_symbol;
757         unsigned num_extra_bits;
758         u32 extra_bits;
759
760         main_symbol = data & 0x3FF;
761
762         lzx_write_varbits(os, codes->codewords.main[main_symbol],
763                           codes->lens.main[main_symbol],
764                           LZX_MAX_MAIN_CODEWORD_LEN);
765
766         if (main_symbol < LZX_NUM_CHARS)  /* Literal?  */
767                 return;
768
769         len_symbol = (data >> 10) & 0xFF;
770
771         if (len_symbol != LZX_LENCODE_NUM_SYMBOLS) {
772                 lzx_write_varbits(os, codes->codewords.len[len_symbol],
773                                   codes->lens.len[len_symbol],
774                                   LZX_MAX_LEN_CODEWORD_LEN);
775         }
776
777         num_extra_bits = (data >> 18) & 0x1F;
778         if (num_extra_bits == 0)  /* Small offset or repeat offset match?  */
779                 return;
780
781         extra_bits = data >> 23;
782
783         if ((num_extra_bits & ones_if_aligned) >= LZX_NUM_ALIGNED_OFFSET_BITS) {
784
785                 /* Aligned offset blocks: The low 3 bits of the extra offset
786                  * bits are Huffman-encoded using the aligned offset code.  The
787                  * remaining bits are output literally.  */
788
789                 lzx_write_varbits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
790                                   num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS,
791                                   17 - LZX_NUM_ALIGNED_OFFSET_BITS);
792
793                 lzx_write_varbits(os,
794                                   codes->codewords.aligned[extra_bits & LZX_ALIGNED_OFFSET_BITMASK],
795                                   codes->lens.aligned[extra_bits & LZX_ALIGNED_OFFSET_BITMASK],
796                                   LZX_MAX_ALIGNED_CODEWORD_LEN);
797         } else {
798                 /* Verbatim blocks, or fewer than 3 extra bits:  All extra
799                  * offset bits are output literally.  */
800                 lzx_write_varbits(os, extra_bits, num_extra_bits, 17);
801         }
802 }
803
804 /*
805  * Write all matches and literal bytes (which were precomputed) in an LZX
806  * compressed block to the output bitstream in the final compressed
807  * representation.
808  *
809  * @os
810  *      The output bitstream.
811  * @block_type
812  *      The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
813  *      LZX_BLOCKTYPE_VERBATIM).
814  * @items
815  *      The array of matches/literals to output.
816  * @num_items
817  *      Number of matches/literals to output (length of @items).
818  * @codes
819  *      The main, length, and aligned offset Huffman codes for the current
820  *      LZX compressed block.
821  */
822 static void
823 lzx_write_items(struct lzx_output_bitstream *os, int block_type,
824                 const struct lzx_item items[], u32 num_items,
825                 const struct lzx_codes *codes)
826 {
827         unsigned ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
828
829         for (u32 i = 0; i < num_items; i++)
830                 lzx_write_item(os, items[i], ones_if_aligned, codes);
831 }
832
833 static void
834 lzx_write_compressed_block(int block_type,
835                            u32 block_size,
836                            unsigned window_order,
837                            unsigned num_main_syms,
838                            const struct lzx_item chosen_items[],
839                            u32 num_chosen_items,
840                            const struct lzx_codes * codes,
841                            const struct lzx_lens * prev_lens,
842                            struct lzx_output_bitstream * os)
843 {
844         LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
845                    block_type == LZX_BLOCKTYPE_VERBATIM);
846
847         /* The first three bits indicate the type of block and are one of the
848          * LZX_BLOCKTYPE_* constants.  */
849         lzx_write_bits(os, block_type, 3);
850
851         /* Output the block size.
852          *
853          * The original LZX format seemed to always encode the block size in 3
854          * bytes.  However, the implementation in WIMGAPI, as used in WIM files,
855          * uses the first bit to indicate whether the block is the default size
856          * (32768) or a different size given explicitly by the next 16 bits.
857          *
858          * By default, this compressor uses a window size of 32768 and therefore
859          * follows the WIMGAPI behavior.  However, this compressor also supports
860          * window sizes greater than 32768 bytes, which do not appear to be
861          * supported by WIMGAPI.  In such cases, we retain the default size bit
862          * to mean a size of 32768 bytes but output non-default block size in 24
863          * bits rather than 16.  The compatibility of this behavior is unknown
864          * because WIMs created with chunk size greater than 32768 can seemingly
865          * only be opened by wimlib anyway.  */
866         if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
867                 lzx_write_bits(os, 1, 1);
868         } else {
869                 lzx_write_bits(os, 0, 1);
870
871                 if (window_order >= 16)
872                         lzx_write_bits(os, block_size >> 16, 8);
873
874                 lzx_write_bits(os, block_size & 0xFFFF, 16);
875         }
876
877         /* If it's an aligned offset block, output the aligned offset code.  */
878         if (block_type == LZX_BLOCKTYPE_ALIGNED) {
879                 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
880                         lzx_write_bits(os, codes->lens.aligned[i],
881                                        LZX_ALIGNEDCODE_ELEMENT_SIZE);
882                 }
883         }
884
885         /* Output the main code (two parts).  */
886         lzx_write_compressed_code(os, codes->lens.main,
887                                   prev_lens->main,
888                                   LZX_NUM_CHARS);
889         lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
890                                   prev_lens->main + LZX_NUM_CHARS,
891                                   num_main_syms - LZX_NUM_CHARS);
892
893         /* Output the length code.  */
894         lzx_write_compressed_code(os, codes->lens.len,
895                                   prev_lens->len,
896                                   LZX_LENCODE_NUM_SYMBOLS);
897
898         /* Output the compressed matches and literals.  */
899         lzx_write_items(os, block_type, chosen_items, num_chosen_items, codes);
900 }
901
902 /* Given the frequencies of symbols in an LZX-compressed block and the
903  * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
904  * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
905  * will take fewer bits to output.  */
906 static int
907 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
908                                const struct lzx_codes * codes)
909 {
910         u32 aligned_cost = 0;
911         u32 verbatim_cost = 0;
912
913         /* A verbatim block requires 3 bits in each place that an aligned symbol
914          * would be used in an aligned offset block.  */
915         for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
916                 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
917                 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
918         }
919
920         /* Account for output of the aligned offset code.  */
921         aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
922
923         if (aligned_cost < verbatim_cost)
924                 return LZX_BLOCKTYPE_ALIGNED;
925         else
926                 return LZX_BLOCKTYPE_VERBATIM;
927 }
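
/*
 * Illustrative arithmetic (hypothetical frequencies, not part of the original
 * code): with LZX_NUM_ALIGNED_OFFSET_BITS == 3, LZX_ALIGNEDCODE_NUM_SYMBOLS == 8
 * and LZX_ALIGNEDCODE_ELEMENT_SIZE == 3, if the aligned symbols occur 1000
 * times in total and the aligned Huffman code would encode them in 2500 bits,
 * then verbatim_cost == 3 * 1000 == 3000 and aligned_cost == 2500 + 3 * 8 ==
 * 2524, so LZX_BLOCKTYPE_ALIGNED is chosen.
 */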
928
929 /*
930  * Finish an LZX block:
931  *
932  * - build the Huffman codes
933  * - decide whether to output the block as VERBATIM or ALIGNED
934  * - output the block
935  * - swap the indices of the current and previous Huffman codes
936  */
937 static void
938 lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
939                  u32 block_size, u32 num_chosen_items)
940 {
941         int block_type;
942
943         lzx_make_huffman_codes(c);
944
945         block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
946                                                     &c->codes[c->codes_index]);
947         lzx_write_compressed_block(block_type,
948                                    block_size,
949                                    c->window_order,
950                                    c->num_main_syms,
951                                    c->chosen_items,
952                                    num_chosen_items,
953                                    &c->codes[c->codes_index],
954                                    &c->codes[c->codes_index ^ 1].lens,
955                                    os);
956         c->codes_index ^= 1;
957 }
958
959 /* Return the offset slot for the specified offset, which must be
960  * less than LZX_NUM_FAST_OFFSETS.  */
961 static inline unsigned
962 lzx_get_offset_slot_fast(struct lzx_compressor *c, u32 offset)
963 {
964         LZX_ASSERT(offset < LZX_NUM_FAST_OFFSETS);
965         return c->offset_slot_fast[offset];
966 }
967
968 /* Tally, and optionally record, the specified literal byte.  */
969 static inline void
970 lzx_declare_literal(struct lzx_compressor *c, unsigned literal,
971                     struct lzx_item **next_chosen_item)
972 {
973         unsigned main_symbol = lzx_main_symbol_for_literal(literal);
974
975         c->freqs.main[main_symbol]++;
976
977         if (next_chosen_item) {
978                 *(*next_chosen_item)++ = (struct lzx_item) {
979                         .data = main_symbol,
980                 };
981         }
982 }
983
984 /* Tally, and optionally record, the specified repeat offset match.  */
985 static inline void
986 lzx_declare_repeat_offset_match(struct lzx_compressor *c,
987                                 unsigned len, unsigned rep_index,
988                                 struct lzx_item **next_chosen_item)
989 {
990         unsigned len_header;
991         unsigned len_symbol;
992         unsigned main_symbol;
993
994         if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
995                 len_header = len - LZX_MIN_MATCH_LEN;
996                 len_symbol = LZX_LENCODE_NUM_SYMBOLS;
997         } else {
998                 len_header = LZX_NUM_PRIMARY_LENS;
999                 len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
1000                 c->freqs.len[len_symbol]++;
1001         }
1002
1003         main_symbol = lzx_main_symbol_for_match(rep_index, len_header);
1004
1005         c->freqs.main[main_symbol]++;
1006
1007         if (next_chosen_item) {
1008                 *(*next_chosen_item)++ = (struct lzx_item) {
1009                         .data = (u64)main_symbol | ((u64)len_symbol << 10),
1010                 };
1011         }
1012 }
1013
1014 /* Tally, and optionally record, the specified explicit offset match.  */
1015 static inline void
1016 lzx_declare_explicit_offset_match(struct lzx_compressor *c, unsigned len, u32 offset,
1017                                   struct lzx_item **next_chosen_item)
1018 {
1019         unsigned len_header;
1020         unsigned len_symbol;
1021         unsigned main_symbol;
1022         unsigned offset_slot;
1023         unsigned num_extra_bits;
1024         u32 extra_bits;
1025
1026         if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
1027                 len_header = len - LZX_MIN_MATCH_LEN;
1028                 len_symbol = LZX_LENCODE_NUM_SYMBOLS;
1029         } else {
1030                 len_header = LZX_NUM_PRIMARY_LENS;
1031                 len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
1032                 c->freqs.len[len_symbol]++;
1033         }
1034
1035         offset_slot = (offset < LZX_NUM_FAST_OFFSETS) ?
1036                         lzx_get_offset_slot_fast(c, offset) :
1037                         lzx_get_offset_slot(offset);
1038
1039         main_symbol = lzx_main_symbol_for_match(offset_slot, len_header);
1040
1041         c->freqs.main[main_symbol]++;
1042
1043         num_extra_bits = lzx_extra_offset_bits[offset_slot];
1044
1045         if (num_extra_bits >= LZX_NUM_ALIGNED_OFFSET_BITS)
1046                 c->freqs.aligned[(offset + LZX_OFFSET_ADJUSTMENT) &
1047                                  LZX_ALIGNED_OFFSET_BITMASK]++;
1048
1049         if (next_chosen_item) {
1050
1051                 extra_bits = (offset + LZX_OFFSET_ADJUSTMENT) -
1052                              lzx_offset_slot_base[offset_slot];
1053
1054                 BUILD_BUG_ON(LZX_MAINCODE_MAX_NUM_SYMBOLS > (1 << 10));
1055                 BUILD_BUG_ON(LZX_LENCODE_NUM_SYMBOLS > (1 << 8));
1056                 *(*next_chosen_item)++ = (struct lzx_item) {
1057                         .data = (u64)main_symbol |
1058                                 ((u64)len_symbol << 10) |
1059                                 ((u64)num_extra_bits << 18) |
1060                                 ((u64)extra_bits << 23),
1061                 };
1062         }
1063 }
1064
1065
1066 /* Tally, and optionally record, the specified match or literal.  */
1067 static inline void
1068 lzx_declare_item(struct lzx_compressor *c, u32 item,
1069                  struct lzx_item **next_chosen_item)
1070 {
1071         u32 len = item & OPTIMUM_LEN_MASK;
1072         u32 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1073
1074         if (len == 1)
1075                 lzx_declare_literal(c, offset_data, next_chosen_item);
1076         else if (offset_data < LZX_NUM_RECENT_OFFSETS)
1077                 lzx_declare_repeat_offset_match(c, len, offset_data,
1078                                                 next_chosen_item);
1079         else
1080                 lzx_declare_explicit_offset_match(c, len,
1081                                                   offset_data - LZX_OFFSET_ADJUSTMENT,
1082                                                   next_chosen_item);
1083 }
1084
1085 static inline void
1086 lzx_record_item_list(struct lzx_compressor *c,
1087                      struct lzx_optimum_node *cur_node,
1088                      struct lzx_item **next_chosen_item)
1089 {
1090         struct lzx_optimum_node *end_node;
1091         u32 saved_item;
1092         u32 item;
1093
1094         /* The list is currently in reverse order (last item to first item).
1095          * Reverse it.  */
1096         end_node = cur_node;
1097         saved_item = cur_node->item;
1098         do {
1099                 item = saved_item;
1100                 cur_node -= item & OPTIMUM_LEN_MASK;
1101                 saved_item = cur_node->item;
1102                 cur_node->item = item;
1103         } while (cur_node != c->optimum_nodes);
1104
1105         /* Walk the list of items from beginning to end, tallying and recording
1106          * each item.  */
1107         do {
1108                 lzx_declare_item(c, cur_node->item, next_chosen_item);
1109                 cur_node += (cur_node->item) & OPTIMUM_LEN_MASK;
1110         } while (cur_node != end_node);
1111 }
1112
1113 static inline void
1114 lzx_tally_item_list(struct lzx_compressor *c, struct lzx_optimum_node *cur_node)
1115 {
1116         /* Since we're just tallying the items, we don't need to reverse the
1117          * list.  Processing the items in reverse order is fine.  */
1118         do {
1119                 lzx_declare_item(c, cur_node->item, NULL);
1120                 cur_node -= (cur_node->item & OPTIMUM_LEN_MASK);
1121         } while (cur_node != c->optimum_nodes);
1122 }
1123
1124 /*
1125  * Find an inexpensive path through the graph of possible match/literal choices
1126  * for the current block.  The nodes of the graph are
1127  * c->optimum_nodes[0...block_size].  They correspond directly to the bytes in
1128  * the current block, plus one extra node for end-of-block.  The edges of the
1129  * graph are matches and literals.  The goal is to find the minimum cost path
1130  * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
1131  *
1132  * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1133  * proceeding forwards one node at a time.  At each node, a selection of matches
1134  * (len >= 2), as well as the literal byte (len = 1), is considered.  An item of
1135  * length 'len' provides a new path to reach the node 'len' bytes later.  If
1136  * such a path is the lowest cost found so far to reach that later node, then
1137  * that later node is updated with the new path.
1138  *
1139  * Note that although this algorithm is based on minimum cost path search, due
1140  * to various simplifying assumptions the result is not guaranteed to be the
1141  * true minimum cost, or "optimal", path over the graph of all valid LZX
1142  * representations of this block.
1143  *
1144  * Also, note that because of the presence of the recent offsets queue (which is
1145  * a type of adaptive state), the algorithm cannot work backwards and compute
1146  * "cost to end" instead of "cost to beginning".  Furthermore, the way the
1147  * algorithm handles this adaptive state in the "minimum-cost" parse is actually
1148  * only an approximation.  It's possible for the globally optimal, minimum cost
1149  * path to contain a prefix, ending at a position, where that path prefix is
1150  * *not* the minimum cost path to that position.  This can happen if such a path
1151  * prefix results in a different adaptive state which results in lower costs
1152  * later.  The algorithm does not solve this problem; it only considers the
1153  * lowest cost to reach each individual position.
1154  */
1155 static struct lzx_lru_queue
1156 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1157                        const u8 * const restrict block_begin,
1158                        const u32 block_size,
1159                        const struct lzx_lru_queue initial_queue)
1160 {
1161         struct lzx_optimum_node *cur_node = c->optimum_nodes;
1162         struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
1163         struct lz_match *cache_ptr = c->match_cache;
1164         const u8 *in_next = block_begin;
1165         const u8 * const block_end = block_begin + block_size;
1166
1167         /* Instead of storing the match offset LRU queues in the
1168          * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1169          * storing them in a smaller array.  This works because the algorithm
1170          * only requires a limited history of the adaptive state.  Once a given
1171          * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1172          * it is no longer needed.  */
1173         struct lzx_lru_queue queues[512];
1174
1175         BUILD_BUG_ON(ARRAY_LEN(queues) < LZX_MAX_MATCH_LEN + 1);
1176 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
1177
1178         /* Initially, the cost to reach each node is "infinity".  */
1179         memset(c->optimum_nodes, 0xFF,
1180                (block_size + 1) * sizeof(c->optimum_nodes[0]));
1181
1182         QUEUE(block_begin) = initial_queue;
1183
1184         /* The following loop runs 'block_size' iterations, one per node.  */
1185         do {
1186                 unsigned num_matches;
1187                 unsigned literal;
1188                 u32 cost;
1189
1190                 /*
1191                  * A selection of matches for the block was already saved in
1192                  * memory so that we don't have to run the uncompressed data
1193                  * through the matchfinder on every optimization pass.  However,
1194                  * we still search for repeat offset matches during each
1195                  * optimization pass because we cannot predict the state of the
1196                  * recent offsets queue.  But as a heuristic, we don't bother
1197                  * searching for repeat offset matches if the general-purpose
1198                  * matchfinder failed to find any matches.
1199                  *
1200                  * Note that a match of length n at some offset implies there is
1201                  * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1202                  * that same offset.  In other words, we don't necessarily need
1203                  * to use the full length of a match.  The key heuristic that
1204                  * saves a significant amount of time is that for each
1205                  * distinct length, we only consider the smallest offset for
1206                  * which that length is available.  This heuristic also applies
1207                  * to repeat offsets, which we order specially: R0 < R1 < R2 <
1208                  * any explicit offset.  Of course, this heuristic may
1209                  * produce suboptimal results because offset slots in LZX are
1210                  * subject to entropy encoding, but in practice this is a useful
1211                  * heuristic.
1212                  */
1213
1214                 num_matches = cache_ptr->length;
1215                 cache_ptr++;
1216
1217                 if (num_matches) {
1218                         struct lz_match *end_matches = cache_ptr + num_matches;
1219                         unsigned next_len = LZX_MIN_MATCH_LEN;
1220                         unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1221                         const u8 *matchptr;
1222
1223                         /* Consider R0 match  */
1224                         matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1225                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1226                                 goto R0_done;
1227                         BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2);
1228                         do {
1229                                 u32 cost = cur_node->cost +
1230                                            c->costs.match_cost[0][
1231                                                         next_len - LZX_MIN_MATCH_LEN];
1232                                 if (cost <= (cur_node + next_len)->cost) {
1233                                         (cur_node + next_len)->cost = cost;
1234                                         (cur_node + next_len)->item =
1235                                                 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1236                                 }
1237                                 if (unlikely(++next_len > max_len)) {
1238                                         cache_ptr = end_matches;
1239                                         goto done_matches;
1240                                 }
1241                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1242
1243                 R0_done:
1244
1245                         /* Consider R1 match  */
1246                         matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1247                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1248                                 goto R1_done;
1249                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1250                                 goto R1_done;
1251                         for (unsigned len = 2; len < next_len - 1; len++)
1252                                 if (matchptr[len] != in_next[len])
1253                                         goto R1_done;
1254                         do {
1255                                 u32 cost = cur_node->cost +
1256                                            c->costs.match_cost[1][
1257                                                         next_len - LZX_MIN_MATCH_LEN];
1258                                 if (cost <= (cur_node + next_len)->cost) {
1259                                         (cur_node + next_len)->cost = cost;
1260                                         (cur_node + next_len)->item =
1261                                                 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1262                                 }
1263                                 if (unlikely(++next_len > max_len)) {
1264                                         cache_ptr = end_matches;
1265                                         goto done_matches;
1266                                 }
1267                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1268
1269                 R1_done:
1270
1271                         /* Consider R2 match  */
1272                         matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1273                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1274                                 goto R2_done;
1275                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1276                                 goto R2_done;
1277                         for (unsigned len = 2; len < next_len - 1; len++)
1278                                 if (matchptr[len] != in_next[len])
1279                                         goto R2_done;
1280                         do {
1281                                 u32 cost = cur_node->cost +
1282                                            c->costs.match_cost[2][
1283                                                         next_len - LZX_MIN_MATCH_LEN];
1284                                 if (cost <= (cur_node + next_len)->cost) {
1285                                         (cur_node + next_len)->cost = cost;
1286                                         (cur_node + next_len)->item =
1287                                                 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1288                                 }
1289                                 if (unlikely(++next_len > max_len)) {
1290                                         cache_ptr = end_matches;
1291                                         goto done_matches;
1292                                 }
1293                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1294
1295                 R2_done:
1296
1297                         while (next_len > cache_ptr->length)
1298                                 if (++cache_ptr == end_matches)
1299                                         goto done_matches;
1300
1301                         /* Consider explicit offset matches  */
1302                         do {
1303                                 u32 offset = cache_ptr->offset;
1304                                 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1305                                 unsigned offset_slot = (offset < LZX_NUM_FAST_OFFSETS) ?
1306                                                 lzx_get_offset_slot_fast(c, offset) :
1307                                                 lzx_get_offset_slot(offset);
1308                                 do {
1309                                         u32 cost = cur_node->cost +
1310                                                    c->costs.match_cost[offset_slot][
1311                                                                 next_len - LZX_MIN_MATCH_LEN];
1312                                 #if LZX_CONSIDER_ALIGNED_COSTS
1313                                         if (lzx_extra_offset_bits[offset_slot] >=
1314                                             LZX_NUM_ALIGNED_OFFSET_BITS)
1315                                                 cost += c->costs.aligned[offset_data &
1316                                                                          LZX_ALIGNED_OFFSET_BITMASK];
1317                                 #endif
1318                                         if (cost < (cur_node + next_len)->cost) {
1319                                                 (cur_node + next_len)->cost = cost;
1320                                                 (cur_node + next_len)->item =
1321                                                         (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1322                                         }
1323                                 } while (++next_len <= cache_ptr->length);
1324                         } while (++cache_ptr != end_matches);
1325                 }
1326
1327         done_matches:
1328
1329                 /* Consider coding a literal.
1330                  *
1331                  * To avoid an extra branch, actually checking the preferability
1332                  * of coding the literal is integrated into the queue update
1333                  * code below.  */
1334                 literal = *in_next++;
1335                 cost = cur_node->cost +
1336                        c->costs.main[lzx_main_symbol_for_literal(literal)];
1337
1338                 /* Advance to the next position.  */
1339                 cur_node++;
1340
1341                 /* The lowest-cost path to the current position is now known.
1342                  * Finalize the recent offsets queue that results from taking
1343                  * this lowest-cost path.  */
1344
1345                 if (cost <= cur_node->cost) {
1346                         /* Literal: queue remains unchanged.  */
1347                         cur_node->cost = cost;
1348                         cur_node->item = (literal << OPTIMUM_OFFSET_SHIFT) | 1;
1349                         QUEUE(in_next) = QUEUE(in_next - 1);
1350                 } else {
1351                         /* Match: queue update is needed.  */
1352                         unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1353                         u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1354                         if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1355                                 /* Explicit offset match: insert offset at front  */
1356                                 QUEUE(in_next) =
1357                                         lzx_lru_queue_push(QUEUE(in_next - len),
1358                                                            offset_data - LZX_OFFSET_ADJUSTMENT);
1359                         } else {
1360                                 /* Repeat offset match: swap offset to front  */
1361                                 QUEUE(in_next) =
1362                                         lzx_lru_queue_swap(QUEUE(in_next - len),
1363                                                            offset_data);
1364                         }
1365                 }
1366         } while (cur_node != end_node);
1367
1368         /* Return the match offset queue at the end of the minimum-cost path. */
1369         return QUEUE(block_end);
1370 }
1371
1372 /* Given the costs for the main and length codewords, compute 'match_costs'.  */
1373 static void
1374 lzx_compute_match_costs(struct lzx_compressor *c)
1375 {
1376         unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
1377         struct lzx_costs *costs = &c->costs;
1378
1379         for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1380
1381                 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1382                 unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
1383                 unsigned i;
1384
1385         #if LZX_CONSIDER_ALIGNED_COSTS
1386                 if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
1387                         extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1388         #endif
1389
1390                 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1391                         costs->match_cost[offset_slot][i] =
1392                                 costs->main[main_symbol++] + extra_cost;
1393
1394                 extra_cost += costs->main[main_symbol];
1395
1396                 for (; i < LZX_NUM_LENS; i++)
1397                         costs->match_cost[offset_slot][i] =
1398                                 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
1399         }
1400 }
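
/*
 * Illustrative example (not used by the code): with LZX_BIT_COST == 16 these
 * are fixed-point costs in units of 1/16 bit.  A match of length 20 using
 * offset slot 's' is charged roughly
 *
 *      match_cost[s][18] = main[<symbol for slot s, length header 7>]
 *                        + len[11]                    (length 20 => 20 - 9)
 *                        + 16 * lzx_extra_offset_bits[s]
 *
 * (less 16 * LZX_NUM_ALIGNED_OFFSET_BITS when the aligned code covers the low
 * offset bits), while lengths 2 through 8 need only a main symbol plus the
 * extra offset bits.
 */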
1401
1402 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
1403  * algorithm.  */
1404 static void
1405 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1406 {
1407         u32 i;
1408         bool have_byte[256];
1409         unsigned num_used_bytes;
1410
1411         /* The costs below are hard coded to use a scaling factor of 16.  */
1412         BUILD_BUG_ON(LZX_BIT_COST != 16);
1413
1414         /*
1415          * Heuristics:
1416          *
1417          * - Use smaller initial costs for literal symbols when the input buffer
1418          *   contains fewer distinct bytes.
1419          *
1420          * - Assume that match symbols are more costly than literal symbols.
1421          *
1422          * - Assume that length symbols for shorter lengths are less costly than
1423          *   length symbols for longer lengths.
1424          */
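        /*
         * Worked example (illustrative only): if the block contains 64
         * distinct byte values, each literal symbol starts at cost
         * 140 - (256 - 64)/4 = 92 units, i.e. 92/16 = 5.75 bits; if all 256
         * values occur, the cost stays at 140/16 = 8.75 bits.
         */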
1425
1426         for (i = 0; i < 256; i++)
1427                 have_byte[i] = false;
1428
1429         for (i = 0; i < block_size; i++)
1430                 have_byte[block[i]] = true;
1431
1432         num_used_bytes = 0;
1433         for (i = 0; i < 256; i++)
1434                 num_used_bytes += have_byte[i];
1435
1436         for (i = 0; i < 256; i++)
1437                 c->costs.main[i] = 140 - (256 - num_used_bytes) / 4;
1438
1439         for (; i < c->num_main_syms; i++)
1440                 c->costs.main[i] = 170;
1441
1442         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1443                 c->costs.len[i] = 103 + (i / 4);
1444
1445 #if LZX_CONSIDER_ALIGNED_COSTS
1446         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1447                 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1448 #endif
1449
1450         lzx_compute_match_costs(c);
1451 }
1452
1453 /* Update the current cost model to reflect the computed Huffman codes.  */
1454 static void
1455 lzx_update_costs(struct lzx_compressor *c)
1456 {
1457         unsigned i;
1458         const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1459
1460         for (i = 0; i < c->num_main_syms; i++)
1461                 c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
1462
1463         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1464                 c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
1465
1466 #if LZX_CONSIDER_ALIGNED_COSTS
1467         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1468                 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
1469 #endif
1470
1471         lzx_compute_match_costs(c);
1472 }
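
/*
 * Illustrative note: a symbol that received a 5-bit codeword in the previous
 * pass now costs 5 * 16 = 80 units, while a symbol that was not used at all is
 * charged as if it had a 15-bit codeword (7 bits for the aligned code), so the
 * next pass strongly, but not absolutely, avoids it.
 */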
1473
1474 static struct lzx_lru_queue
1475 lzx_optimize_and_write_block(struct lzx_compressor *c,
1476                              struct lzx_output_bitstream *os,
1477                              const u8 *block_begin, const u32 block_size,
1478                              const struct lzx_lru_queue initial_queue)
1479 {
1480         unsigned num_passes_remaining = c->num_optim_passes;
1481         struct lzx_item *next_chosen_item;
1482         struct lzx_lru_queue new_queue;
1483
1484         /* The first optimization pass uses a default cost model.  Each
1485          * additional optimization pass uses a cost model derived from the
1486          * Huffman code computed in the previous pass.  */
1487
1488         lzx_set_default_costs(c, block_begin, block_size);
1489         lzx_reset_symbol_frequencies(c);
1490         do {
1491                 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1492                                                    initial_queue);
1493                 if (num_passes_remaining > 1) {
1494                         lzx_tally_item_list(c, c->optimum_nodes + block_size);
1495                         lzx_make_huffman_codes(c);
1496                         lzx_update_costs(c);
1497                         lzx_reset_symbol_frequencies(c);
1498                 }
1499         } while (--num_passes_remaining);
1500
1501         next_chosen_item = c->chosen_items;
1502         lzx_record_item_list(c, c->optimum_nodes + block_size, &next_chosen_item);
1503         lzx_finish_block(c, os, block_size, next_chosen_item - c->chosen_items);
1504         return new_queue;
1505 }
1506
1507 /*
1508  * This is the "near-optimal" LZX compressor.
1509  *
1510  * For each block, it performs a relatively thorough graph search to find an
1511  * inexpensive (in terms of compressed size) way to output that block.
1512  *
1513  * Note: there are actually many things this algorithm leaves on the table in
1514  * terms of compression ratio.  So although it may be "near-optimal", it is
1515  * certainly not "optimal".  The goal is not to produce the optimal compression
1516  * ratio, which for LZX is probably impossible within any practical amount of
1517  * time, but rather to produce a compression ratio significantly better than a
1518  * simpler "greedy" or "lazy" parse while still being relatively fast.
1519  */
1520 static void
1521 lzx_compress_near_optimal(struct lzx_compressor *c,
1522                           struct lzx_output_bitstream *os)
1523 {
1524         const u8 * const in_begin = c->in_buffer;
1525         const u8 *       in_next = in_begin;
1526         const u8 * const in_end  = in_begin + c->in_nbytes;
1527         unsigned max_len = LZX_MAX_MATCH_LEN;
1528         unsigned nice_len = min(c->nice_match_length, max_len);
1529         u32 next_hash;
1530         struct lzx_lru_queue queue;
1531
1532         bt_matchfinder_init(&c->bt_mf);
1533         matchfinder_init(c->hash2_tab, LZX_HASH2_LENGTH);
1534         next_hash = bt_matchfinder_hash_3_bytes(in_next);
1535         lzx_lru_queue_init(&queue);
1536
1537         do {
1538                 /* Starting a new block  */
1539                 const u8 * const in_block_begin = in_next;
1540                 const u8 * const in_block_end =
1541                         in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1542
1543                 /* Run the block through the matchfinder and cache the matches. */
1544                 struct lz_match *cache_ptr = c->match_cache;
1545                 do {
1546                         struct lz_match *lz_matchptr;
1547                         u32 hash2;
1548                         pos_t cur_match;
1549                         unsigned best_len;
1550
1551                         /* If approaching the end of the input buffer, adjust
1552                          * 'max_len' and 'nice_len' accordingly.  */
1553                         if (unlikely(max_len > in_end - in_next)) {
1554                                 max_len = in_end - in_next;
1555                                 nice_len = min(max_len, nice_len);
1556
1557                                 /* This extra check is needed to ensure that
1558                                  * reading the next 3 bytes when looking for a
1559                                  * length 2 match is valid.  In addition, we
1560                                  * cannot allow ourselves to find a length 2
1561                                  * match of the very last two bytes with the
1562                                  * very first two bytes, since such a match has
1563                                  * an offset too large to be represented.  */
1564                                 if (unlikely(max_len <
1565                                              max(LZ_HASH_REQUIRED_NBYTES, 3)))
1566                                 {
1567                                         in_next++;
1568                                         cache_ptr->length = 0;
1569                                         cache_ptr++;
1570                                         continue;
1571                                 }
1572                         }
1573
1574                         lz_matchptr = cache_ptr + 1;
1575
1576                         /* Check for a length 2 match, i.e. whether the most
                              * recent position with the same 2-byte hash still
                              * matches.  It is recorded only if it cannot be
                              * extended to length 3, since matches of length 3
                              * and greater are found by the binary tree
                              * matchfinder below.  */
1577                         hash2 = lz_hash_2_bytes(in_next);
1578                         cur_match = c->hash2_tab[hash2];
1579                         c->hash2_tab[hash2] = in_next - in_begin;
1580                         if (matchfinder_node_valid(cur_match) &&
1581                             (LZX_HASH2_ORDER == 16 ||
1582                              load_u16_unaligned(&in_begin[cur_match]) ==
1583                              load_u16_unaligned(in_next)) &&
1584                             in_begin[cur_match + 2] != in_next[2])
1585                         {
1586                                 lz_matchptr->length = 2;
1587                                 lz_matchptr->offset = in_next - &in_begin[cur_match];
1588                                 lz_matchptr++;
1589                         }
1590
1591                         /* Check for matches of length >= 3.  */
1592                         lz_matchptr = bt_matchfinder_get_matches(&c->bt_mf,
1593                                                                  in_begin,
1594                                                                  in_next,
1595                                                                  3,
1596                                                                  max_len,
1597                                                                  nice_len,
1598                                                                  c->max_search_depth,
1599                                                                  &next_hash,
1600                                                                  &best_len,
1601                                                                  lz_matchptr);
1602                         in_next++;
1603                         cache_ptr->length = lz_matchptr - (cache_ptr + 1);
1604                         cache_ptr = lz_matchptr;
1605
1606                         /*
1607                          * If there was a very long match found, then don't
1608                          * cache any matches for the bytes covered by that
1609                          * match.  This avoids degenerate behavior when
1610                          * compressing highly redundant data, where the number
1611                          * of matches can be very large.
1612                          *
1613                          * This heuristic doesn't actually hurt the compression
1614                          * ratio very much.  If there's a long match, then the
1615                          * data must be highly compressible, so it doesn't
1616                          * matter as much what we do.
1617                          */
1618                         if (best_len >= nice_len) {
1619                                 --best_len;
1620                                 do {
1621                                         if (unlikely(max_len > in_end - in_next)) {
1622                                                 max_len = in_end - in_next;
1623                                                 nice_len = min(max_len, nice_len);
1624                                                 if (unlikely(max_len <
1625                                                              max(LZ_HASH_REQUIRED_NBYTES, 3)))
1626                                                 {
1627                                                         in_next++;
1628                                                         cache_ptr->length = 0;
1629                                                         cache_ptr++;
1630                                                         continue;
1631                                                 }
1632                                         }
1633                                         c->hash2_tab[lz_hash_2_bytes(in_next)] =
1634                                                 in_next - in_begin;
1635                                         bt_matchfinder_skip_position(&c->bt_mf,
1636                                                                      in_begin,
1637                                                                      in_next,
1638                                                                      in_end,
1639                                                                      nice_len,
1640                                                                      c->max_search_depth,
1641                                                                      &next_hash);
1642                                         in_next++;
1643                                         cache_ptr->length = 0;
1644                                         cache_ptr++;
1645                                 } while (--best_len);
1646                         }
1647                 } while (in_next < in_block_end &&
1648                          likely(cache_ptr < c->cache_overflow_mark));
1649
1650                 /* We've finished running the block through the matchfinder.
1651                  * Now choose a match/literal sequence and write the block.  */
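                /* (The inner loop above may also stop before in_block_end if
                 * the match cache fills up, i.e. cache_ptr reaches
                 * c->cache_overflow_mark; in that case the block is simply
                 * written with fewer than LZX_DIV_BLOCK_SIZE bytes.) */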
1652
1653                 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
1654                                                      in_next - in_block_begin,
1655                                                      queue);
1656         } while (in_next != in_end);
1657 }
1658
1659 /*
1660  * Given a pointer to the current byte sequence and the current list of recent
1661  * match offsets, find the longest repeat offset match.
1662  *
1663  * If no match of at least 2 bytes is found, then return 0.
1664  *
1665  * If a match of at least 2 bytes is found, then return its length and set
1666  * *rep_max_idx_ret to the index of its offset in @queue.
1667  */
1668 static unsigned
1669 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
1670                                      const u32 bytes_remaining,
1671                                      struct lzx_lru_queue queue,
1672                                      unsigned *rep_max_idx_ret)
1673 {
1674         BUILD_BUG_ON(LZX_NUM_RECENT_OFFSETS != 3);
1675         LZX_ASSERT(bytes_remaining >= 2);
1676
1677         const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
1678         const u16 next_2_bytes = load_u16_unaligned(in_next);
1679         const u8 *matchptr;
1680         unsigned rep_max_len;
1681         unsigned rep_max_idx;
1682         unsigned rep_len;
1683
1684         matchptr = in_next - lzx_lru_queue_pop(&queue);
1685         if (load_u16_unaligned(matchptr) == next_2_bytes)
1686                 rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
1687         else
1688                 rep_max_len = 0;
1689         rep_max_idx = 0;
1690
1691         matchptr = in_next - lzx_lru_queue_pop(&queue);
1692         if (load_u16_unaligned(matchptr) == next_2_bytes) {
1693                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1694                 if (rep_len > rep_max_len) {
1695                         rep_max_len = rep_len;
1696                         rep_max_idx = 1;
1697                 }
1698         }
1699
1700         matchptr = in_next - lzx_lru_queue_pop(&queue);
1701         if (load_u16_unaligned(matchptr) == next_2_bytes) {
1702                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1703                 if (rep_len > rep_max_len) {
1704                         rep_max_len = rep_len;
1705                         rep_max_idx = 2;
1706                 }
1707         }
1708
1709         *rep_max_idx_ret = rep_max_idx;
1710         return rep_max_len;
1711 }
1712
1713 /* Fast heuristic scoring for lazy parsing: how "good" is this match?  */
1714 static inline unsigned
1715 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
1716 {
1717         unsigned score = len;
1718
1719         if (adjusted_offset < 4096)
1720                 score++;
1721
1722         if (adjusted_offset < 256)
1723                 score++;
1724
1725         return score;
1726 }
1727
1728 static inline unsigned
1729 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
1730 {
1731         return rep_len + 3;
1732 }
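
/*
 * Illustrative comparison: a length 5 explicit offset match at adjusted offset
 * 100 scores 5 + 1 + 1 = 7, the same as a length 4 repeat offset match
 * (4 + 3 = 7).  Because the lazy parser compares scores with '>=' in favor of
 * repeat offsets, the repeat offset match wins such ties; repeat offsets need
 * no extra offset bits, so they are usually cheaper to encode.
 */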
1733
1734 /* This is the "lazy" LZX compressor.  */
1735 static void
1736 lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
1737 {
1738         const u8 * const in_begin = c->in_buffer;
1739         const u8 *       in_next = in_begin;
1740         const u8 * const in_end  = in_begin + c->in_nbytes;
1741         unsigned max_len = LZX_MAX_MATCH_LEN;
1742         unsigned nice_len = min(c->nice_match_length, max_len);
1743         struct lzx_lru_queue queue;
1744
1745         hc_matchfinder_init(&c->hc_mf);
1746         lzx_lru_queue_init(&queue);
1747
1748         do {
1749                 /* Starting a new block  */
1750
1751                 const u8 * const in_block_begin = in_next;
1752                 const u8 * const in_block_end =
1753                         in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1754                 struct lzx_item *next_chosen_item = c->chosen_items;
1755                 unsigned cur_len;
1756                 u32 cur_offset;
1757                 u32 cur_offset_data;
1758                 unsigned cur_score;
1759                 unsigned next_len;
1760                 u32 next_offset;
1761                 u32 next_offset_data;
1762                 unsigned next_score;
1763                 unsigned rep_max_len;
1764                 unsigned rep_max_idx;
1765                 unsigned rep_score;
1766                 unsigned skip_len;
1767
1768                 lzx_reset_symbol_frequencies(c);
1769
1770                 do {
1771                         if (unlikely(max_len > in_end - in_next)) {
1772                                 max_len = in_end - in_next;
1773                                 nice_len = min(max_len, nice_len);
1774                         }
1775
1776                         /* Find the longest match at the current position.  */
1777
1778                         cur_len = hc_matchfinder_longest_match(&c->hc_mf,
1779                                                                in_begin,
1780                                                                in_next,
1781                                                                2,
1782                                                                max_len,
1783                                                                nice_len,
1784                                                                c->max_search_depth,
1785                                                                &cur_offset);
1786                         if (cur_len < 3 ||
1787                             (cur_len == 3 &&
1788                              cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
1789                              cur_offset != lzx_lru_queue_R0(queue) &&
1790                              cur_offset != lzx_lru_queue_R1(queue) &&
1791                              cur_offset != lzx_lru_queue_R2(queue)))
1792                         {
1793                                 /* There was no match found, or the only match found
1794                                  * was a distant length 3 match.  Output a literal.  */
1795                                 lzx_declare_literal(c, *in_next++,
1796                                                     &next_chosen_item);
1797                                 continue;
1798                         }
1799
1800                         if (cur_offset == lzx_lru_queue_R0(queue)) {
1801                                 in_next++;
1802                                 cur_offset_data = 0;
1803                                 skip_len = cur_len - 1;
1804                                 goto choose_cur_match;
1805                         }
1806
1807                         cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
1808                         cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
1809
1810                         /* Consider a repeat offset match  */
1811                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
1812                                                                            in_end - in_next,
1813                                                                            queue,
1814                                                                            &rep_max_idx);
1815                         in_next++;
1816
1817                         if (rep_max_len >= 3 &&
1818                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
1819                                                                        rep_max_idx)) >= cur_score)
1820                         {
1821                                 cur_len = rep_max_len;
1822                                 cur_offset_data = rep_max_idx;
1823                                 skip_len = rep_max_len - 1;
1824                                 goto choose_cur_match;
1825                         }
1826
1827                 have_cur_match:
1828
1829                         /* We have a match at the current position.  */
1830
1831                         /* If we have a very long match, choose it immediately.  */
1832                         if (cur_len >= nice_len) {
1833                                 skip_len = cur_len - 1;
1834                                 goto choose_cur_match;
1835                         }
1836
1837                         /* See if there's a better match at the next position.  */
1838
1839                         if (unlikely(max_len > in_end - in_next)) {
1840                                 max_len = in_end - in_next;
1841                                 nice_len = min(max_len, nice_len);
1842                         }
1843
1844                         next_len = hc_matchfinder_longest_match(&c->hc_mf,
1845                                                                 in_begin,
1846                                                                 in_next,
1847                                                                 cur_len - 2,
1848                                                                 max_len,
1849                                                                 nice_len,
1850                                                                 c->max_search_depth / 2,
1851                                                                 &next_offset);
1852
1853                         if (next_len <= cur_len - 2) {
1854                                 in_next++;
1855                                 skip_len = cur_len - 2;
1856                                 goto choose_cur_match;
1857                         }
1858
1859                         next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
1860                         next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
1861
1862                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
1863                                                                            in_end - in_next,
1864                                                                            queue,
1865                                                                            &rep_max_idx);
1866                         in_next++;
1867
1868                         if (rep_max_len >= 3 &&
1869                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
1870                                                                        rep_max_idx)) >= next_score)
1871                         {
1872
1873                                 if (rep_score > cur_score) {
1874                                         /* The next match is better, and it's a
1875                                          * repeat offset match.  */
1876                                         lzx_declare_literal(c, *(in_next - 2),
1877                                                             &next_chosen_item);
1878                                         cur_len = rep_max_len;
1879                                         cur_offset_data = rep_max_idx;
1880                                         skip_len = cur_len - 1;
1881                                         goto choose_cur_match;
1882                                 }
1883                         } else {
1884                                 if (next_score > cur_score) {
1885                                         /* The next match is better, and it's an
1886                                          * explicit offset match.  */
1887                                         lzx_declare_literal(c, *(in_next - 2),
1888                                                             &next_chosen_item);
1889                                         cur_len = next_len;
1890                                         cur_offset_data = next_offset_data;
1891                                         cur_score = next_score;
1892                                         goto have_cur_match;
1893                                 }
1894                         }
1895
1896                         /* The original match was better.  */
1897                         skip_len = cur_len - 2;
1898
1899                 choose_cur_match:
1900                         if (cur_offset_data < LZX_NUM_RECENT_OFFSETS) {
1901                                 lzx_declare_repeat_offset_match(c, cur_len,
1902                                                                 cur_offset_data,
1903                                                                 &next_chosen_item);
1904                                 queue = lzx_lru_queue_swap(queue, cur_offset_data);
1905                         } else {
1906                                 lzx_declare_explicit_offset_match(c, cur_len,
1907                                                                   cur_offset_data - LZX_OFFSET_ADJUSTMENT,
1908                                                                   &next_chosen_item);
1909                                 queue = lzx_lru_queue_push(queue, cur_offset_data - LZX_OFFSET_ADJUSTMENT);
1910                         }
1911
1912                         hc_matchfinder_skip_positions(&c->hc_mf,
1913                                                       in_begin,
1914                                                       in_next,
1915                                                       in_end,
1916                                                       skip_len);
1917                         in_next += skip_len;
1918                 } while (in_next < in_block_end);
1919
1920                 lzx_finish_block(c, os, in_next - in_block_begin,
1921                                  next_chosen_item - c->chosen_items);
1922         } while (in_next != in_end);
1923 }
1924
1925 static void
1926 lzx_init_offset_slot_fast(struct lzx_compressor *c)
1927 {
1928         u8 slot = 0;
1929
1930         for (u32 offset = 0; offset < LZX_NUM_FAST_OFFSETS; offset++) {
1931
1932                 while (offset + LZX_OFFSET_ADJUSTMENT >= lzx_offset_slot_base[slot + 1])
1933                         slot++;
1934
1935                 c->offset_slot_fast[offset] = slot;
1936         }
1937 }
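
/*
 * Note: c->offset_slot_fast[] caches lzx_get_offset_slot() for every offset
 * below LZX_NUM_FAST_OFFSETS, so hot paths such as the explicit offset loop in
 * lzx_find_min_cost_path() can map an offset to its slot with a single table
 * lookup instead of recomputing it.
 */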
1938
1939 static size_t
1940 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
1941 {
1942         if (compression_level <= LZX_MAX_FAST_LEVEL) {
1943                 return offsetof(struct lzx_compressor, hc_mf) +
1944                         hc_matchfinder_size(max_bufsize);
1945         } else {
1946                 return offsetof(struct lzx_compressor, bt_mf) +
1947                         bt_matchfinder_size(max_bufsize);
1948         }
1949 }
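
/*
 * Note: the matchfinder is the final, variable-sized part of struct
 * lzx_compressor, so the allocation covers only the matchfinder actually used
 * by the selected parsing algorithm (hash chains for the lazy parser, a binary
 * tree matchfinder for the near-optimal parser).
 */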
1950
1951 static u64
1952 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level)
1953 {
1954         u64 size = 0;
1955
1956         if (max_bufsize > LZX_MAX_WINDOW_SIZE)
1957                 return 0;
1958
1959         size += lzx_get_compressor_size(max_bufsize, compression_level);
1960         size += max_bufsize; /* in_buffer */
1961         return size;
1962 }
1963
1964 static int
1965 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
1966                       void **c_ret)
1967 {
1968         unsigned window_order;
1969         struct lzx_compressor *c;
1970
1971         window_order = lzx_get_window_order(max_bufsize);
1972         if (window_order == 0)
1973                 return WIMLIB_ERR_INVALID_PARAM;
1974
1975         c = ALIGNED_MALLOC(lzx_get_compressor_size(max_bufsize,
1976                                                    compression_level),
1977                            MATCHFINDER_ALIGNMENT);
1978         if (!c)
1979                 goto oom0;
1980
1981         c->num_main_syms = lzx_get_num_main_syms(window_order);
1982         c->window_order = window_order;
1983
1984         c->in_buffer = MALLOC(max_bufsize);
1985         if (!c->in_buffer)
1986                 goto oom1;
1987
1988         if (compression_level <= LZX_MAX_FAST_LEVEL) {
1989
1990                 /* Fast compression: Use lazy parsing.  */
1991
1992                 c->impl = lzx_compress_lazy;
1993                 c->max_search_depth = (36 * compression_level) / 20;
1994                 c->nice_match_length = min((72 * compression_level) / 20,
1995                                            LZX_MAX_MATCH_LEN);
1996
1997         } else {
1998
1999                 /* Normal / high compression: Use near-optimal parsing.  */
2000
2001                 c->impl = lzx_compress_near_optimal;
2002
2003                 /* Scale nice_match_length and max_search_depth with the
2004                  * compression level.  */
2005                 c->max_search_depth = (24 * compression_level) / 50;
2006                 c->nice_match_length = min((32 * compression_level) / 50,
2007                                            LZX_MAX_MATCH_LEN);
2008
2009                 /* Set a number of optimization passes appropriate for the
2010                  * compression level.  */
2011
2012                 c->num_optim_passes = 1;
2013
2014                 if (compression_level >= 45)
2015                         c->num_optim_passes++;
2016
2017                 /* Use more optimization passes for higher compression levels.
2018                  * But the more passes there are, the less they help --- so
2019                  * don't add them linearly.  */
2020                 if (compression_level >= 70) {
2021                         c->num_optim_passes++;
2022                         if (compression_level >= 100)
2023                                 c->num_optim_passes++;
2024                         if (compression_level >= 150)
2025                                 c->num_optim_passes++;
2026                         if (compression_level >= 200)
2027                                 c->num_optim_passes++;
2028                         if (compression_level >= 300)
2029                                 c->num_optim_passes++;
2030                 }
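                /*
                 * For example (illustrative only): compression level 50 gives
                 * max_search_depth = 24, nice_match_length = 32 and 2
                 * optimization passes; level 100 gives 48, 64 and 4 passes.
                 */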
2031
2032                 c->cache_overflow_mark = &c->match_cache[LZX_CACHE_LEN];
2033         }
2034
2035         lzx_init_offset_slot_fast(c);
2036         *c_ret = c;
2037         return 0;
2038
2039 oom1:
2040         ALIGNED_FREE(c);
2041 oom0:
2042         return WIMLIB_ERR_NOMEM;
2043 }
2044
2045 static size_t
2046 lzx_compress(const void *in, size_t in_nbytes,
2047              void *out, size_t out_nbytes_avail, void *_c)
2048 {
2049         struct lzx_compressor *c = _c;
2050         struct lzx_output_bitstream os;
2051
2052         /* Don't bother trying to compress very small inputs.  */
2053         if (in_nbytes < 100)
2054                 return 0;
2055
2056         /* Copy the input data into the internal buffer and preprocess it.  */
2057         memcpy(c->in_buffer, in, in_nbytes);
2058         c->in_nbytes = in_nbytes;
2059         lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
2060
2061         /* Initially, the previous Huffman codeword lengths are all zeroes.  */
2062         c->codes_index = 0;
2063         memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2064
2065         /* Initialize the output bitstream.  */
2066         lzx_init_output(&os, out, out_nbytes_avail);
2067
2068         /* Call the compression level-specific compress() function.  */
2069         (*c->impl)(c, &os);
2070
2071         /* Flush the output bitstream and return the compressed size or 0.  */
2072         return lzx_flush_output(&os);
2073 }
2074
2075 static void
2076 lzx_free_compressor(void *_c)
2077 {
2078         struct lzx_compressor *c = _c;
2079
2080         FREE(c->in_buffer);
2081         ALIGNED_FREE(c);
2082 }
2083
2084 const struct compressor_ops lzx_compressor_ops = {
2085         .get_needed_memory  = lzx_get_needed_memory,
2086         .create_compressor  = lzx_create_compressor,
2087         .compress           = lzx_compress,
2088         .free_compressor    = lzx_free_compressor,
2089 };
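
/*
 * Usage sketch (illustrative only; within wimlib these ops are invoked through
 * the generic compressor framework rather than called directly).  The buffer
 * names and the level value 50 below are placeholders:
 *
 *	void *c;
 *	if (lzx_create_compressor(max_bufsize, 50, &c) == 0) {
 *		size_t csize = lzx_compress(in, in_nbytes,
 *					    out, out_nbytes_avail, c);
 *		lzx_free_compressor(c);
 *	}
 *
 * A return value of 0 from lzx_compress() means the compressed data did not
 * fit in 'out_nbytes_avail' bytes, or the input was too small to bother
 * compressing.
 */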