1 /*
2  * lzx_compress.c
3  *
4  * A compressor for the LZX compression format, as used in WIM files.
5  */
6
7 /*
8  * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
9  *
10  * This file is free software; you can redistribute it and/or modify it under
11  * the terms of the GNU Lesser General Public License as published by the Free
12  * Software Foundation; either version 3 of the License, or (at your option) any
13  * later version.
14  *
15  * This file is distributed in the hope that it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
18  * details.
19  *
20  * You should have received a copy of the GNU Lesser General Public License
21  * along with this file; if not, see http://www.gnu.org/licenses/.
22  */
23
24
25 /*
26  * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27  * compression format, as used in the WIM (Windows IMaging) file format.
28  *
29  * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30  * "Near-optimal" is significantly slower than "lazy", but results in a better
31  * compression ratio.  The "near-optimal" algorithm is used at the default
32  * compression level.
33  *
34  * This file may need some slight modifications to be used outside of the WIM
35  * format.  In particular, in other situations the LZX block header might be
36  * slightly different, and sliding window support might be required.
37  *
38  * Note: LZX is a compression format derived from DEFLATE, the format used by
39  * zlib and gzip.  Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40  * Certain details are quite similar, such as the method for storing Huffman
41  * codes.  However, the main differences are:
42  *
43  * - LZX preprocesses the data in an attempt to make x86 machine code slightly
44  *   more compressible before compressing it further.
45  *
46  * - LZX uses a "main" alphabet which combines literals and matches, with the
47  *   match symbols containing a "length header" (giving all or part of the match
48  *   length) and an "offset slot" (giving, roughly speaking, the order of
49  *   magnitude of the match offset).
50  *
51  * - LZX does not have static Huffman blocks (that is, the kind with preset
52  *   Huffman codes); however it does have two types of dynamic Huffman blocks
53  *   ("verbatim" and "aligned").
54  *
55  * - LZX has a minimum match length of 2 rather than 3.  Length 2 matches can be
56  *   useful, but generally only if the parser is smart about choosing them.
57  *
58  * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59  *   of match offsets.  This is very useful for certain types of files, such as
60  *   binary files that have repeating records.
61  */
62
63 #ifdef HAVE_CONFIG_H
64 #  include "config.h"
65 #endif
66
67 /*
68  * Start a new LZX block (with new Huffman codes) after this many bytes.
69  *
70  * Note: actual block sizes may slightly exceed this value.
71  *
72  * TODO: recursive splitting and cost evaluation might be good for an extremely
73  * high compression mode, but otherwise it is almost always far too slow for how
74  * much it helps.  Perhaps some sort of heuristic would be useful?
75  */
76 #define LZX_DIV_BLOCK_SIZE      32768
77
78 /*
79  * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the
80  * match cache for each byte position.  This value should be high enough so that
81  * nearly all the time, all matches found in a given block can fit in the match
82  * cache.  However, fallback behavior (immediately terminating the block) on
83  * cache overflow is still required.
84  */
85 #define LZX_CACHE_PER_POS       7
86
87 /*
88  * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
89  * excluding the extra "overflow" entries.  The per-position multiplier is '1 +
90  * LZX_CACHE_PER_POS' instead of 'LZX_CACHE_PER_POS' because there is an
91  * overhead of one lz_match per position, used to hold the match count at that
92  * position.
93  */
94 #define LZX_CACHE_LENGTH        (LZX_DIV_BLOCK_SIZE * (1 + LZX_CACHE_PER_POS))
95
96 /*
97  * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
98  * ever be saved in the match cache for a single position.  Since each match we
99  * save for a single position has a distinct length, we can use the number of
100  * possible match lengths in LZX as this bound.  This bound is guaranteed to be
101  * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
102  * it will never actually be reached.
103  */
104 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
105
106 /*
107  * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
108  * This makes it possible to consider fractional bit costs.
109  *
110  * Note: this is only useful as a statistical trick for when the true costs are
111  * unknown.  In reality, each token in LZX requires a whole number of bits to
112  * output.
113  */
114 #define LZX_BIT_COST            16
115
116 /*
117  * Consideration of aligned offset costs is disabled for now, due to
118  * insufficient benefit gained from the time spent.
119  */
120 #define LZX_CONSIDER_ALIGNED_COSTS      0
121
122 /*
123  * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
124  * faster algorithm.
125  */
126 #define LZX_MAX_FAST_LEVEL      34
127
128 /*
129  * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
130  * for finding length 2 matches.  This can be as high as 16 (in which case the
131  * hash function is trivial), but using a smaller hash table speeds up
132  * compression due to reduced cache pressure.
133  */
134 #define LZX_HASH2_ORDER         12
135 #define LZX_HASH2_LENGTH        (1UL << LZX_HASH2_ORDER)
136
137 #include "wimlib/lzx_common.h"
138
139 /*
140  * The maximum allowed window order for the matchfinder.
141  */
142 #define MATCHFINDER_MAX_WINDOW_ORDER    LZX_MAX_WINDOW_ORDER
143
144 #include <string.h>
145
146 #include "wimlib/bt_matchfinder.h"
147 #include "wimlib/compress_common.h"
148 #include "wimlib/compressor_ops.h"
149 #include "wimlib/error.h"
150 #include "wimlib/hc_matchfinder.h"
151 #include "wimlib/lz_extend.h"
152 #include "wimlib/unaligned.h"
153 #include "wimlib/util.h"
154
155 struct lzx_output_bitstream;
156
157 /* Codewords for the LZX Huffman codes.  */
158 struct lzx_codewords {
159         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
160         u32 len[LZX_LENCODE_NUM_SYMBOLS];
161         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
162 };
163
164 /* Codeword lengths (in bits) for the LZX Huffman codes.
165  * A zero length means the corresponding symbol has zero frequency.  */
166 struct lzx_lens {
167         u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
168         u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
169         u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
170 };
171
172 /* Cost model for near-optimal parsing  */
173 struct lzx_costs {
174
175         /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
176          * length 'len' match that has an offset belonging to 'offset_slot'.  */
177         u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
178
179         /* Cost for each symbol in the main code  */
180         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
181
182         /* Cost for each symbol in the length code  */
183         u32 len[LZX_LENCODE_NUM_SYMBOLS];
184
185 #if LZX_CONSIDER_ALIGNED_COSTS
186         /* Cost for each symbol in the aligned code  */
187         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
188 #endif
189 };
190
191 /* Codewords and lengths for the LZX Huffman codes.  */
192 struct lzx_codes {
193         struct lzx_codewords codewords;
194         struct lzx_lens lens;
195 };
196
197 /* Symbol frequency counters for the LZX Huffman codes.  */
198 struct lzx_freqs {
199         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
200         u32 len[LZX_LENCODE_NUM_SYMBOLS];
201         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
202 };
203
204 /* Intermediate LZX match/literal format  */
205 struct lzx_item {
206
207         /* Bits 0  -  9: Main symbol
208          * Bits 10 - 17: Length symbol
209          * Bits 18 - 22: Number of extra offset bits
210          * Bits 23+    : Extra offset bits  */
211         u64 data;
212 };
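
/*
 * As a sketch of the layout above, lzx_write_item() recovers the fields with:
 *
 *      main_symbol    = data & 0x3FF;          (bits 0 - 9)
 *      len_symbol     = (data >> 10) & 0xFF;   (bits 10 - 17)
 *      num_extra_bits = (data >> 18) & 0x1F;   (bits 18 - 22)
 *      extra_bits     = data >> 23;            (bits 23+)
 *
 * and lzx_declare_explicit_offset_match() builds 'data' by OR-ing the same
 * fields together, shifted into place.
 */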
213
214 /*
215  * This structure represents a byte position in the input buffer and a node in
216  * the graph of possible match/literal choices.
217  *
218  * Logically, each incoming edge to this node is labeled with a literal or a
219  * match that can be taken to reach this position from an earlier position; and
220  * each outgoing edge from this node is labeled with a literal or a match that
221  * can be taken to advance from this position to a later position.
222  */
223 struct lzx_optimum_node {
224
225         /* The cost, in bits, of the lowest-cost path that has been found to
226          * reach this position.  This can change as progressively lower cost
227          * paths are found to reach this position.  */
228         u32 cost;
229
230         /*
231          * The match or literal that was taken to reach this position.  This can
232          * change as progressively lower cost paths are found to reach this
233          * position.
234          *
235          * This variable is divided into two bitfields.
236          *
237          * Literals:
238          *      Low bits are 1, high bits are the literal.
239          *
240          * Explicit offset matches:
241          *      Low bits are the match length, high bits are the offset plus 2.
242          *
243          * Repeat offset matches:
244          *      Low bits are the match length, high bits are the queue index.
245          */
246         u32 item;
247 #define OPTIMUM_OFFSET_SHIFT 9
248 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
249 } _aligned_attribute(8);
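
/*
 * Examples of the 'item' encoding above (a sketch; OPTIMUM_OFFSET_SHIFT is 9):
 *
 *      literal 0x41:                   item = (0x41 << OPTIMUM_OFFSET_SHIFT) | 1
 *      length 5 match, offset 100:     item = ((100 + 2) << OPTIMUM_OFFSET_SHIFT) | 5
 *      length 5 match, queue index 1:  item = (1 << OPTIMUM_OFFSET_SHIFT) | 5
 *
 * lzx_declare_item() decodes 'item' back into one of these three cases.
 */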
250
251 /*
252  * Least-recently-used queue for match offsets.
253  *
254  * This is represented as a 64-bit integer for efficiency.  There are three
255  * offsets of 21 bits each.  Bit 64 is garbage.
256  */
257 struct lzx_lru_queue {
258         u64 R;
259 };
260
261 #define LZX_QUEUE64_OFFSET_SHIFT 21
262 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
263
264 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
265 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
266 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
267
268 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
269 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
270 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
271
272 static inline void
273 lzx_lru_queue_init(struct lzx_lru_queue *queue)
274 {
275         queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
276                    ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
277                    ((u64)1 << LZX_QUEUE64_R2_SHIFT);
278 }
279
280 static inline u64
281 lzx_lru_queue_R0(struct lzx_lru_queue queue)
282 {
283         return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
284 }
285
286 static inline u64
287 lzx_lru_queue_R1(struct lzx_lru_queue queue)
288 {
289         return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
290 }
291
292 static inline u64
293 lzx_lru_queue_R2(struct lzx_lru_queue queue)
294 {
295         return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
296 }
297
298 /* Push a match offset onto the front (most recently used) end of the queue.  */
299 static inline struct lzx_lru_queue
300 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
301 {
302         return (struct lzx_lru_queue) {
303                 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
304         };
305 }
306
307 /* Pop a match offset off the front (most recently used) end of the queue.  */
308 static inline u32
309 lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
310 {
311         u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
312         queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
313         return offset;
314 }
315
316 /* Swap a match offset to the front of the queue.  */
317 static inline struct lzx_lru_queue
318 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
319 {
320         if (idx == 0)
321                 return queue;
322
323         if (idx == 1)
324                 return (struct lzx_lru_queue) {
325                         .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
326                              (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
327                              (queue.R & LZX_QUEUE64_R2_MASK),
328                 };
329
330         return (struct lzx_lru_queue) {
331                 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
332                      (queue.R & LZX_QUEUE64_R1_MASK) |
333                      (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
334         };
335 }
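
/*
 * Usage sketch for the queue operations above:
 *
 *      struct lzx_lru_queue q;
 *      lzx_lru_queue_init(&q);          => R0 = 1,   R1 = 1, R2 = 1
 *      q = lzx_lru_queue_push(q, 100);  => R0 = 100, R1 = 1, R2 = 1  (old R2 discarded)
 *      q = lzx_lru_queue_swap(q, 2);    => R0 = 1,   R1 = 1, R2 = 100
 */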
336
337 /* The main LZX compressor structure  */
338 struct lzx_compressor {
339
340         /* The "nice" match length: if a match of this length is found, then
341          * choose it immediately without further consideration.  */
342         unsigned nice_match_length;
343
344         /* The maximum search depth: consider at most this many potential
345          * matches at each position.  */
346         unsigned max_search_depth;
347
348         /* The log base 2 of the LZX window size for LZ match offset encoding
349          * purposes.  This will be >= LZX_MIN_WINDOW_ORDER and <=
350          * LZX_MAX_WINDOW_ORDER.  */
351         unsigned window_order;
352
353         /* The number of symbols in the main alphabet.  This depends on
354          * @window_order, since @window_order determines the maximum possible
355          * offset.  */
356         unsigned num_main_syms;
357
358         /* Number of optimization passes per block  */
359         unsigned num_optim_passes;
360
361         /* The preprocessed buffer of data being compressed  */
362         u8 *in_buffer;
363
364         /* The number of bytes of data to be compressed, which is the number of
365          * bytes of data in @in_buffer that are actually valid.  */
366         size_t in_nbytes;
367
368         /* Pointer to the compress() implementation chosen at allocation time */
369         void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
370
371         /* If true, the compressor need not preserve the input buffer if it
372          * compresses the data successfully.  */
373         bool destructive;
374
375         /* The Huffman symbol frequency counters for the current block.  */
376         struct lzx_freqs freqs;
377
378         /* The Huffman codes for the current and previous blocks.  The one with
379          * index 'codes_index' is for the current block, and the other one is
380          * for the previous block.  */
381         struct lzx_codes codes[2];
382         unsigned codes_index;
383
384         /*
385          * The match/literal sequence the algorithm chose for the current block.
386          *
387          * Notes on how large this array actually needs to be:
388          *
389          * - In lzx_compress_near_optimal(), the maximum block size is
390          *   'LZX_DIV_BLOCK_SIZE + LZX_MAX_MATCH_LEN - 1' bytes.  This occurs if
391          *   a match of the maximum length is found on the last byte.  Although
392          *   it is impossible for this particular case to actually result in a
393          *   parse of all literals, we reserve this many spaces anyway.
394          *
395          * - The worst case for lzx_compress_lazy() is a block of almost all
396          *   literals that ends with a series of matches of increasing scores,
397          *   causing a sequence of literals to be chosen before the last match
398          *   is finally chosen.  The number of items actually chosen in this
399          *   scenario is limited by the number of distinct match scores that
400          *   exist for matches shorter than 'nice_match_length'.  Having
401          *   'LZX_MAX_MATCH_LEN - 1' extra spaces is plenty for now.
402          */
403         struct lzx_item chosen_items[LZX_DIV_BLOCK_SIZE + LZX_MAX_MATCH_LEN - 1];
404
405         /* Table mapping match offset => offset slot for small offsets  */
406 #define LZX_NUM_FAST_OFFSETS 32768
407         u8 offset_slot_fast[LZX_NUM_FAST_OFFSETS];
408
409         union {
410                 /* Data for greedy or lazy parsing  */
411                 struct {
412                         /* Hash chains matchfinder (MUST BE LAST!!!)  */
413                         struct hc_matchfinder hc_mf;
414                 };
415
416                 /* Data for near-optimal parsing  */
417                 struct {
418                         /*
419                          * The graph nodes for the current block.
420                          *
421                          * We need at least 'LZX_DIV_BLOCK_SIZE +
422                          * LZX_MAX_MATCH_LEN - 1' nodes because that is the
423                          * maximum block size that may be used.  Add 1 because
424                          * we need a node to represent end-of-block.
425                          *
426                          * It is possible that nodes past end-of-block are
427                          * accessed during match consideration, but this can
428                          * only occur if the block was truncated at
429                          * LZX_DIV_BLOCK_SIZE.  So the same bound still applies.
430                          * Note that since nodes past the end of the block will
431                          * never actually have an effect on the items that are
432                          * chosen for the block, it makes no difference what
433                          * their costs are initialized to (if anything).
434                          */
435                         struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE +
436                                                               LZX_MAX_MATCH_LEN - 1 + 1];
437
438                         /* The cost model for the current block  */
439                         struct lzx_costs costs;
440
441                         /*
442                          * Cached matches for the current block.  This array
443                          * contains the matches that were found at each position
444                          * in the block.  Specifically, for each position, there
445                          * is a special 'struct lz_match' whose 'length' field
446                          * contains the number of matches that were found at
447                          * that position; this is followed by the matches
448                          * themselves, if any, sorted by strictly increasing
449                          * length.
450                          *
451                          * Note: in rare cases, there will be a very high number
452                          * of matches in the block and this array will overflow.
453                          * If this happens, we force the end of the current
454                          * block.  LZX_CACHE_LENGTH is the length at which we
455                          * actually check for overflow.  The extra slots beyond
456                          * this are enough to absorb the worst case overflow,
457                          * which occurs if starting at
458                          * &match_cache[LZX_CACHE_LENGTH - 1], we write the
459                          * match count header, then write
460                          * LZX_MAX_MATCHES_PER_POS matches, then skip searching
461                          * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
462                          * write the match count header for each.
463                          */
464                         struct lz_match match_cache[LZX_CACHE_LENGTH +
465                                                     LZX_MAX_MATCHES_PER_POS +
466                                                     LZX_MAX_MATCH_LEN - 1];
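
                        /* Layout sketch: if the matchfinder returned two
                         * matches at some position, say length 3 and length 7,
                         * then the cache holds a header entry whose 'length'
                         * field is 2, followed by the length 3 match, then the
                         * length 7 match, before the next position's header. */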
467
468                         /* Hash table for finding length 2 matches  */
469                         pos_t hash2_tab[LZX_HASH2_LENGTH];
470
471                         /* Binary trees matchfinder (MUST BE LAST!!!)  */
472                         struct bt_matchfinder bt_mf;
473                 };
474         };
475 };
476
477 /*
478  * Structure to keep track of the current state of sending bits to the
479  * compressed output buffer.
480  *
481  * The LZX bitstream is encoded as a sequence of 16-bit coding units.
482  */
483 struct lzx_output_bitstream {
484
485         /* Bits that haven't yet been written to the output buffer.  */
486         u32 bitbuf;
487
488         /* Number of bits currently held in @bitbuf.  */
489         u32 bitcount;
490
491         /* Pointer to the start of the output buffer.  */
492         u8 *start;
493
494         /* Pointer to the position in the output buffer at which the next coding
495          * unit should be written.  */
496         u8 *next;
497
498         /* Pointer just past the end of the output buffer, rounded down to a
499          * 2-byte boundary.  */
500         u8 *end;
501 };
502
503 /*
504  * Initialize the output bitstream.
505  *
506  * @os
507  *      The output bitstream structure to initialize.
508  * @buffer
509  *      The buffer being written to.
510  * @size
511  *      Size of @buffer, in bytes.
512  */
513 static void
514 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
515 {
516         os->bitbuf = 0;
517         os->bitcount = 0;
518         os->start = buffer;
519         os->next = os->start;
520         os->end = os->start + (size & ~1);
521 }
522
523 /*
524  * Write some bits to the output bitstream.
525  *
526  * The bits are given by the low-order @num_bits bits of @bits.  Higher-order
527  * bits in @bits must not be set.  At most 17 bits can be written at once.
528  *
529  * @max_num_bits is a compile-time constant that specifies the maximum number of
530  * bits that can ever be written at the call site.  It is used to optimize away
531  * the conditional code for writing a second 16-bit coding unit when writing
532  * fewer than 17 bits.
533  *
534  * If the output buffer space is exhausted, then the bits will be ignored, and
535  * lzx_flush_output() will return 0 when it gets called.
536  */
537 static inline void
538 lzx_write_varbits(struct lzx_output_bitstream *os,
539                   const u32 bits, const unsigned num_bits,
540                   const unsigned max_num_bits)
541 {
542         /* This code is optimized for LZX, which never needs to write more than
543          * 17 bits at once.  */
544         LZX_ASSERT(num_bits <= 17);
545         LZX_ASSERT(num_bits <= max_num_bits);
546         LZX_ASSERT(os->bitcount <= 15);
547
548         /* Add the bits to the bit buffer variable.  @bitcount will be at most
549          * 15, so there will be just enough space for the maximum possible
550          * @num_bits of 17.  */
551         os->bitcount += num_bits;
552         os->bitbuf = (os->bitbuf << num_bits) | bits;
553
554         /* Check whether any coding units need to be written.  */
555         if (os->bitcount >= 16) {
556
557                 os->bitcount -= 16;
558
559                 /* Write a coding unit, unless it would overflow the buffer.  */
560                 if (os->next != os->end) {
561                         put_unaligned_u16_le(os->bitbuf >> os->bitcount, os->next);
562                         os->next += 2;
563                 }
564
565                 /* If writing 17 bits, a second coding unit might need to be
566                  * written.  But because 'max_num_bits' is a compile-time
567                  * constant, the compiler will optimize away this code at most
568                  * call sites.  */
569                 if (max_num_bits == 17 && os->bitcount == 16) {
570                         if (os->next != os->end) {
571                                 put_unaligned_u16_le(os->bitbuf, os->next);
572                                 os->next += 2;
573                         }
574                         os->bitcount = 0;
575                 }
576         }
577 }
578
579 /* Use when @num_bits is a compile-time constant.  Otherwise use
580  * lzx_write_varbits().  */
581 static inline void
582 lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
583 {
584         lzx_write_varbits(os, bits, num_bits, num_bits);
585 }
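
/*
 * Usage sketch: lzx_write_compressed_block() begins a block header with
 *
 *      lzx_write_bits(os, block_type, 3);      3-bit block type
 *      lzx_write_bits(os, 1, 1);               "default block size" flag
 *
 * Bits accumulate in os->bitbuf; whenever 16 or more are pending, one
 * little-endian 16-bit coding unit is emitted to the output buffer.
 */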
586
587 /*
588  * Flush the last coding unit to the output buffer if needed.  Return the total
589  * number of bytes written to the output buffer, or 0 if an overflow occurred.
590  */
591 static u32
592 lzx_flush_output(struct lzx_output_bitstream *os)
593 {
594         if (os->next == os->end)
595                 return 0;
596
597         if (os->bitcount != 0) {
598                 put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next);
599                 os->next += 2;
600         }
601
602         return os->next - os->start;
603 }
604
605 /* Build the main, length, and aligned offset Huffman codes used in LZX.
606  *
607  * This takes as input the frequency tables for each code and produces as output
608  * a set of tables that map symbols to codewords and codeword lengths.  */
609 static void
610 lzx_make_huffman_codes(struct lzx_compressor *c)
611 {
612         const struct lzx_freqs *freqs = &c->freqs;
613         struct lzx_codes *codes = &c->codes[c->codes_index];
614
615         make_canonical_huffman_code(c->num_main_syms,
616                                     LZX_MAX_MAIN_CODEWORD_LEN,
617                                     freqs->main,
618                                     codes->lens.main,
619                                     codes->codewords.main);
620
621         make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
622                                     LZX_MAX_LEN_CODEWORD_LEN,
623                                     freqs->len,
624                                     codes->lens.len,
625                                     codes->codewords.len);
626
627         make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
628                                     LZX_MAX_ALIGNED_CODEWORD_LEN,
629                                     freqs->aligned,
630                                     codes->lens.aligned,
631                                     codes->codewords.aligned);
632 }
633
634 /* Reset the symbol frequencies for the LZX Huffman codes.  */
635 static void
636 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
637 {
638         memset(&c->freqs, 0, sizeof(c->freqs));
639 }
640
641 static unsigned
642 lzx_compute_precode_items(const u8 lens[restrict],
643                           const u8 prev_lens[restrict],
644                           u32 precode_freqs[restrict],
645                           unsigned precode_items[restrict])
646 {
647         unsigned *itemptr;
648         unsigned run_start;
649         unsigned run_end;
650         unsigned extra_bits;
651         int delta;
652         u8 len;
653
654         itemptr = precode_items;
655         run_start = 0;
656
657         while (!((len = lens[run_start]) & 0x80)) {
658
659                 /* len = the length being repeated  */
660
661                 /* Find the next run of codeword lengths.  */
662
663                 run_end = run_start + 1;
664
665                 /* Fast case for a single length.  */
666                 if (likely(len != lens[run_end])) {
667                         delta = prev_lens[run_start] - len;
668                         if (delta < 0)
669                                 delta += 17;
670                         precode_freqs[delta]++;
671                         *itemptr++ = delta;
672                         run_start++;
673                         continue;
674                 }
675
676                 /* Extend the run.  */
677                 do {
678                         run_end++;
679                 } while (len == lens[run_end]);
680
681                 if (len == 0) {
682                         /* Run of zeroes.  */
683
684                         /* Symbol 18: RLE 20 to 51 zeroes at a time.  */
685                         while ((run_end - run_start) >= 20) {
686                                 extra_bits = min((run_end - run_start) - 20, 0x1f);
687                                 precode_freqs[18]++;
688                                 *itemptr++ = 18 | (extra_bits << 5);
689                                 run_start += 20 + extra_bits;
690                         }
691
692                         /* Symbol 17: RLE 4 to 19 zeroes at a time.  */
693                         if ((run_end - run_start) >= 4) {
694                                 extra_bits = min((run_end - run_start) - 4, 0xf);
695                                 precode_freqs[17]++;
696                                 *itemptr++ = 17 | (extra_bits << 5);
697                                 run_start += 4 + extra_bits;
698                         }
699                 } else {
700
701                         /* A run of nonzero lengths. */
702
703                         /* Symbol 19: RLE 4 to 5 of any length at a time.  */
704                         while ((run_end - run_start) >= 4) {
705                                 extra_bits = (run_end - run_start) > 4;
706                                 delta = prev_lens[run_start] - len;
707                                 if (delta < 0)
708                                         delta += 17;
709                                 precode_freqs[19]++;
710                                 precode_freqs[delta]++;
711                                 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
712                                 run_start += 4 + extra_bits;
713                         }
714                 }
715
716                 /* Output any remaining lengths without RLE.  */
717                 while (run_start != run_end) {
718                         delta = prev_lens[run_start] - len;
719                         if (delta < 0)
720                                 delta += 17;
721                         precode_freqs[delta]++;
722                         *itemptr++ = delta;
723                         run_start++;
724                 }
725         }
726
727         return itemptr - precode_items;
728 }
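
/*
 * Worked example (a sketch): with prev_lens[] all zeroes, a run of six zero
 * lengths followed by five lengths equal to 5 produces two precode items:
 *
 *      17 | (2 << 5)               symbol 17, 4 + 2 = 6 zeroes
 *      19 | (1 << 5) | (12 << 6)   symbol 19, 4 + 1 = 5 repeats of delta 12,
 *                                  where 12 = (0 - 5) mod 17
 */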
729
730 /*
731  * Output a Huffman code in the compressed form used in LZX.
732  *
733  * The Huffman code is represented in the output as a logical series of codeword
734  * lengths from which the Huffman code, which must be in canonical form, can be
735  * reconstructed.
736  *
737  * The codeword lengths are themselves compressed using a separate Huffman code,
738  * the "precode", which contains a symbol for each possible codeword length in
739  * the larger code as well as several special symbols to represent repeated
740  * codeword lengths (a form of run-length encoding).  The precode is itself
741  * constructed in canonical form, and its codeword lengths are represented
742  * literally in 20 4-bit fields that immediately precede the compressed codeword
743  * lengths of the larger code.
744  *
745  * Furthermore, the codeword lengths of the larger code are actually represented
746  * as deltas from the codeword lengths of the corresponding code in the previous
747  * block.
748  *
749  * @os:
750  *      Bitstream to which to write the compressed Huffman code.
751  * @lens:
752  *      The codeword lengths, indexed by symbol, in the Huffman code.
753  * @prev_lens:
754  *      The codeword lengths, indexed by symbol, in the corresponding Huffman
755  *      code in the previous block, or all zeroes if this is the first block.
756  * @num_lens:
757  *      The number of symbols in the Huffman code.
758  */
759 static void
760 lzx_write_compressed_code(struct lzx_output_bitstream *os,
761                           const u8 lens[restrict],
762                           const u8 prev_lens[restrict],
763                           unsigned num_lens)
764 {
765         u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
766         u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
767         u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
768         unsigned precode_items[num_lens];
769         unsigned num_precode_items;
770         unsigned precode_item;
771         unsigned precode_sym;
772         unsigned i;
773         u8 saved = lens[num_lens];
774         *(u8 *)(lens + num_lens) = 0x80;
775
776         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
777                 precode_freqs[i] = 0;
778
779         /* Compute the "items" (RLE / literal tokens and extra bits) with which
780          * the codeword lengths in the larger code will be output.  */
781         num_precode_items = lzx_compute_precode_items(lens,
782                                                       prev_lens,
783                                                       precode_freqs,
784                                                       precode_items);
785
786         /* Build the precode.  */
787         make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
788                                     LZX_MAX_PRE_CODEWORD_LEN,
789                                     precode_freqs, precode_lens,
790                                     precode_codewords);
791
792         /* Output the lengths of the codewords in the precode.  */
793         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
794                 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
795
796         /* Output the encoded lengths of the codewords in the larger code.  */
797         for (i = 0; i < num_precode_items; i++) {
798                 precode_item = precode_items[i];
799                 precode_sym = precode_item & 0x1F;
800                 lzx_write_varbits(os, precode_codewords[precode_sym],
801                                   precode_lens[precode_sym],
802                                   LZX_MAX_PRE_CODEWORD_LEN);
803                 if (precode_sym >= 17) {
804                         if (precode_sym == 17) {
805                                 lzx_write_bits(os, precode_item >> 5, 4);
806                         } else if (precode_sym == 18) {
807                                 lzx_write_bits(os, precode_item >> 5, 5);
808                         } else {
809                                 lzx_write_bits(os, (precode_item >> 5) & 1, 1);
810                                 precode_sym = precode_item >> 6;
811                                 lzx_write_varbits(os, precode_codewords[precode_sym],
812                                                   precode_lens[precode_sym],
813                                                   LZX_MAX_PRE_CODEWORD_LEN);
814                         }
815                 }
816         }
817
818         *(u8 *)(lens + num_lens) = saved;
819 }
820
821 /* Output a match or literal.  */
822 static inline void
823 lzx_write_item(struct lzx_output_bitstream *os, struct lzx_item item,
824                unsigned ones_if_aligned, const struct lzx_codes *codes)
825 {
826         u64 data = item.data;
827         unsigned main_symbol;
828         unsigned len_symbol;
829         unsigned num_extra_bits;
830         u32 extra_bits;
831
832         main_symbol = data & 0x3FF;
833
834         lzx_write_varbits(os, codes->codewords.main[main_symbol],
835                           codes->lens.main[main_symbol],
836                           LZX_MAX_MAIN_CODEWORD_LEN);
837
838         if (main_symbol < LZX_NUM_CHARS)  /* Literal?  */
839                 return;
840
841         len_symbol = (data >> 10) & 0xFF;
842
843         if (len_symbol != LZX_LENCODE_NUM_SYMBOLS) {
844                 lzx_write_varbits(os, codes->codewords.len[len_symbol],
845                                   codes->lens.len[len_symbol],
846                                   LZX_MAX_LEN_CODEWORD_LEN);
847         }
848
849         num_extra_bits = (data >> 18) & 0x1F;
850         if (num_extra_bits == 0)  /* Small offset or repeat offset match?  */
851                 return;
852
853         extra_bits = data >> 23;
854
855         if ((num_extra_bits & ones_if_aligned) >= LZX_NUM_ALIGNED_OFFSET_BITS) {
856
857                 /* Aligned offset blocks: The low 3 bits of the extra offset
858                  * bits are Huffman-encoded using the aligned offset code.  The
859                  * remaining bits are output literally.  */
860
861                 lzx_write_varbits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
862                                   num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS,
863                                   17 - LZX_NUM_ALIGNED_OFFSET_BITS);
864
865                 lzx_write_varbits(os,
866                                   codes->codewords.aligned[extra_bits & LZX_ALIGNED_OFFSET_BITMASK],
867                                   codes->lens.aligned[extra_bits & LZX_ALIGNED_OFFSET_BITMASK],
868                                   LZX_MAX_ALIGNED_CODEWORD_LEN);
869         } else {
870                 /* Verbatim blocks, or fewer than 3 extra bits:  All extra
871                  * offset bits are output literally.  */
872                 lzx_write_varbits(os, extra_bits, num_extra_bits, 17);
873         }
874 }
875
876 /*
877  * Write all matches and literal bytes (which were precomputed) in an LZX
878  * compressed block to the output bitstream in the final compressed
879  * representation.
880  *
881  * @os
882  *      The output bitstream.
883  * @block_type
884  *      The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
885  *      LZX_BLOCKTYPE_VERBATIM).
886  * @items
887  *      The array of matches/literals to output.
888  * @num_items
889  *      Number of matches/literals to output (length of @items).
890  * @codes
891  *      The main, length, and aligned offset Huffman codes for the current
892  *      LZX compressed block.
893  */
894 static void
895 lzx_write_items(struct lzx_output_bitstream *os, int block_type,
896                 const struct lzx_item items[], u32 num_items,
897                 const struct lzx_codes *codes)
898 {
899         unsigned ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
900
901         for (u32 i = 0; i < num_items; i++)
902                 lzx_write_item(os, items[i], ones_if_aligned, codes);
903 }
904
905 static void
906 lzx_write_compressed_block(int block_type,
907                            u32 block_size,
908                            unsigned window_order,
909                            unsigned num_main_syms,
910                            const struct lzx_item chosen_items[],
911                            u32 num_chosen_items,
912                            const struct lzx_codes * codes,
913                            const struct lzx_lens * prev_lens,
914                            struct lzx_output_bitstream * os)
915 {
916         LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
917                    block_type == LZX_BLOCKTYPE_VERBATIM);
918
919         /* The first three bits indicate the type of block and are one of the
920          * LZX_BLOCKTYPE_* constants.  */
921         lzx_write_bits(os, block_type, 3);
922
923         /* Output the block size.
924          *
925          * The original LZX format seemed to always encode the block size in 3
926          * bytes.  However, the implementation in WIMGAPI, as used in WIM files,
927          * uses the first bit to indicate whether the block is the default size
928          * (32768) or a different size given explicitly by the next 16 bits.
929          *
930          * By default, this compressor uses a window size of 32768 and therefore
931          * follows the WIMGAPI behavior.  However, this compressor also supports
932          * window sizes greater than 32768 bytes, which do not appear to be
933          * supported by WIMGAPI.  In such cases, we retain the default size bit
934          * to mean a size of 32768 bytes but output the non-default block size in 24
935          * bits rather than 16.  The compatibility of this behavior is unknown
936          * because WIMs created with chunk size greater than 32768 can seemingly
937          * only be opened by wimlib anyway.  */
938         if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
939                 lzx_write_bits(os, 1, 1);
940         } else {
941                 lzx_write_bits(os, 0, 1);
942
943                 if (window_order >= 16)
944                         lzx_write_bits(os, block_size >> 16, 8);
945
946                 lzx_write_bits(os, block_size & 0xFFFF, 16);
947         }
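
        /* For example (a sketch): with window_order >= 16, a non-default block
         * size of 20000 bytes is encoded as a 0 bit, then 20000 >> 16 = 0 in 8
         * bits, then 20000 & 0xFFFF = 20000 in 16 bits.  */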
948
949         /* If it's an aligned offset block, output the aligned offset code.  */
950         if (block_type == LZX_BLOCKTYPE_ALIGNED) {
951                 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
952                         lzx_write_bits(os, codes->lens.aligned[i],
953                                        LZX_ALIGNEDCODE_ELEMENT_SIZE);
954                 }
955         }
956
957         /* Output the main code (two parts).  */
958         lzx_write_compressed_code(os, codes->lens.main,
959                                   prev_lens->main,
960                                   LZX_NUM_CHARS);
961         lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
962                                   prev_lens->main + LZX_NUM_CHARS,
963                                   num_main_syms - LZX_NUM_CHARS);
964
965         /* Output the length code.  */
966         lzx_write_compressed_code(os, codes->lens.len,
967                                   prev_lens->len,
968                                   LZX_LENCODE_NUM_SYMBOLS);
969
970         /* Output the compressed matches and literals.  */
971         lzx_write_items(os, block_type, chosen_items, num_chosen_items, codes);
972 }
973
974 /* Given the frequencies of symbols in an LZX-compressed block and the
975  * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
976  * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
977  * will take fewer bits to output.  */
978 static int
979 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
980                                const struct lzx_codes * codes)
981 {
982         u32 aligned_cost = 0;
983         u32 verbatim_cost = 0;
984
985         /* A verbatim block requires 3 bits in each place that an aligned symbol
986          * would be used in an aligned offset block.  */
987         for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
988                 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
989                 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
990         }
991
992         /* Account for output of the aligned offset code.  */
993         aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
994
995         if (aligned_cost < verbatim_cost)
996                 return LZX_BLOCKTYPE_ALIGNED;
997         else
998                 return LZX_BLOCKTYPE_VERBATIM;
999 }
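
/*
 * For example (a sketch): if the aligned symbol frequencies sum to 200 and the
 * aligned codeword lengths average 2.5 bits, then verbatim_cost is about
 * 3 * 200 = 600 bits while aligned_cost is about 2.5 * 200 + 24 = 524 bits
 * (the 24 bits being the 8 aligned codeword lengths sent in 3-bit fields), so
 * the block is output as LZX_BLOCKTYPE_ALIGNED.
 */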
1000
1001 /*
1002  * Finish an LZX block:
1003  *
1004  * - build the Huffman codes
1005  * - decide whether to output the block as VERBATIM or ALIGNED
1006  * - output the block
1007  * - swap the indices of the current and previous Huffman codes
1008  */
1009 static void
1010 lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
1011                  u32 block_size, u32 num_chosen_items)
1012 {
1013         int block_type;
1014
1015         lzx_make_huffman_codes(c);
1016
1017         block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
1018                                                     &c->codes[c->codes_index]);
1019         lzx_write_compressed_block(block_type,
1020                                    block_size,
1021                                    c->window_order,
1022                                    c->num_main_syms,
1023                                    c->chosen_items,
1024                                    num_chosen_items,
1025                                    &c->codes[c->codes_index],
1026                                    &c->codes[c->codes_index ^ 1].lens,
1027                                    os);
1028         c->codes_index ^= 1;
1029 }
1030
1031 /* Return the offset slot for the specified offset, which must be
1032  * less than LZX_NUM_FAST_OFFSETS.  */
1033 static inline unsigned
1034 lzx_get_offset_slot_fast(struct lzx_compressor *c, u32 offset)
1035 {
1036         LZX_ASSERT(offset < LZX_NUM_FAST_OFFSETS);
1037         return c->offset_slot_fast[offset];
1038 }
1039
1040 /* Tally, and optionally record, the specified literal byte.  */
1041 static inline void
1042 lzx_declare_literal(struct lzx_compressor *c, unsigned literal,
1043                     struct lzx_item **next_chosen_item)
1044 {
1045         unsigned main_symbol = lzx_main_symbol_for_literal(literal);
1046
1047         c->freqs.main[main_symbol]++;
1048
1049         if (next_chosen_item) {
1050                 *(*next_chosen_item)++ = (struct lzx_item) {
1051                         .data = main_symbol,
1052                 };
1053         }
1054 }
1055
1056 /* Tally, and optionally record, the specified repeat offset match.  */
1057 static inline void
1058 lzx_declare_repeat_offset_match(struct lzx_compressor *c,
1059                                 unsigned len, unsigned rep_index,
1060                                 struct lzx_item **next_chosen_item)
1061 {
1062         unsigned len_header;
1063         unsigned len_symbol;
1064         unsigned main_symbol;
1065
1066         if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
1067                 len_header = len - LZX_MIN_MATCH_LEN;
1068                 len_symbol = LZX_LENCODE_NUM_SYMBOLS;
1069         } else {
1070                 len_header = LZX_NUM_PRIMARY_LENS;
1071                 len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
1072                 c->freqs.len[len_symbol]++;
1073         }
1074
1075         main_symbol = lzx_main_symbol_for_match(rep_index, len_header);
1076
1077         c->freqs.main[main_symbol]++;
1078
1079         if (next_chosen_item) {
1080                 *(*next_chosen_item)++ = (struct lzx_item) {
1081                         .data = (u64)main_symbol | ((u64)len_symbol << 10),
1082                 };
1083         }
1084 }
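
/*
 * Length encoding sketch (assuming the usual values LZX_MIN_MATCH_LEN == 2 and
 * LZX_NUM_PRIMARY_LENS == 7): a match of length 2 through 8 is described
 * entirely by the length header 'len - 2' carried in the main symbol, while a
 * match of length 9 or greater uses length header 7 plus a length symbol of
 * 'len - 9'.
 */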
1085
1086 /* Tally, and optionally record, the specified explicit offset match.  */
1087 static inline void
1088 lzx_declare_explicit_offset_match(struct lzx_compressor *c, unsigned len, u32 offset,
1089                                   struct lzx_item **next_chosen_item)
1090 {
1091         unsigned len_header;
1092         unsigned len_symbol;
1093         unsigned main_symbol;
1094         unsigned offset_slot;
1095         unsigned num_extra_bits;
1096         u32 extra_bits;
1097
1098         if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
1099                 len_header = len - LZX_MIN_MATCH_LEN;
1100                 len_symbol = LZX_LENCODE_NUM_SYMBOLS;
1101         } else {
1102                 len_header = LZX_NUM_PRIMARY_LENS;
1103                 len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
1104                 c->freqs.len[len_symbol]++;
1105         }
1106
1107         offset_slot = (offset < LZX_NUM_FAST_OFFSETS) ?
1108                         lzx_get_offset_slot_fast(c, offset) :
1109                         lzx_get_offset_slot(offset);
1110
1111         main_symbol = lzx_main_symbol_for_match(offset_slot, len_header);
1112
1113         c->freqs.main[main_symbol]++;
1114
1115         num_extra_bits = lzx_extra_offset_bits[offset_slot];
1116
1117         if (num_extra_bits >= LZX_NUM_ALIGNED_OFFSET_BITS)
1118                 c->freqs.aligned[(offset + LZX_OFFSET_ADJUSTMENT) &
1119                                  LZX_ALIGNED_OFFSET_BITMASK]++;
1120
1121         if (next_chosen_item) {
1122
1123                 extra_bits = (offset + LZX_OFFSET_ADJUSTMENT) -
1124                              lzx_offset_slot_base[offset_slot];
1125
1126                 STATIC_ASSERT(LZX_MAINCODE_MAX_NUM_SYMBOLS <= (1 << 10));
1127                 STATIC_ASSERT(LZX_LENCODE_NUM_SYMBOLS <= (1 << 8));
1128                 *(*next_chosen_item)++ = (struct lzx_item) {
1129                         .data = (u64)main_symbol |
1130                                 ((u64)len_symbol << 10) |
1131                                 ((u64)num_extra_bits << 18) |
1132                                 ((u64)extra_bits << 23),
1133                 };
1134         }
1135 }
1136
1137
1138 /* Tally, and optionally record, the specified match or literal.  */
1139 static inline void
1140 lzx_declare_item(struct lzx_compressor *c, u32 item,
1141                  struct lzx_item **next_chosen_item)
1142 {
1143         u32 len = item & OPTIMUM_LEN_MASK;
1144         u32 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1145
1146         if (len == 1)
1147                 lzx_declare_literal(c, offset_data, next_chosen_item);
1148         else if (offset_data < LZX_NUM_RECENT_OFFSETS)
1149                 lzx_declare_repeat_offset_match(c, len, offset_data,
1150                                                 next_chosen_item);
1151         else
1152                 lzx_declare_explicit_offset_match(c, len,
1153                                                   offset_data - LZX_OFFSET_ADJUSTMENT,
1154                                                   next_chosen_item);
1155 }
1156
1157 static inline void
1158 lzx_record_item_list(struct lzx_compressor *c,
1159                      struct lzx_optimum_node *cur_node,
1160                      struct lzx_item **next_chosen_item)
1161 {
1162         struct lzx_optimum_node *end_node;
1163         u32 saved_item;
1164         u32 item;
1165
1166         /* The list is currently in reverse order (last item to first item).
1167          * Reverse it.  */
1168         end_node = cur_node;
1169         saved_item = cur_node->item;
1170         do {
1171                 item = saved_item;
1172                 cur_node -= item & OPTIMUM_LEN_MASK;
1173                 saved_item = cur_node->item;
1174                 cur_node->item = item;
1175         } while (cur_node != c->optimum_nodes);
1176
1177         /* Walk the list of items from beginning to end, tallying and recording
1178          * each item.  */
1179         do {
1180                 lzx_declare_item(c, cur_node->item, next_chosen_item);
1181                 cur_node += (cur_node->item) & OPTIMUM_LEN_MASK;
1182         } while (cur_node != end_node);
1183 }
1184
1185 static inline void
1186 lzx_tally_item_list(struct lzx_compressor *c, struct lzx_optimum_node *cur_node)
1187 {
1188         /* Since we're just tallying the items, we don't need to reverse the
1189          * list.  Processing the items in reverse order is fine.  */
1190         do {
1191                 lzx_declare_item(c, cur_node->item, NULL);
1192                 cur_node -= (cur_node->item & OPTIMUM_LEN_MASK);
1193         } while (cur_node != c->optimum_nodes);
1194 }
1195
1196 /*
1197  * Find an inexpensive path through the graph of possible match/literal choices
1198  * for the current block.  The nodes of the graph are
1199  * c->optimum_nodes[0...block_size].  They correspond directly to the bytes in
1200  * the current block, plus one extra node for end-of-block.  The edges of the
1201  * graph are matches and literals.  The goal is to find the minimum cost path
1202  * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
1203  *
1204  * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1205  * proceeding forwards one node at a time.  At each node, a selection of matches
1206  * (len >= 2), as well as the literal byte (len = 1), is considered.  An item of
1207  * length 'len' provides a new path to reach the node 'len' bytes later.  If
1208  * such a path is the lowest cost found so far to reach that later node, then
1209  * that later node is updated with the new path.
1210  *
1211  * Note that although this algorithm is based on minimum cost path search, due
1212  * to various simplifying assumptions the result is not guaranteed to be the
1213  * true minimum cost, or "optimal", path over the graph of all valid LZX
1214  * representations of this block.
1215  *
1216  * Also, note that because of the presence of the recent offsets queue (which is
1217  * a type of adaptive state), the algorithm cannot work backwards and compute
1218  * "cost to end" instead of "cost to beginning".  Furthermore, the way the
1219  * algorithm handles this adaptive state in the "minimum cost" parse is actually
1220  * only an approximation.  It's possible for the globally optimal, minimum cost
1221  * path to contain a prefix, ending at a position, where that path prefix is
1222  * *not* the minimum cost path to that position.  This can happen if such a path
1223  * prefix results in a different adaptive state which results in lower costs
1224  * later.  The algorithm does not solve this problem; it only considers the
1225  * lowest cost to reach each individual position.
1226  */
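/*
 * In outline, the forward pass below does the following (a sketch):
 *
 *      for each position i in the block:
 *              for each item (literal or match) of length 'len' considered at i:
 *                      cost = node[i].cost + cost_of_item;
 *                      if (cost <= node[i + len].cost) {
 *                              node[i + len].cost = cost;
 *                              node[i + len].item = <encoded item>;
 *                      }
 */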
1227 static struct lzx_lru_queue
1228 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1229                        const u8 * const restrict block_begin,
1230                        const u32 block_size,
1231                        const struct lzx_lru_queue initial_queue)
1232 {
1233         struct lzx_optimum_node *cur_node = c->optimum_nodes;
1234         struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
1235         struct lz_match *cache_ptr = c->match_cache;
1236         const u8 *in_next = block_begin;
1237         const u8 * const block_end = block_begin + block_size;
1238
1239         /* Instead of storing the match offset LRU queues in the
1240          * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1241          * storing them in a smaller array.  This works because the algorithm
1242          * only requires a limited history of the adaptive state.  Once a given
1243          * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1244          * it is no longer needed.  */
1245         struct lzx_lru_queue queues[512];
1246
1247         STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
1248 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
1249
1250         /* Initially, the cost to reach each node is "infinity".  */
1251         memset(c->optimum_nodes, 0xFF,
1252                (block_size + 1) * sizeof(c->optimum_nodes[0]));
1253
1254         QUEUE(block_begin) = initial_queue;
1255
1256         /* The following loop runs 'block_size' iterations, one per node.  */
1257         do {
1258                 unsigned num_matches;
1259                 unsigned literal;
1260                 u32 cost;
1261
1262                 /*
1263                  * A selection of matches for the block was already saved in
1264                  * memory so that we don't have to run the uncompressed data
1265                  * through the matchfinder on every optimization pass.  However,
1266                  * we still search for repeat offset matches during each
1267                  * optimization pass because we cannot predict the state of the
1268                  * recent offsets queue.  But as a heuristic, we don't bother
1269                  * searching for repeat offset matches if the general-purpose
1270                  * matchfinder failed to find any matches.
1271                  *
1272                  * Note that a match of length n at some offset implies there is
1273                  * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1274                  * that same offset.  In other words, we don't necessarily need
1275                  * to use the full length of a match.  The key heuristic that
1276                  * saves a significant amount of time is that for each
1277                  * distinct length, we only consider the smallest offset for
1278                  * which that length is available.  This heuristic also applies
1279                  * to repeat offsets, which we order specially: R0 < R1 < R2 <
1280                  * any explicit offset.  Of course, this heuristic may
1281                  * produce suboptimal results because offset slots in LZX are
1282                  * subject to entropy encoding, but in practice this is a useful
1283                  * heuristic.
1284                  */
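                /* (Example, for illustration only: if the cached matches at
                 * this position are length 5 at offset 100 and length 8 at
                 * offset 3000, then lengths up to 5 are costed only against
                 * offset 100 and lengths 6 through 8 only against offset 3000,
                 * even though the shorter lengths are also available at offset
                 * 3000.)  */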
1285
1286                 num_matches = cache_ptr->length;
1287                 cache_ptr++;
1288
1289                 if (num_matches) {
1290                         struct lz_match *end_matches = cache_ptr + num_matches;
1291                         unsigned next_len = LZX_MIN_MATCH_LEN;
1292                         unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1293                         const u8 *matchptr;
1294
1295                         /* Consider R0 match  */
1296                         matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1297                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1298                                 goto R0_done;
1299                         STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
1300                         do {
1301                                 u32 cost = cur_node->cost +
1302                                            c->costs.match_cost[0][
1303                                                         next_len - LZX_MIN_MATCH_LEN];
1304                                 if (cost <= (cur_node + next_len)->cost) {
1305                                         (cur_node + next_len)->cost = cost;
1306                                         (cur_node + next_len)->item =
1307                                                 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1308                                 }
1309                                 if (unlikely(++next_len > max_len)) {
1310                                         cache_ptr = end_matches;
1311                                         goto done_matches;
1312                                 }
1313                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1314
1315                 R0_done:
1316
1317                         /* Consider R1 match  */
1318                         matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1319                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1320                                 goto R1_done;
1321                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1322                                 goto R1_done;
1323                         for (unsigned len = 2; len < next_len - 1; len++)
1324                                 if (matchptr[len] != in_next[len])
1325                                         goto R1_done;
1326                         do {
1327                                 u32 cost = cur_node->cost +
1328                                            c->costs.match_cost[1][
1329                                                         next_len - LZX_MIN_MATCH_LEN];
1330                                 if (cost <= (cur_node + next_len)->cost) {
1331                                         (cur_node + next_len)->cost = cost;
1332                                         (cur_node + next_len)->item =
1333                                                 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1334                                 }
1335                                 if (unlikely(++next_len > max_len)) {
1336                                         cache_ptr = end_matches;
1337                                         goto done_matches;
1338                                 }
1339                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1340
1341                 R1_done:
1342
1343                         /* Consider R2 match  */
1344                         matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1345                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1346                                 goto R2_done;
1347                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1348                                 goto R2_done;
1349                         for (unsigned len = 2; len < next_len - 1; len++)
1350                                 if (matchptr[len] != in_next[len])
1351                                         goto R2_done;
1352                         do {
1353                                 u32 cost = cur_node->cost +
1354                                            c->costs.match_cost[2][
1355                                                         next_len - LZX_MIN_MATCH_LEN];
1356                                 if (cost <= (cur_node + next_len)->cost) {
1357                                         (cur_node + next_len)->cost = cost;
1358                                         (cur_node + next_len)->item =
1359                                                 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1360                                 }
1361                                 if (unlikely(++next_len > max_len)) {
1362                                         cache_ptr = end_matches;
1363                                         goto done_matches;
1364                                 }
1365                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1366
1367                 R2_done:
1368
1369                         while (next_len > cache_ptr->length)
1370                                 if (++cache_ptr == end_matches)
1371                                         goto done_matches;
1372
1373                         /* Consider explicit offset matches  */
1374                         do {
1375                                 u32 offset = cache_ptr->offset;
1376                                 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1377                                 unsigned offset_slot = (offset < LZX_NUM_FAST_OFFSETS) ?
1378                                                 lzx_get_offset_slot_fast(c, offset) :
1379                                                 lzx_get_offset_slot(offset);
1380                                 do {
1381                                         u32 cost = cur_node->cost +
1382                                                    c->costs.match_cost[offset_slot][
1383                                                                 next_len - LZX_MIN_MATCH_LEN];
1384                                 #if LZX_CONSIDER_ALIGNED_COSTS
1385                                         if (lzx_extra_offset_bits[offset_slot] >=
1386                                             LZX_NUM_ALIGNED_OFFSET_BITS)
1387                                                 cost += c->costs.aligned[offset_data &
1388                                                                          LZX_ALIGNED_OFFSET_BITMASK];
1389                                 #endif
1390                                         if (cost < (cur_node + next_len)->cost) {
1391                                                 (cur_node + next_len)->cost = cost;
1392                                                 (cur_node + next_len)->item =
1393                                                         (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1394                                         }
1395                                 } while (++next_len <= cache_ptr->length);
1396                         } while (++cache_ptr != end_matches);
1397                 }
1398
1399         done_matches:
1400
1401                 /* Consider coding a literal.
1402                  *
1403                  * To avoid an extra branch, the check for whether coding the
1404                  * literal is actually preferable is integrated into the queue
1405                  * update code below.  */
1406                 literal = *in_next++;
1407                 cost = cur_node->cost +
1408                        c->costs.main[lzx_main_symbol_for_literal(literal)];
1409
1410                 /* Advance to the next position.  */
1411                 cur_node++;
1412
1413                 /* The lowest-cost path to the current position is now known.
1414                  * Finalize the recent offsets queue that results from taking
1415                  * this lowest-cost path.  */
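                /* (Note, added for clarity: the '<=' below prefers the literal
                 * when its cost ties the cheapest path already found to this
                 * node, which leaves the recent offsets queue unchanged.)  */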
1416
1417                 if (cost <= cur_node->cost) {
1418                         /* Literal: queue remains unchanged.  */
1419                         cur_node->cost = cost;
1420                         cur_node->item = (literal << OPTIMUM_OFFSET_SHIFT) | 1;
1421                         QUEUE(in_next) = QUEUE(in_next - 1);
1422                 } else {
1423                         /* Match: queue update is needed.  */
1424                         unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1425                         u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1426                         if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1427                                 /* Explicit offset match: insert offset at front  */
1428                                 QUEUE(in_next) =
1429                                         lzx_lru_queue_push(QUEUE(in_next - len),
1430                                                            offset_data - LZX_OFFSET_ADJUSTMENT);
1431                         } else {
1432                                 /* Repeat offset match: swap offset to front  */
1433                                 QUEUE(in_next) =
1434                                         lzx_lru_queue_swap(QUEUE(in_next - len),
1435                                                            offset_data);
1436                         }
1437                 }
1438         } while (cur_node != end_node);
1439
1440         /* Return the match offset queue at the end of the minimum cost path. */
1441         return QUEUE(block_end);
1442 }
1443
1444 /* Given the costs for the main and length codewords, compute 'match_costs'.  */
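/* (Clarifying note, not in the original source: the modeled cost of a match is
 * the cost of its main symbol plus the cost of its extra offset bits; lengths
 * of 9 and above additionally pay for a length-code symbol, with the main
 * symbol fixed to the one whose length header is 7.  When aligned-offset costs
 * are considered, 3 of the extra offset bits are instead charged via the
 * aligned code at parse time.)  */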
1445 static void
1446 lzx_compute_match_costs(struct lzx_compressor *c)
1447 {
1448         unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
1449         struct lzx_costs *costs = &c->costs;
1450
1451         for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1452
1453                 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1454                 unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
1455                 unsigned i;
1456
1457         #if LZX_CONSIDER_ALIGNED_COSTS
1458                 if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
1459                         extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1460         #endif
1461
1462                 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1463                         costs->match_cost[offset_slot][i] =
1464                                 costs->main[main_symbol++] + extra_cost;
1465
1466                 extra_cost += costs->main[main_symbol];
1467
1468                 for (; i < LZX_NUM_LENS; i++)
1469                         costs->match_cost[offset_slot][i] =
1470                                 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
1471         }
1472 }
1473
1474 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
1475  * algorithm.  */
1476 static void
1477 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1478 {
1479         u32 i;
1480         bool have_byte[256];
1481         unsigned num_used_bytes;
1482
1483         /* The costs below are hard coded to use a scaling factor of 16.  */
1484         STATIC_ASSERT(LZX_BIT_COST == 16);
1485
1486         /*
1487          * Heuristics:
1488          *
1489          * - Use smaller initial costs for literal symbols when the input buffer
1490          *   contains fewer distinct bytes.
1491          *
1492          * - Assume that match symbols are more costly than literal symbols.
1493          *
1494          * - Assume that length symbols for shorter lengths are less costly than
1495          *   length symbols for longer lengths.
1496          */
1497
1498         for (i = 0; i < 256; i++)
1499                 have_byte[i] = false;
1500
1501         for (i = 0; i < block_size; i++)
1502                 have_byte[block[i]] = true;
1503
1504         num_used_bytes = 0;
1505         for (i = 0; i < 256; i++)
1506                 num_used_bytes += have_byte[i];
1507
1508         for (i = 0; i < 256; i++)
1509                 c->costs.main[i] = 140 - (256 - num_used_bytes) / 4;
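        /* (Worked example, for illustration: if all 256 byte values occur in
         * the block, each literal starts at cost 140, i.e. 140/16 = 8.75 bits;
         * if only 16 distinct byte values occur, the cost is 140 - 240/4 = 80,
         * i.e. 5 bits.)  */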
1510
1511         for (; i < c->num_main_syms; i++)
1512                 c->costs.main[i] = 170;
1513
1514         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1515                 c->costs.len[i] = 103 + (i / 4);
1516
1517 #if LZX_CONSIDER_ALIGNED_COSTS
1518         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1519                 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1520 #endif
1521
1522         lzx_compute_match_costs(c);
1523 }
1524
1525 /* Update the current cost model to reflect the computed Huffman codes.  */
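/* (Note, added for clarity: symbols whose codeword length is 0 were unused in
 * the previous pass; they are assigned a high but finite cost below (15 bits
 * for main and length symbols, 7 bits for aligned symbols) so that a later
 * pass can still choose them if they become worthwhile.)  */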
1526 static void
1527 lzx_update_costs(struct lzx_compressor *c)
1528 {
1529         unsigned i;
1530         const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1531
1532         for (i = 0; i < c->num_main_syms; i++)
1533                 c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
1534
1535         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1536                 c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
1537
1538 #if LZX_CONSIDER_ALIGNED_COSTS
1539         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1540                 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
1541 #endif
1542
1543         lzx_compute_match_costs(c);
1544 }
1545
1546 static struct lzx_lru_queue
1547 lzx_optimize_and_write_block(struct lzx_compressor *c,
1548                              struct lzx_output_bitstream *os,
1549                              const u8 *block_begin, const u32 block_size,
1550                              const struct lzx_lru_queue initial_queue)
1551 {
1552         unsigned num_passes_remaining = c->num_optim_passes;
1553         struct lzx_item *next_chosen_item;
1554         struct lzx_lru_queue new_queue;
1555
1556         /* The first optimization pass uses a default cost model.  Each
1557          * additional optimization pass uses a cost model derived from the
1558          * Huffman code computed in the previous pass.  */
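        /* (Example, for illustration: with num_optim_passes == 2, the block is
         * first parsed with the default costs, Huffman codes are built from
         * the symbols that parse would emit, the costs are recomputed from
         * those codes, and the block is parsed once more; only the final parse
         * is recorded and written out below.)  */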
1559
1560         lzx_set_default_costs(c, block_begin, block_size);
1561         lzx_reset_symbol_frequencies(c);
1562         do {
1563                 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1564                                                    initial_queue);
1565                 if (num_passes_remaining > 1) {
1566                         lzx_tally_item_list(c, c->optimum_nodes + block_size);
1567                         lzx_make_huffman_codes(c);
1568                         lzx_update_costs(c);
1569                         lzx_reset_symbol_frequencies(c);
1570                 }
1571         } while (--num_passes_remaining);
1572
1573         next_chosen_item = c->chosen_items;
1574         lzx_record_item_list(c, c->optimum_nodes + block_size, &next_chosen_item);
1575         lzx_finish_block(c, os, block_size, next_chosen_item - c->chosen_items);
1576         return new_queue;
1577 }
1578
1579 /*
1580  * This is the "near-optimal" LZX compressor.
1581  *
1582  * For each block, it performs a relatively thorough graph search to find an
1583  * inexpensive (in terms of compressed size) way to output that block.
1584  *
1585  * Note: there are actually many things this algorithm leaves on the table in
1586  * terms of compression ratio.  So although it may be "near-optimal", it is
1587  * certainly not "optimal".  The goal is not to produce the optimal compression
1588  * ratio, which for LZX is probably impossible within any practical amount of
1589  * time, but rather to produce a compression ratio significantly better than a
1590  * simpler "greedy" or "lazy" parse while still being relatively fast.
1591  */
1592 static void
1593 lzx_compress_near_optimal(struct lzx_compressor *c,
1594                           struct lzx_output_bitstream *os)
1595 {
1596         const u8 * const in_begin = c->in_buffer;
1597         const u8 *       in_next = in_begin;
1598         const u8 * const in_end  = in_begin + c->in_nbytes;
1599         unsigned max_len = LZX_MAX_MATCH_LEN;
1600         unsigned nice_len = min(c->nice_match_length, max_len);
1601         u32 next_hash;
1602         struct lzx_lru_queue queue;
1603
1604         bt_matchfinder_init(&c->bt_mf);
1605         memset(c->hash2_tab, 0, sizeof(c->hash2_tab));
1606         next_hash = bt_matchfinder_hash_3_bytes(in_next);
1607         lzx_lru_queue_init(&queue);
1608
1609         do {
1610                 /* Starting a new block  */
1611                 const u8 * const in_block_begin = in_next;
1612                 const u8 * const in_block_end =
1613                         in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1614
1615                 /* Run the block through the matchfinder and cache the matches. */
1616                 struct lz_match *cache_ptr = c->match_cache;
1617                 do {
1618                         struct lz_match *lz_matchptr;
1619                         u32 hash2;
1620                         pos_t cur_match;
1621                         unsigned best_len;
1622
1623                         /* If approaching the end of the input buffer, adjust
1624                          * 'max_len' and 'nice_len' accordingly.  */
1625                         if (unlikely(max_len > in_end - in_next)) {
1626                                 max_len = in_end - in_next;
1627                                 nice_len = min(max_len, nice_len);
1628
1629                                 /* This extra check is needed to ensure that we
1630                                  * never output a length 2 match of the very
1631                                  * last two bytes with the very first two bytes,
1632                                  * since such a match has an offset too large to
1633                                  * be represented.  */
1634                                 if (unlikely(max_len < 3)) {
1635                                         in_next++;
1636                                         cache_ptr->length = 0;
1637                                         cache_ptr++;
1638                                         continue;
1639                                 }
1640                         }
1641
1642                         lz_matchptr = cache_ptr + 1;
1643
1644                         /* Check for a length 2 match.  */
1645                         hash2 = lz_hash_2_bytes(in_next, LZX_HASH2_ORDER);
1646                         cur_match = c->hash2_tab[hash2];
1647                         c->hash2_tab[hash2] = in_next - in_begin;
1648                         if (cur_match != 0 &&
1649                             (LZX_HASH2_ORDER == 16 ||
1650                              load_u16_unaligned(&in_begin[cur_match]) ==
1651                              load_u16_unaligned(in_next)))
1652                         {
1653                                 lz_matchptr->length = 2;
1654                                 lz_matchptr->offset = in_next - &in_begin[cur_match];
1655                                 lz_matchptr++;
1656                         }
1657
1658                         /* Check for matches of length >= 3.  */
1659                         lz_matchptr = bt_matchfinder_get_matches(&c->bt_mf,
1660                                                                  in_begin,
1661                                                                  in_next,
1662                                                                  3,
1663                                                                  max_len,
1664                                                                  nice_len,
1665                                                                  c->max_search_depth,
1666                                                                  &next_hash,
1667                                                                  &best_len,
1668                                                                  lz_matchptr);
1669                         in_next++;
1670                         cache_ptr->length = lz_matchptr - (cache_ptr + 1);
1671                         cache_ptr = lz_matchptr;
1672
1673                         /*
1674                          * If there was a very long match found, then don't
1675                          * cache any matches for the bytes covered by that
1676                          * match.  This avoids degenerate behavior when
1677                          * compressing highly redundant data, where the number
1678                          * of matches can be very large.
1679                          *
1680                          * This heuristic doesn't actually hurt the compression
1681                          * ratio very much.  If there's a long match, then the
1682                          * data must be highly compressible, so it doesn't
1683                          * matter as much what we do.
1684                          */
1685                         if (best_len >= nice_len) {
1686                                 --best_len;
1687                                 do {
1688                                         if (unlikely(max_len > in_end - in_next)) {
1689                                                 max_len = in_end - in_next;
1690                                                 nice_len = min(max_len, nice_len);
1691                                                 if (unlikely(max_len < 3)) {
1692                                                         in_next++;
1693                                                         cache_ptr->length = 0;
1694                                                         cache_ptr++;
1695                                                         continue;
1696                                                 }
1697                                         }
1698                                         c->hash2_tab[lz_hash_2_bytes(in_next, LZX_HASH2_ORDER)] =
1699                                                 in_next - in_begin;
1700                                         bt_matchfinder_skip_position(&c->bt_mf,
1701                                                                      in_begin,
1702                                                                      in_next,
1703                                                                      in_end,
1704                                                                      nice_len,
1705                                                                      c->max_search_depth,
1706                                                                      &next_hash);
1707                                         in_next++;
1708                                         cache_ptr->length = 0;
1709                                         cache_ptr++;
1710                                 } while (--best_len);
1711                         }
1712                 } while (in_next < in_block_end &&
1713                          likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
1714
1715                 /* We've finished running the block through the matchfinder.
1716                  * Now choose a match/literal sequence and write the block.  */
1717
1718                 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
1719                                                      in_next - in_block_begin,
1720                                                      queue);
1721         } while (in_next != in_end);
1722 }
1723
1724 /*
1725  * Given a pointer to the current byte sequence and the current list of recent
1726  * match offsets, find the longest repeat offset match.
1727  *
1728  * If no match of at least 2 bytes is found, then return 0.
1729  *
1730  * If a match of at least 2 bytes is found, then return its length and set
1731  * *rep_max_idx_ret to the index of its offset in @queue.
1732  */
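/* (Note, added for clarity: @queue is received by value, so the
 * lzx_lru_queue_pop() calls below consume only this local copy; the caller's
 * queue is left untouched.)  */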
1733 static unsigned
1734 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
1735                                      const u32 bytes_remaining,
1736                                      struct lzx_lru_queue queue,
1737                                      unsigned *rep_max_idx_ret)
1738 {
1739         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
1740         LZX_ASSERT(bytes_remaining >= 2);
1741
1742         const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
1743         const u16 next_2_bytes = load_u16_unaligned(in_next);
1744         const u8 *matchptr;
1745         unsigned rep_max_len;
1746         unsigned rep_max_idx;
1747         unsigned rep_len;
1748
1749         matchptr = in_next - lzx_lru_queue_pop(&queue);
1750         if (load_u16_unaligned(matchptr) == next_2_bytes)
1751                 rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
1752         else
1753                 rep_max_len = 0;
1754         rep_max_idx = 0;
1755
1756         matchptr = in_next - lzx_lru_queue_pop(&queue);
1757         if (load_u16_unaligned(matchptr) == next_2_bytes) {
1758                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1759                 if (rep_len > rep_max_len) {
1760                         rep_max_len = rep_len;
1761                         rep_max_idx = 1;
1762                 }
1763         }
1764
1765         matchptr = in_next - lzx_lru_queue_pop(&queue);
1766         if (load_u16_unaligned(matchptr) == next_2_bytes) {
1767                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1768                 if (rep_len > rep_max_len) {
1769                         rep_max_len = rep_len;
1770                         rep_max_idx = 2;
1771                 }
1772         }
1773
1774         *rep_max_idx_ret = rep_max_idx;
1775         return rep_max_len;
1776 }
1777
1778 /* Fast heuristic scoring for lazy parsing: how "good" is this match?  */
1779 static inline unsigned
1780 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
1781 {
1782         unsigned score = len;
1783
1784         if (adjusted_offset < 4096)
1785                 score++;
1786
1787         if (adjusted_offset < 256)
1788                 score++;
1789
1790         return score;
1791 }
1792
1793 static inline unsigned
1794 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
1795 {
1796         return rep_len + 3;
1797 }
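/* (Illustrative values, not in the original source: a length 5 repeat offset
 * match scores 5 + 3 = 8, so it beats a length 6 explicit offset match at a
 * large offset (score 6) but loses to a length 8 explicit offset match at an
 * adjusted offset below 256 (score 8 + 2 = 10).)  */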
1798
1799 /* This is the "lazy" LZX compressor.  */
1800 static void
1801 lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
1802 {
1803         const u8 * const in_begin = c->in_buffer;
1804         const u8 *       in_next = in_begin;
1805         const u8 * const in_end  = in_begin + c->in_nbytes;
1806         unsigned max_len = LZX_MAX_MATCH_LEN;
1807         unsigned nice_len = min(c->nice_match_length, max_len);
1808         struct lzx_lru_queue queue;
1809         u32 next_hashes[2] = {};
1810
1811         hc_matchfinder_init(&c->hc_mf);
1812         lzx_lru_queue_init(&queue);
1813
1814         do {
1815                 /* Starting a new block  */
1816
1817                 const u8 * const in_block_begin = in_next;
1818                 const u8 * const in_block_end =
1819                         in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1820                 struct lzx_item *next_chosen_item = c->chosen_items;
1821                 unsigned cur_len;
1822                 u32 cur_offset;
1823                 u32 cur_offset_data;
1824                 unsigned cur_score;
1825                 unsigned next_len;
1826                 u32 next_offset;
1827                 u32 next_offset_data;
1828                 unsigned next_score;
1829                 unsigned rep_max_len;
1830                 unsigned rep_max_idx;
1831                 unsigned rep_score;
1832                 unsigned skip_len;
1833
1834                 lzx_reset_symbol_frequencies(c);
1835
1836                 do {
1837                         if (unlikely(max_len > in_end - in_next)) {
1838                                 max_len = in_end - in_next;
1839                                 nice_len = min(max_len, nice_len);
1840                         }
1841
1842                         /* Find the longest match at the current position.  */
1843
1844                         cur_len = hc_matchfinder_longest_match(&c->hc_mf,
1845                                                                in_begin,
1846                                                                in_next - in_begin,
1847                                                                2,
1848                                                                max_len,
1849                                                                nice_len,
1850                                                                c->max_search_depth,
1851                                                                next_hashes,
1852                                                                &cur_offset);
1853                         if (cur_len < 3 ||
1854                             (cur_len == 3 &&
1855                              cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
1856                              cur_offset != lzx_lru_queue_R0(queue) &&
1857                              cur_offset != lzx_lru_queue_R1(queue) &&
1858                              cur_offset != lzx_lru_queue_R2(queue)))
1859                         {
1860                                 /* There was no match found, or the only match found
1861                                  * was a distant length 3 match.  Output a literal.  */
1862                                 lzx_declare_literal(c, *in_next++,
1863                                                     &next_chosen_item);
1864                                 continue;
1865                         }
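                        /* (Rationale for the check above, added for clarity: a
                         * length 3 match at such a distant offset needs many
                         * extra offset bits, so it usually costs at least as
                         * much as three literals.  Repeat offsets are exempt
                         * because they need no extra offset bits.)  */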
1866
1867                         if (cur_offset == lzx_lru_queue_R0(queue)) {
1868                                 in_next++;
1869                                 cur_offset_data = 0;
1870                                 skip_len = cur_len - 1;
1871                                 goto choose_cur_match;
1872                         }
1873
1874                         cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
1875                         cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
1876
1877                         /* Consider a repeat offset match  */
1878                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
1879                                                                            in_end - in_next,
1880                                                                            queue,
1881                                                                            &rep_max_idx);
1882                         in_next++;
1883
1884                         if (rep_max_len >= 3 &&
1885                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
1886                                                                        rep_max_idx)) >= cur_score)
1887                         {
1888                                 cur_len = rep_max_len;
1889                                 cur_offset_data = rep_max_idx;
1890                                 skip_len = rep_max_len - 1;
1891                                 goto choose_cur_match;
1892                         }
1893
1894                 have_cur_match:
1895
1896                         /* We have a match at the current position.  */
1897
1898                         /* If we have a very long match, choose it immediately.  */
1899                         if (cur_len >= nice_len) {
1900                                 skip_len = cur_len - 1;
1901                                 goto choose_cur_match;
1902                         }
1903
1904                         /* See if there's a better match at the next position.  */
1905
1906                         if (unlikely(max_len > in_end - in_next)) {
1907                                 max_len = in_end - in_next;
1908                                 nice_len = min(max_len, nice_len);
1909                         }
1910
1911                         next_len = hc_matchfinder_longest_match(&c->hc_mf,
1912                                                                 in_begin,
1913                                                                 in_next - in_begin,
1914                                                                 cur_len - 2,
1915                                                                 max_len,
1916                                                                 nice_len,
1917                                                                 c->max_search_depth / 2,
1918                                                                 next_hashes,
1919                                                                 &next_offset);
1920
1921                         if (next_len <= cur_len - 2) {
1922                                 in_next++;
1923                                 skip_len = cur_len - 2;
1924                                 goto choose_cur_match;
1925                         }
1926
1927                         next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
1928                         next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
1929
1930                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
1931                                                                            in_end - in_next,
1932                                                                            queue,
1933                                                                            &rep_max_idx);
1934                         in_next++;
1935
1936                         if (rep_max_len >= 3 &&
1937                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
1938                                                                        rep_max_idx)) >= next_score)
1939                         {
1940
1941                                 if (rep_score > cur_score) {
1942                                         /* The next match is better, and it's a
1943                                          * repeat offset match.  */
1944                                         lzx_declare_literal(c, *(in_next - 2),
1945                                                             &next_chosen_item);
1946                                         cur_len = rep_max_len;
1947                                         cur_offset_data = rep_max_idx;
1948                                         skip_len = cur_len - 1;
1949                                         goto choose_cur_match;
1950                                 }
1951                         } else {
1952                                 if (next_score > cur_score) {
1953                                         /* The next match is better, and it's an
1954                                          * explicit offset match.  */
1955                                         lzx_declare_literal(c, *(in_next - 2),
1956                                                             &next_chosen_item);
1957                                         cur_len = next_len;
1958                                         cur_offset_data = next_offset_data;
1959                                         cur_score = next_score;
1960                                         goto have_cur_match;
1961                                 }
1962                         }
1963
1964                         /* The original match was better.  */
1965                         skip_len = cur_len - 2;
1966
1967                 choose_cur_match:
1968                         if (cur_offset_data < LZX_NUM_RECENT_OFFSETS) {
1969                                 lzx_declare_repeat_offset_match(c, cur_len,
1970                                                                 cur_offset_data,
1971                                                                 &next_chosen_item);
1972                                 queue = lzx_lru_queue_swap(queue, cur_offset_data);
1973                         } else {
1974                                 lzx_declare_explicit_offset_match(c, cur_len,
1975                                                                   cur_offset_data - LZX_OFFSET_ADJUSTMENT,
1976                                                                   &next_chosen_item);
1977                                 queue = lzx_lru_queue_push(queue, cur_offset_data - LZX_OFFSET_ADJUSTMENT);
1978                         }
1979
1980                         hc_matchfinder_skip_positions(&c->hc_mf,
1981                                                       in_begin,
1982                                                       in_next - in_begin,
1983                                                       in_end - in_begin,
1984                                                       skip_len,
1985                                                       next_hashes);
1986                         in_next += skip_len;
1987                 } while (in_next < in_block_end);
1988
1989                 lzx_finish_block(c, os, in_next - in_block_begin,
1990                                  next_chosen_item - c->chosen_items);
1991         } while (in_next != in_end);
1992 }
1993
1994 static void
1995 lzx_init_offset_slot_fast(struct lzx_compressor *c)
1996 {
1997         u8 slot = 0;
1998
1999         for (u32 offset = 0; offset < LZX_NUM_FAST_OFFSETS; offset++) {
2000
2001                 while (offset + LZX_OFFSET_ADJUSTMENT >= lzx_offset_slot_base[slot + 1])
2002                         slot++;
2003
2004                 c->offset_slot_fast[offset] = slot;
2005         }
2006 }
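/* (Sketch, for illustration only: the matching fast lookup, defined elsewhere
 * in this file, is expected to reduce to a single table read, e.g.
 *
 *	return c->offset_slot_fast[offset];
 *
 * for offset < LZX_NUM_FAST_OFFSETS, avoiding a search through
 * lzx_offset_slot_base[] on the hot path.)  */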
2007
2008 static size_t
2009 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
2010 {
2011         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2012                 return offsetof(struct lzx_compressor, hc_mf) +
2013                         hc_matchfinder_size(max_bufsize);
2014         } else {
2015                 return offsetof(struct lzx_compressor, bt_mf) +
2016                         bt_matchfinder_size(max_bufsize);
2017         }
2018 }
2019
2020 static u64
2021 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
2022                       bool destructive)
2023 {
2024         u64 size = 0;
2025
2026         if (max_bufsize > LZX_MAX_WINDOW_SIZE)
2027                 return 0;
2028
2029         size += lzx_get_compressor_size(max_bufsize, compression_level);
2030         if (!destructive)
2031                 size += max_bufsize; /* in_buffer */
2032         return size;
2033 }
2034
2035 static int
2036 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
2037                       bool destructive, void **c_ret)
2038 {
2039         unsigned window_order;
2040         struct lzx_compressor *c;
2041
2042         window_order = lzx_get_window_order(max_bufsize);
2043         if (window_order == 0)
2044                 return WIMLIB_ERR_INVALID_PARAM;
2045
2046         c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
2047         if (!c)
2048                 goto oom0;
2049
2050         c->destructive = destructive;
2051
2052         c->num_main_syms = lzx_get_num_main_syms(window_order);
2053         c->window_order = window_order;
2054
2055         if (!c->destructive) {
2056                 c->in_buffer = MALLOC(max_bufsize);
2057                 if (!c->in_buffer)
2058                         goto oom1;
2059         }
2060
2061         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2062
2063                 /* Fast compression: Use lazy parsing.  */
2064
2065                 c->impl = lzx_compress_lazy;
2066                 c->max_search_depth = (36 * compression_level) / 20;
2067                 c->nice_match_length = (72 * compression_level) / 20;
2068
2069                 /* lzx_compress_lazy() needs max_search_depth >= 2 because it
2070                  * halves the max_search_depth when attempting a lazy match, and
2071                  * max_search_depth cannot be 0.  */
2072                 if (c->max_search_depth < 2)
2073                         c->max_search_depth = 2;
2074         } else {
2075
2076                 /* Normal / high compression: Use near-optimal parsing.  */
2077
2078                 c->impl = lzx_compress_near_optimal;
2079
2080                 /* Scale nice_match_length and max_search_depth with the
2081                  * compression level.  */
2082                 c->max_search_depth = (24 * compression_level) / 50;
2083                 c->nice_match_length = (32 * compression_level) / 50;
2084
2085                 /* Set a number of optimization passes appropriate for the
2086                  * compression level.  */
2087
2088                 c->num_optim_passes = 1;
2089
2090                 if (compression_level >= 45)
2091                         c->num_optim_passes++;
2092
2093                 /* Use more optimization passes for higher compression levels.
2094                  * But the more passes there are, the less they help --- so
2095                  * don't add them linearly.  */
2096                 if (compression_level >= 70) {
2097                         c->num_optim_passes++;
2098                         if (compression_level >= 100)
2099                                 c->num_optim_passes++;
2100                         if (compression_level >= 150)
2101                                 c->num_optim_passes++;
2102                         if (compression_level >= 200)
2103                                 c->num_optim_passes++;
2104                         if (compression_level >= 300)
2105                                 c->num_optim_passes++;
2106                 }
2107         }
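        /* (Worked example, for illustration: at compression level 50 the
         * near-optimal path gets max_search_depth = (24 * 50) / 50 = 24,
         * nice_match_length = (32 * 50) / 50 = 32, and num_optim_passes = 2,
         * since 50 >= 45 but < 70.)  */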
2108
2109         /* max_search_depth == 0 is invalid.  */
2110         if (c->max_search_depth < 1)
2111                 c->max_search_depth = 1;
2112
2113         if (c->nice_match_length > LZX_MAX_MATCH_LEN)
2114                 c->nice_match_length = LZX_MAX_MATCH_LEN;
2115
2116         lzx_init_offset_slot_fast(c);
2117         *c_ret = c;
2118         return 0;
2119
2120 oom1:
2121         FREE(c);
2122 oom0:
2123         return WIMLIB_ERR_NOMEM;
2124 }
2125
2126 static size_t
2127 lzx_compress(const void *restrict in, size_t in_nbytes,
2128              void *restrict out, size_t out_nbytes_avail, void *restrict _c)
2129 {
2130         struct lzx_compressor *c = _c;
2131         struct lzx_output_bitstream os;
2132         size_t result;
2133
2134         /* Don't bother trying to compress very small inputs.  */
2135         if (in_nbytes < 100)
2136                 return 0;
2137
2138         /* Copy the input into the internal buffer (or, in destructive mode, use the caller's buffer in place) and preprocess it.  */
2139         if (c->destructive)
2140                 c->in_buffer = (void *)in;
2141         else
2142                 memcpy(c->in_buffer, in, in_nbytes);
2143         c->in_nbytes = in_nbytes;
2144         lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
2145
2146         /* Initially, the previous Huffman codeword lengths are all zeroes.  */
2147         c->codes_index = 0;
2148         memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2149
2150         /* Initialize the output bitstream.  */
2151         lzx_init_output(&os, out, out_nbytes_avail);
2152
2153         /* Call the compression level-specific compress() function.  */
2154         (*c->impl)(c, &os);
2155
2156         /* Flush the output bitstream and return the compressed size or 0.  */
2157         result = lzx_flush_output(&os);
2158         if (!result && c->destructive)
2159                 lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes);
2160         return result;
2161 }
2162
2163 static void
2164 lzx_free_compressor(void *_c)
2165 {
2166         struct lzx_compressor *c = _c;
2167
2168         if (!c->destructive)
2169                 FREE(c->in_buffer);
2170         FREE(c);
2171 }
2172
2173 const struct compressor_ops lzx_compressor_ops = {
2174         .get_needed_memory  = lzx_get_needed_memory,
2175         .create_compressor  = lzx_create_compressor,
2176         .compress           = lzx_compress,
2177         .free_compressor    = lzx_free_compressor,
2178 };