1 /*
2  * lzx_compress.c
3  *
4  * A compressor for the LZX compression format, as used in WIM files.
5  */
6
7 /*
8  * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
9  *
10  * This file is free software; you can redistribute it and/or modify it under
11  * the terms of the GNU Lesser General Public License as published by the Free
12  * Software Foundation; either version 3 of the License, or (at your option) any
13  * later version.
14  *
15  * This file is distributed in the hope that it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
18  * details.
19  *
20  * You should have received a copy of the GNU Lesser General Public License
21  * along with this file; if not, see http://www.gnu.org/licenses/.
22  */
23
24
25 /*
26  * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27  * compression format, as used in the WIM (Windows IMaging) file format.
28  *
29  * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30  * "Near-optimal" is significantly slower than "lazy", but results in a better
31  * compression ratio.  The "near-optimal" algorithm is used at the default
32  * compression level.
33  *
34  * This file may need some slight modifications to be used outside of the WIM
35  * format.  In particular, in other situations the LZX block header might be
36  * slightly different, and sliding window support might be required.
37  *
38  * Note: LZX is a compression format derived from DEFLATE, the format used by
39  * zlib and gzip.  Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40  * Certain details are quite similar, such as the method for storing Huffman
41  * codes.  However, the main differences are:
42  *
43  * - LZX preprocesses the data to attempt to make x86 machine code slightly more
44  *   compressible before attempting to compress it further.
45  *
46  * - LZX uses a "main" alphabet which combines literals and matches, with the
47  *   match symbols containing a "length header" (giving all or part of the match
48  *   length) and an "offset slot" (giving, roughly speaking, the order of
49  *   magnitude of the match offset).
50  *
51  * - LZX does not have static Huffman blocks (that is, the kind with preset
52  *   Huffman codes); however it does have two types of dynamic Huffman blocks
53  *   ("verbatim" and "aligned").
54  *
55  * - LZX has a minimum match length of 2 rather than 3.  Length 2 matches can be
56  *   useful, but generally only if the parser is smart about choosing them.
57  *
58  * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59  *   of match offsets.  This is very useful for certain types of files, such as
60  *   binary files that have repeating records.
61  */
62
63 #ifdef HAVE_CONFIG_H
64 #  include "config.h"
65 #endif
66
67 /*
68  * Start a new LZX block (with new Huffman codes) after this many bytes.
69  *
70  * Note: actual block sizes may slightly exceed this value.
71  *
72  * TODO: recursive splitting and cost evaluation might be good for an extremely
73  * high compression mode, but otherwise it is almost always far too slow for how
74  * much it helps.  Perhaps some sort of heuristic would be useful?
75  */
76 #define LZX_DIV_BLOCK_SIZE      32768
77
78 /*
79  * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the
80  * match cache for each byte position.  This value should be high enough so that
81  * nearly the time, all matches found in a given block can fit in the match
82  * cache.  However, fallback behavior (immediately terminating the block) on
83  * cache overflow is still required.
84  */
85 #define LZX_CACHE_PER_POS       7
86
87 /*
88  * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
89  * excluding the extra "overflow" entries.  The per-position multiplier is '1 +
90  * LZX_CACHE_PER_POS' instead of 'LZX_CACHE_PER_POS' because there is an
91  * overhead of one lz_match per position, used to hold the match count at that
92  * position.
93  */
94 #define LZX_CACHE_LENGTH        (LZX_DIV_BLOCK_SIZE * (1 + LZX_CACHE_PER_POS))
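
/* With LZX_DIV_BLOCK_SIZE = 32768 and LZX_CACHE_PER_POS = 7, this works out to
 * 32768 * (1 + 7) = 262144 lz_match entries.  */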
95
96 /*
97  * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
98  * ever be saved in the match cache for a single position.  Since each match we
99  * save for a single position has a distinct length, we can use the number of
100  * possible match lengths in LZX as this bound.  This bound is guaranteed to be
101  * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
102  * it will never actually be reached.
103  */
104 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
105
106 /*
107  * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
108  * This makes it possible to consider fractional bit costs.
109  *
110  * Note: this is only useful as a statistical trick for when the true costs are
111  * unknown.  In reality, each token in LZX requires a whole number of bits to
112  * output.
113  */
114 #define LZX_BIT_COST            16
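
/* For example, with this scaling a whole bit costs 16, and a fractional cost
 * of 1.5 bits is represented as 1.5 * 16 = 24.  */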
115
116 /*
117  * Consideration of aligned offset costs is disabled for now, due to
118  * insufficient benefit gained from the time spent.
119  */
120 #define LZX_CONSIDER_ALIGNED_COSTS      0
121
122 /*
123  * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
124  * faster algorithm.
125  */
126 #define LZX_MAX_FAST_LEVEL      34
127
128 /*
129  * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
130  * for finding length 2 matches.  This can be as high as 16 (in which case the
131  * hash function is trivial), but using a smaller hash table speeds up
132  * compression due to reduced cache pressure.
133  */
134 #define LZX_HASH2_ORDER         12
135 #define LZX_HASH2_LENGTH        (1UL << LZX_HASH2_ORDER)
136
137 /*
138  * These are the compressor-side limits on the codeword lengths for each Huffman
139  * code.  To make outputting bits slightly faster, some of these limits are
140  * lower than the limits defined by the LZX format.  This does not significantly
141  * affect the compression ratio, at least for the block sizes we use.
142  */
143 #define MAIN_CODEWORD_LIMIT     12      /* 64-bit: can buffer 4 main symbols  */
144 #define LENGTH_CODEWORD_LIMIT   12
145 #define ALIGNED_CODEWORD_LIMIT  7
146 #define PRE_CODEWORD_LIMIT      7
147
148 #include "wimlib/lzx_common.h"
149
150 /*
151  * The maximum allowed window order for the matchfinder.
152  */
153 #define MATCHFINDER_MAX_WINDOW_ORDER    LZX_MAX_WINDOW_ORDER
154
155 #include <string.h>
156
157 #include "wimlib/bt_matchfinder.h"
158 #include "wimlib/compress_common.h"
159 #include "wimlib/compressor_ops.h"
160 #include "wimlib/error.h"
161 #include "wimlib/hc_matchfinder.h"
162 #include "wimlib/lz_extend.h"
163 #include "wimlib/unaligned.h"
164 #include "wimlib/util.h"
165
166 struct lzx_output_bitstream;
167
168 /* Codewords for the LZX Huffman codes.  */
169 struct lzx_codewords {
170         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
171         u32 len[LZX_LENCODE_NUM_SYMBOLS];
172         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
173 };
174
175 /* Codeword lengths (in bits) for the LZX Huffman codes.
176  * A zero length means the corresponding codeword has zero frequency.  */
177 struct lzx_lens {
178         u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
179         u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
180         u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
181 };
182
183 /* Cost model for near-optimal parsing  */
184 struct lzx_costs {
185
186         /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
187          * length 'len' match that has an offset belonging to 'offset_slot'.  */
188         u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
189
190         /* Cost for each symbol in the main code  */
191         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
192
193         /* Cost for each symbol in the length code  */
194         u32 len[LZX_LENCODE_NUM_SYMBOLS];
195
196 #if LZX_CONSIDER_ALIGNED_COSTS
197         /* Cost for each symbol in the aligned code  */
198         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
199 #endif
200 };
201
202 /* Codewords and lengths for the LZX Huffman codes.  */
203 struct lzx_codes {
204         struct lzx_codewords codewords;
205         struct lzx_lens lens;
206 };
207
208 /* Symbol frequency counters for the LZX Huffman codes.  */
209 struct lzx_freqs {
210         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
211         u32 len[LZX_LENCODE_NUM_SYMBOLS];
212         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
213 };
214
215 /*
216  * Represents a run of literals followed by a match or end-of-block.  This
217  * struct is needed to temporarily store items chosen by the parser, since items
218  * cannot be written until all items for the block have been chosen and the
219  * block's Huffman codes have been computed.
220  */
221 struct lzx_sequence {
222
223         /* The number of literals in the run.  This may be 0.  The literals are
224          * not stored explicitly in this structure; instead, they are read
225          * directly from the uncompressed data.  */
226         u16 litrunlen;
227
228         /* If the next field doesn't indicate end-of-block, then this is the
229          * match length minus LZX_MIN_MATCH_LEN.  */
230         u16 adjusted_length;
231
232         /* If bit 31 is clear, then this field contains the match header in bits
233          * 0-8 and the match offset plus LZX_OFFSET_ADJUSTMENT in bits 9-30.
234          * Otherwise, this sequence's literal run was the last literal run in
235          * the block, so there is no match that follows it.  */
236         u32 adjusted_offset_and_match_hdr;
237 };
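
/*
 * Illustrative sketch (not part of the compressor): how the packed fields of a
 * 'struct lzx_sequence' relate to the match they describe.  The helper name
 * 'lzx_sequence_example' is hypothetical and exists only for illustration.
 */
#if 0
static void
lzx_sequence_example(const struct lzx_sequence *seq)
{
	if (seq->adjusted_offset_and_match_hdr & 0x80000000)
		return;		/* final sequence: literal run only, no match */

	unsigned match_hdr   = seq->adjusted_offset_and_match_hdr & 0x1FF;
	unsigned offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
	u32 adjusted_offset  = seq->adjusted_offset_and_match_hdr >> 9;
	u32 match_len        = seq->adjusted_length + LZX_MIN_MATCH_LEN;

	(void)offset_slot;
	(void)adjusted_offset;
	(void)match_len;
}
#endif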
238
239 /*
240  * This structure represents a byte position in the input buffer and a node in
241  * the graph of possible match/literal choices.
242  *
243  * Logically, each incoming edge to this node is labeled with a literal or a
244  * match that can be taken to reach this position from an earlier position; and
245  * each outgoing edge from this node is labeled with a literal or a match that
246  * can be taken to advance from this position to a later position.
247  */
248 struct lzx_optimum_node {
249
250         /* The cost, in bits, of the lowest-cost path that has been found to
251          * reach this position.  This can change as progressively lower cost
252          * paths are found to reach this position.  */
253         u32 cost;
254
255         /*
256          * The match or literal that was taken to reach this position.  This can
257          * change as progressively lower cost paths are found to reach this
258          * position.
259          *
260          * This variable is divided into two bitfields.
261          *
262          * Literals:
263          *      Low bits are 0, high bits are the literal.
264          *
265          * Explicit offset matches:
266          *      Low bits are the match length, high bits are the offset plus 2.
267          *
268          * Repeat offset matches:
269          *      Low bits are the match length, high bits are the queue index.
270          */
271         u32 item;
272 #define OPTIMUM_OFFSET_SHIFT 9
273 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
274 } _aligned_attribute(8);
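
/* For example, a literal byte 0x41 chosen to reach a position is stored as
 * item = 0x41 << OPTIMUM_OFFSET_SHIFT; a length-3 match with explicit offset
 * 100 is stored as item = ((100 + 2) << OPTIMUM_OFFSET_SHIFT) | 3; and a
 * length-3 match that reuses queue entry 1 is stored as
 * item = (1 << OPTIMUM_OFFSET_SHIFT) | 3.  */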
275
276 /*
277  * Least-recently-used queue for match offsets.
278  *
279  * This is represented as a 64-bit integer for efficiency.  There are three
280  * offsets of 21 bits each.  Bit 64 is garbage.
281  */
282 struct lzx_lru_queue {
283         u64 R;
284 };
285
286 #define LZX_QUEUE64_OFFSET_SHIFT 21
287 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
288
289 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
290 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
291 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
292
293 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
294 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
295 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
296
297 static inline void
298 lzx_lru_queue_init(struct lzx_lru_queue *queue)
299 {
300         queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
301                    ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
302                    ((u64)1 << LZX_QUEUE64_R2_SHIFT);
303 }
304
305 static inline u64
306 lzx_lru_queue_R0(struct lzx_lru_queue queue)
307 {
308         return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
309 }
310
311 static inline u64
312 lzx_lru_queue_R1(struct lzx_lru_queue queue)
313 {
314         return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
315 }
316
317 static inline u64
318 lzx_lru_queue_R2(struct lzx_lru_queue queue)
319 {
320         return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
321 }
322
323 /* Push a match offset onto the front (most recently used) end of the queue.  */
324 static inline struct lzx_lru_queue
325 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
326 {
327         return (struct lzx_lru_queue) {
328                 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
329         };
330 }
331
332 /* Pop a match offset off the front (most recently used) end of the queue.  */
333 static inline u32
334 lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
335 {
336         u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
337         queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
338         return offset;
339 }
340
341 /* Swap a match offset to the front of the queue.  */
342 static inline struct lzx_lru_queue
343 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
344 {
345         if (idx == 0)
346                 return queue;
347
348         if (idx == 1)
349                 return (struct lzx_lru_queue) {
350                         .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
351                              (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
352                              (queue.R & LZX_QUEUE64_R2_MASK),
353                 };
354
355         return (struct lzx_lru_queue) {
356                 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
357                      (queue.R & LZX_QUEUE64_R1_MASK) |
358                      (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
359         };
360 }
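
/*
 * Illustrative sketch (not part of the compressor): how the packed queue
 * behaves.  'lzx_lru_queue_example' is a hypothetical name used only for
 * illustration.
 */
#if 0
static void
lzx_lru_queue_example(void)
{
	struct lzx_lru_queue q;

	lzx_lru_queue_init(&q);		/* R0 = 1,   R1 = 1,   R2 = 1 */
	q = lzx_lru_queue_push(q, 100);	/* R0 = 100, R1 = 1,   R2 = 1 */
	q = lzx_lru_queue_push(q, 200);	/* R0 = 200, R1 = 100, R2 = 1 */
	q = lzx_lru_queue_swap(q, 2);	/* R0 = 1,   R1 = 100, R2 = 200 */
}
#endif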
361
362 /* The main LZX compressor structure  */
363 struct lzx_compressor {
364
365         /* The "nice" match length: if a match of this length is found, then
366          * choose it immediately without further consideration.  */
367         unsigned nice_match_length;
368
369         /* The maximum search depth: consider at most this many potential
370          * matches at each position.  */
371         unsigned max_search_depth;
372
373         /* The log base 2 of the LZX window size for LZ match offset encoding
374          * purposes.  This will be >= LZX_MIN_WINDOW_ORDER and <=
375          * LZX_MAX_WINDOW_ORDER.  */
376         unsigned window_order;
377
378         /* The number of symbols in the main alphabet.  This depends on
379          * @window_order, since @window_order determines the maximum possible
380          * offset.  */
381         unsigned num_main_syms;
382
383         /* Number of optimization passes per block  */
384         unsigned num_optim_passes;
385
386         /* The preprocessed buffer of data being compressed  */
387         u8 *in_buffer;
388
389         /* The number of bytes of data to be compressed, which is the number of
390          * bytes of data in @in_buffer that are actually valid.  */
391         size_t in_nbytes;
392
393         /* Pointer to the compress() implementation chosen at allocation time */
394         void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
395
396         /* If true, the compressor need not preserve the input buffer if it
397          * compresses the data successfully.  */
398         bool destructive;
399
400         /* The Huffman symbol frequency counters for the current block.  */
401         struct lzx_freqs freqs;
402
403         /* The Huffman codes for the current and previous blocks.  The one with
404          * index 'codes_index' is for the current block, and the other one is
405          * for the previous block.  */
406         struct lzx_codes codes[2];
407         unsigned codes_index;
408
409         /* The matches and literals that the parser has chosen for the current
410          * block.  The required length of this array is limited by the maximum
411          * number of matches that can ever be chosen for a single block.  */
412         struct lzx_sequence chosen_sequences[DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN)];
413
414         /* Tables for mapping adjusted offsets to offset slots  */
415
416         /* offset slots [0, 29]  */
417         u8 offset_slot_tab_1[32768];
418
419         /* offset slots [30, 49]  */
420         u8 offset_slot_tab_2[128];
421
422         union {
423                 /* Data for greedy or lazy parsing  */
424                 struct {
425                         /* Hash chains matchfinder (MUST BE LAST!!!)  */
426                         struct hc_matchfinder hc_mf;
427                 };
428
429                 /* Data for near-optimal parsing  */
430                 struct {
431                         /*
432                          * The graph nodes for the current block.
433                          *
434                          * We need at least 'LZX_DIV_BLOCK_SIZE +
435                          * LZX_MAX_MATCH_LEN - 1' nodes because that is the
436                          * maximum block size that may be used.  Add 1 because
437                          * we need a node to represent end-of-block.
438                          *
439                          * It is possible that nodes past end-of-block are
440                          * accessed during match consideration, but this can
441                          * only occur if the block was truncated at
442                          * LZX_DIV_BLOCK_SIZE.  So the same bound still applies.
443                          * Note that since nodes past the end of the block will
444                          * never actually have an effect on the items that are
445                          * chosen for the block, it makes no difference what
446                          * their costs are initialized to (if anything).
447                          */
448                         struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE +
449                                                               LZX_MAX_MATCH_LEN - 1 + 1];
450
451                         /* The cost model for the current block  */
452                         struct lzx_costs costs;
453
454                         /*
455                          * Cached matches for the current block.  This array
456                          * contains the matches that were found at each position
457                          * in the block.  Specifically, for each position, there
458                          * is a special 'struct lz_match' whose 'length' field
459                          * contains the number of matches that were found at
460                          * that position; this is followed by the matches
461                          * themselves, if any, sorted by strictly increasing
462                          * length.
463                          *
464                          * Note: in rare cases, there will be a very high number
465                          * of matches in the block and this array will overflow.
466                          * If this happens, we force the end of the current
467                          * block.  LZX_CACHE_LENGTH is the length at which we
468                          * actually check for overflow.  The extra slots beyond
469                          * this are enough to absorb the worst case overflow,
470                          * which occurs if starting at
471                          * &match_cache[LZX_CACHE_LENGTH - 1], we write the
472                          * match count header, then write
473                          * LZX_MAX_MATCHES_PER_POS matches, then skip searching
474                          * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
475                          * write the match count header for each.
476                          */
477                         struct lz_match match_cache[LZX_CACHE_LENGTH +
478                                                     LZX_MAX_MATCHES_PER_POS +
479                                                     LZX_MAX_MATCH_LEN - 1];
480
481                         /* Hash table for finding length 2 matches  */
482                         pos_t hash2_tab[LZX_HASH2_LENGTH];
483
484                         /* Binary trees matchfinder (MUST BE LAST!!!)  */
485                         struct bt_matchfinder bt_mf;
486                 };
487         };
488 };
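
/*
 * Illustrative sketch (not part of the compressor): walking the match cache
 * layout described above for one block.  'lzx_match_cache_walk_example' and
 * its local variables are hypothetical names used only for illustration.
 */
#if 0
static void
lzx_match_cache_walk_example(struct lzx_compressor *c, u32 block_size)
{
	struct lz_match *cache_ptr = c->match_cache;

	for (u32 i = 0; i < block_size; i++) {
		u32 num_matches = cache_ptr->length; /* match count header */
		struct lz_match *matches = cache_ptr + 1;

		/* matches[0..num_matches - 1] are sorted by strictly
		 * increasing length. */

		cache_ptr = matches + num_matches; /* next position's header */
	}
}
#endif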
489
490 /*
491  * Structure to keep track of the current state of sending bits to the
492  * compressed output buffer.
493  *
494  * The LZX bitstream is encoded as a sequence of 16-bit coding units.
495  */
496 struct lzx_output_bitstream {
497
498         /* Bits that haven't yet been written to the output buffer.  */
499         machine_word_t bitbuf;
500
501         /* Number of bits currently held in @bitbuf.  */
502         u32 bitcount;
503
504         /* Pointer to the start of the output buffer.  */
505         u8 *start;
506
507         /* Pointer to the position in the output buffer at which the next coding
508          * unit should be written.  */
509         u8 *next;
510
511         /* Pointer just past the end of the output buffer, rounded down to a
512          * 2-byte boundary.  */
513         u8 *end;
514 };
515
516 /* Can the specified number of bits always be added to 'bitbuf' after any
517  * pending 16-bit coding units have been flushed?  */
518 #define CAN_BUFFER(n)   ((n) <= (8 * sizeof(machine_word_t)) - 16)
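
/* For example, with a 64-bit machine_word_t, CAN_BUFFER(n) holds for n <= 48,
 * so four 12-bit main codewords (4 * MAIN_CODEWORD_LIMIT = 48 bits) can be
 * accumulated between flushes; with a 32-bit machine_word_t it holds only for
 * n <= 16.  */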
519
520 /*
521  * Initialize the output bitstream.
522  *
523  * @os
524  *      The output bitstream structure to initialize.
525  * @buffer
526  *      The buffer being written to.
527  * @size
528  *      Size of @buffer, in bytes.
529  */
530 static void
531 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
532 {
533         os->bitbuf = 0;
534         os->bitcount = 0;
535         os->start = buffer;
536         os->next = os->start;
537         os->end = os->start + (size & ~1);
538 }
539
540 /* Add some bits to the bitbuffer variable of the output bitstream.  The caller
541  * must make sure there is enough room.  */
542 static inline void
543 lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
544 {
545         os->bitbuf = (os->bitbuf << num_bits) | bits;
546         os->bitcount += num_bits;
547 }
548
549 /* Flush bits from the bitbuffer variable to the output buffer.  'max_num_bits'
550  * specifies the maximum number of bits that may have been added since the last
551  * flush.  */
552 static inline void
553 lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
554 {
555         if (os->end - os->next < 6)
556                 return;
557         put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 16), os->next + 0);
558         if (max_num_bits > 16)
559                 put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 32), os->next + 2);
560         if (max_num_bits > 32)
561                 put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 48), os->next + 4);
562         os->next += (os->bitcount >> 4) << 1;
563         os->bitcount &= 15;
564 }
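
/* Worked example: if 10 bits are already buffered and lzx_add_bits() then adds
 * 12 more, bitcount becomes 22.  lzx_flush_bits(os, 16) writes the oldest 16
 * buffered bits as one coding unit (bitbuf >> (22 - 16)), advances 'next' by
 * (22 >> 4) << 1 = 2 bytes, and leaves bitcount = 22 & 15 = 6.  */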
565
566 /* Add at most 16 bits to the bitbuffer and flush it.  */
567 static inline void
568 lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
569 {
570         lzx_add_bits(os, bits, num_bits);
571         lzx_flush_bits(os, 16);
572 }
573
574 /*
575  * Flush the last coding unit to the output buffer if needed.  Return the total
576  * number of bytes written to the output buffer, or 0 if an overflow occurred.
577  */
578 static u32
579 lzx_flush_output(struct lzx_output_bitstream *os)
580 {
581         if (os->end - os->next < 6)
582                 return 0;
583
584         if (os->bitcount != 0) {
585                 put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next);
586                 os->next += 2;
587         }
588
589         return os->next - os->start;
590 }
591
592 /* Build the main, length, and aligned offset Huffman codes used in LZX.
593  *
594  * This takes as input the frequency tables for each code and produces as output
595  * a set of tables that map symbols to codewords and codeword lengths.  */
596 static void
597 lzx_make_huffman_codes(struct lzx_compressor *c)
598 {
599         const struct lzx_freqs *freqs = &c->freqs;
600         struct lzx_codes *codes = &c->codes[c->codes_index];
601
602         STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
603                       MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
604         STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 9 &&
605                       LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
606         STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
607                       ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
608
609         make_canonical_huffman_code(c->num_main_syms,
610                                     MAIN_CODEWORD_LIMIT,
611                                     freqs->main,
612                                     codes->lens.main,
613                                     codes->codewords.main);
614
615         make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
616                                     LENGTH_CODEWORD_LIMIT,
617                                     freqs->len,
618                                     codes->lens.len,
619                                     codes->codewords.len);
620
621         make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
622                                     ALIGNED_CODEWORD_LIMIT,
623                                     freqs->aligned,
624                                     codes->lens.aligned,
625                                     codes->codewords.aligned);
626 }
627
628 /* Reset the symbol frequencies for the LZX Huffman codes.  */
629 static void
630 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
631 {
632         memset(&c->freqs, 0, sizeof(c->freqs));
633 }
634
635 static unsigned
636 lzx_compute_precode_items(const u8 lens[restrict],
637                           const u8 prev_lens[restrict],
638                           u32 precode_freqs[restrict],
639                           unsigned precode_items[restrict])
640 {
641         unsigned *itemptr;
642         unsigned run_start;
643         unsigned run_end;
644         unsigned extra_bits;
645         int delta;
646         u8 len;
647
648         itemptr = precode_items;
649         run_start = 0;
650
651         while (!((len = lens[run_start]) & 0x80)) {
652
653                 /* len = the length being repeated  */
654
655                 /* Find the next run of codeword lengths.  */
656
657                 run_end = run_start + 1;
658
659                 /* Fast case for a single length.  */
660                 if (likely(len != lens[run_end])) {
661                         delta = prev_lens[run_start] - len;
662                         if (delta < 0)
663                                 delta += 17;
664                         precode_freqs[delta]++;
665                         *itemptr++ = delta;
666                         run_start++;
667                         continue;
668                 }
669
670                 /* Extend the run.  */
671                 do {
672                         run_end++;
673                 } while (len == lens[run_end]);
674
675                 if (len == 0) {
676                         /* Run of zeroes.  */
677
678                         /* Symbol 18: RLE 20 to 51 zeroes at a time.  */
679                         while ((run_end - run_start) >= 20) {
680                                 extra_bits = min((run_end - run_start) - 20, 0x1f);
681                                 precode_freqs[18]++;
682                                 *itemptr++ = 18 | (extra_bits << 5);
683                                 run_start += 20 + extra_bits;
684                         }
685
686                         /* Symbol 17: RLE 4 to 19 zeroes at a time.  */
687                         if ((run_end - run_start) >= 4) {
688                                 extra_bits = min((run_end - run_start) - 4, 0xf);
689                                 precode_freqs[17]++;
690                                 *itemptr++ = 17 | (extra_bits << 5);
691                                 run_start += 4 + extra_bits;
692                         }
693                 } else {
694
695                         /* A run of nonzero lengths. */
696
697                         /* Symbol 19: RLE 4 to 5 of any length at a time.  */
698                         while ((run_end - run_start) >= 4) {
699                                 extra_bits = (run_end - run_start) > 4;
700                                 delta = prev_lens[run_start] - len;
701                                 if (delta < 0)
702                                         delta += 17;
703                                 precode_freqs[19]++;
704                                 precode_freqs[delta]++;
705                                 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
706                                 run_start += 4 + extra_bits;
707                         }
708                 }
709
710                 /* Output any remaining lengths without RLE.  */
711                 while (run_start != run_end) {
712                         delta = prev_lens[run_start] - len;
713                         if (delta < 0)
714                                 delta += 17;
715                         precode_freqs[delta]++;
716                         *itemptr++ = delta;
717                         run_start++;
718                 }
719         }
720
721         return itemptr - precode_items;
722 }
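
/* Worked example: a run of exactly 6 zero lengths is emitted as the single
 * item '17 | (2 << 5)' (symbol 17 with 2 extra bits, i.e. 4 + 2 = 6 zeroes),
 * while a single nonzero length of 5 whose corresponding prev_lens[] entry is
 * 3 is emitted as the delta item (3 - 5) mod 17 = 15.  */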
723
724 /*
725  * Output a Huffman code in the compressed form used in LZX.
726  *
727  * The Huffman code is represented in the output as a logical series of codeword
728  * lengths from which the Huffman code, which must be in canonical form, can be
729  * reconstructed.
730  *
731  * The codeword lengths are themselves compressed using a separate Huffman code,
732  * the "precode", which contains a symbol for each possible codeword length in
733  * the larger code as well as several special symbols to represent repeated
734  * codeword lengths (a form of run-length encoding).  The precode is itself
735  * constructed in canonical form, and its codeword lengths are represented
736  * literally in 20 4-bit fields that immediately precede the compressed codeword
737  * lengths of the larger code.
738  *
739  * Furthermore, the codeword lengths of the larger code are actually represented
740  * as deltas from the codeword lengths of the corresponding code in the previous
741  * block.
742  *
743  * @os:
744  *      Bitstream to which to write the compressed Huffman code.
745  * @lens:
746  *      The codeword lengths, indexed by symbol, in the Huffman code.
747  * @prev_lens:
748  *      The codeword lengths, indexed by symbol, in the corresponding Huffman
749  *      code in the previous block, or all zeroes if this is the first block.
750  * @num_lens:
751  *      The number of symbols in the Huffman code.
752  */
753 static void
754 lzx_write_compressed_code(struct lzx_output_bitstream *os,
755                           const u8 lens[restrict],
756                           const u8 prev_lens[restrict],
757                           unsigned num_lens)
758 {
759         u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
760         u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
761         u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
762         unsigned precode_items[num_lens];
763         unsigned num_precode_items;
764         unsigned precode_item;
765         unsigned precode_sym;
766         unsigned i;
767         u8 saved = lens[num_lens];
768         *(u8 *)(lens + num_lens) = 0x80;
769
770         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
771                 precode_freqs[i] = 0;
772
773         /* Compute the "items" (RLE / literal tokens and extra bits) with which
774          * the codeword lengths in the larger code will be output.  */
775         num_precode_items = lzx_compute_precode_items(lens,
776                                                       prev_lens,
777                                                       precode_freqs,
778                                                       precode_items);
779
780         /* Build the precode.  */
781         STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 &&
782                       PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN);
783         make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
784                                     PRE_CODEWORD_LIMIT,
785                                     precode_freqs, precode_lens,
786                                     precode_codewords);
787
788         /* Output the lengths of the codewords in the precode.  */
789         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
790                 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
791
792         /* Output the encoded lengths of the codewords in the larger code.  */
793         for (i = 0; i < num_precode_items; i++) {
794                 precode_item = precode_items[i];
795                 precode_sym = precode_item & 0x1F;
796                 lzx_add_bits(os, precode_codewords[precode_sym],
797                              precode_lens[precode_sym]);
798                 if (precode_sym >= 17) {
799                         if (precode_sym == 17) {
800                                 lzx_add_bits(os, precode_item >> 5, 4);
801                         } else if (precode_sym == 18) {
802                                 lzx_add_bits(os, precode_item >> 5, 5);
803                         } else {
804                                 lzx_add_bits(os, (precode_item >> 5) & 1, 1);
805                                 precode_sym = precode_item >> 6;
806                                 lzx_add_bits(os, precode_codewords[precode_sym],
807                                              precode_lens[precode_sym]);
808                         }
809                 }
810                 STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1));
811                 lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1);
812         }
813
814         *(u8 *)(lens + num_lens) = saved;
815 }
816
817 /*
818  * Write all matches and literal bytes (which were precomputed) in an LZX
819  * compressed block to the output bitstream in the final compressed
820  * representation.
821  *
822  * @os
823  *      The output bitstream.
824  * @block_type
825  *      The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
826  *      LZX_BLOCKTYPE_VERBATIM).
827  * @block_data
828  *      The uncompressed data of the block.
829  * @sequences
830  *      The matches and literals to output, given as a series of sequences.
831  * @codes
832  *      The main, length, and aligned offset Huffman codes for the current
833  *      LZX compressed block.
834  */
835 static void
836 lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
837                     const u8 *block_data, const struct lzx_sequence sequences[],
838                     const struct lzx_codes *codes)
839 {
840         const struct lzx_sequence *seq = sequences;
841         u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED);
842
843         for (;;) {
844                 /* Output the next sequence.  */
845
846                 unsigned litrunlen = seq->litrunlen;
847                 unsigned match_hdr;
848                 unsigned main_symbol;
849                 unsigned adjusted_length;
850                 u32 adjusted_offset;
851                 unsigned offset_slot;
852                 unsigned num_extra_bits;
853                 u32 extra_bits;
854
855                 /* Output the literal run of the sequence.  */
856
857                 if (litrunlen) {  /* Is the literal run nonempty?  */
858
859                         /* Verify optimization is enabled on 64-bit  */
860                         STATIC_ASSERT(sizeof(machine_word_t) < 8 ||
861                                       CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT));
862
863                         if (CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT)) {
864
865                                 /* 64-bit: write 4 literals at a time.  */
866                                 while (litrunlen >= 4) {
867                                         unsigned lit0 = block_data[0];
868                                         unsigned lit1 = block_data[1];
869                                         unsigned lit2 = block_data[2];
870                                         unsigned lit3 = block_data[3];
871                                         lzx_add_bits(os, codes->codewords.main[lit0], codes->lens.main[lit0]);
872                                         lzx_add_bits(os, codes->codewords.main[lit1], codes->lens.main[lit1]);
873                                         lzx_add_bits(os, codes->codewords.main[lit2], codes->lens.main[lit2]);
874                                         lzx_add_bits(os, codes->codewords.main[lit3], codes->lens.main[lit3]);
875                                         lzx_flush_bits(os, 4 * MAIN_CODEWORD_LIMIT);
876                                         block_data += 4;
877                                         litrunlen -= 4;
878                                 }
879                                 if (litrunlen--) {
880                                         unsigned lit = *block_data++;
881                                         lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
882                                         if (litrunlen--) {
883                                                 unsigned lit = *block_data++;
884                                                 lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
885                                                 if (litrunlen--) {
886                                                         unsigned lit = *block_data++;
887                                                         lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
888                                                         lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
889                                                 } else {
890                                                         lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
891                                                 }
892                                         } else {
893                                                 lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT);
894                                         }
895                                 }
896                         } else {
897                                 /* 32-bit: write 1 literal at a time.  */
898                                 do {
899                                         unsigned lit = *block_data++;
900                                         lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
901                                         lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
902                                 } while (--litrunlen);
903                         }
904                 }
905
906                 /* Was this the last literal run?  */
907                 if (seq->adjusted_offset_and_match_hdr & 0x80000000)
908                         return;
909
910                 /* Nope; output the match.  */
911
912                 match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
913                 main_symbol = LZX_NUM_CHARS + match_hdr;
914                 adjusted_length = seq->adjusted_length;
915
916                 block_data += adjusted_length + LZX_MIN_MATCH_LEN;
917
918                 offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
919                 adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
920
921                 num_extra_bits = lzx_extra_offset_bits[offset_slot];
922                 extra_bits = adjusted_offset - lzx_offset_slot_base[offset_slot];
923
924         #define MAX_MATCH_BITS  (MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
925                                  14 + ALIGNED_CODEWORD_LIMIT)
926
927                 /* Verify optimization is enabled on 64-bit  */
928                 STATIC_ASSERT(sizeof(machine_word_t) < 8 || CAN_BUFFER(MAX_MATCH_BITS));
929
930                 /* Output the main symbol for the match.  */
931
932                 lzx_add_bits(os, codes->codewords.main[main_symbol],
933                              codes->lens.main[main_symbol]);
934                 if (!CAN_BUFFER(MAX_MATCH_BITS))
935                         lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
936
937                 /* If needed, output the length symbol for the match.  */
938
939                 if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
940                         lzx_add_bits(os, codes->codewords.len[adjusted_length - LZX_NUM_PRIMARY_LENS],
941                                      codes->lens.len[adjusted_length - LZX_NUM_PRIMARY_LENS]);
942                         if (!CAN_BUFFER(MAX_MATCH_BITS))
943                                 lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
944                 }
945
946                 /* Output the extra offset bits for the match.  In aligned
947                  * offset blocks, the lowest 3 bits of the adjusted offset are
948                  * Huffman-encoded using the aligned offset code, provided that
949                  * there are at least 3 extra offset bits required.  All other
950                  * extra offset bits are output verbatim.  */
951
952                 if ((adjusted_offset & ones_if_aligned) >= 16) {
953
954                         lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
955                                      num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
956                         if (!CAN_BUFFER(MAX_MATCH_BITS))
957                                 lzx_flush_bits(os, 14);
958
959                         lzx_add_bits(os, codes->codewords.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK],
960                                      codes->lens.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]);
961                         if (!CAN_BUFFER(MAX_MATCH_BITS))
962                                 lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
963                 } else {
964                         lzx_add_bits(os, extra_bits, num_extra_bits);
965                         if (!CAN_BUFFER(MAX_MATCH_BITS))
966                                 lzx_flush_bits(os, 17);
967                 }
968
969                 if (CAN_BUFFER(MAX_MATCH_BITS))
970                         lzx_flush_bits(os, MAX_MATCH_BITS);
971
972                 /* Advance to the next sequence.  */
973                 seq++;
974         }
975 }
976
977 static void
978 lzx_write_compressed_block(const u8 *block_begin,
979                            int block_type,
980                            u32 block_size,
981                            unsigned window_order,
982                            unsigned num_main_syms,
983                            const struct lzx_sequence sequences[],
984                            const struct lzx_codes * codes,
985                            const struct lzx_lens * prev_lens,
986                            struct lzx_output_bitstream * os)
987 {
988         LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
989                    block_type == LZX_BLOCKTYPE_VERBATIM);
990
991         /* The first three bits indicate the type of block and are one of the
992          * LZX_BLOCKTYPE_* constants.  */
993         lzx_write_bits(os, block_type, 3);
994
995         /* Output the block size.
996          *
997          * The original LZX format seemed to always encode the block size in 3
998          * bytes.  However, the implementation in WIMGAPI, as used in WIM files,
999          * uses the first bit to indicate whether the block is the default size
1000          * (32768) or a different size given explicitly by the next 16 bits.
1001          *
1002          * By default, this compressor uses a window size of 32768 and therefore
1003          * follows the WIMGAPI behavior.  However, this compressor also supports
1004          * window sizes greater than 32768 bytes, which do not appear to be
1005          * supported by WIMGAPI.  In such cases, we retain the default size bit
1006          * to mean a size of 32768 bytes but output the non-default block size in 24
1007          * bits rather than 16.  The compatibility of this behavior is unknown
1008          * because WIMs created with chunk size greater than 32768 can seemingly
1009          * only be opened by wimlib anyway.  */
1010         if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
1011                 lzx_write_bits(os, 1, 1);
1012         } else {
1013                 lzx_write_bits(os, 0, 1);
1014
1015                 if (window_order >= 16)
1016                         lzx_write_bits(os, block_size >> 16, 8);
1017
1018                 lzx_write_bits(os, block_size & 0xFFFF, 16);
1019         }
1020
1021         /* If it's an aligned offset block, output the aligned offset code.  */
1022         if (block_type == LZX_BLOCKTYPE_ALIGNED) {
1023                 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1024                         lzx_write_bits(os, codes->lens.aligned[i],
1025                                        LZX_ALIGNEDCODE_ELEMENT_SIZE);
1026                 }
1027         }
1028
1029         /* Output the main code (two parts).  */
1030         lzx_write_compressed_code(os, codes->lens.main,
1031                                   prev_lens->main,
1032                                   LZX_NUM_CHARS);
1033         lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
1034                                   prev_lens->main + LZX_NUM_CHARS,
1035                                   num_main_syms - LZX_NUM_CHARS);
1036
1037         /* Output the length code.  */
1038         lzx_write_compressed_code(os, codes->lens.len,
1039                                   prev_lens->len,
1040                                   LZX_LENCODE_NUM_SYMBOLS);
1041
1042         /* Output the compressed matches and literals.  */
1043         lzx_write_sequences(os, block_type, block_begin, sequences, codes);
1044 }
1045
1046 /* Given the frequencies of symbols in an LZX-compressed block and the
1047  * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
1048  * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
1049  * will take fewer bits to output.  */
1050 static int
1051 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1052                                const struct lzx_codes * codes)
1053 {
1054         u32 aligned_cost = 0;
1055         u32 verbatim_cost = 0;
1056
1057         /* A verbatim block requires 3 bits in each place that an aligned symbol
1058          * would be used in an aligned offset block.  */
1059         for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1060                 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
1061                 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1062         }
1063
1064         /* Account for output of the aligned offset code.  */
1065         aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1066
1067         if (aligned_cost < verbatim_cost)
1068                 return LZX_BLOCKTYPE_ALIGNED;
1069         else
1070                 return LZX_BLOCKTYPE_VERBATIM;
1071 }
1072
1073 /*
1074  * Return the offset slot for the specified adjusted match offset, using the
1075  * compressor's acceleration tables to speed up the mapping.
1076  */
1077 static inline unsigned
1078 lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset)
1079 {
1080         if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
1081                 return c->offset_slot_tab_1[adjusted_offset];
1082         return c->offset_slot_tab_2[adjusted_offset >> 14];
1083 }
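
/* For example, an adjusted offset of 1000 is looked up directly in
 * offset_slot_tab_1[1000], whereas an adjusted offset of 500000 maps to
 * offset_slot_tab_2[500000 >> 14] = offset_slot_tab_2[30].  */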
1084
1085 /*
1086  * Finish an LZX block:
1087  *
1088  * - build the Huffman codes
1089  * - decide whether to output the block as VERBATIM or ALIGNED
1090  * - output the block
1091  * - swap the indices of the current and previous Huffman codes
1092  */
1093 static void
1094 lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
1095                  const u8 *block_begin, u32 block_size, u32 seq_idx)
1096 {
1097         int block_type;
1098
1099         lzx_make_huffman_codes(c);
1100
1101         block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
1102                                                     &c->codes[c->codes_index]);
1103         lzx_write_compressed_block(block_begin,
1104                                    block_type,
1105                                    block_size,
1106                                    c->window_order,
1107                                    c->num_main_syms,
1108                                    &c->chosen_sequences[seq_idx],
1109                                    &c->codes[c->codes_index],
1110                                    &c->codes[c->codes_index ^ 1].lens,
1111                                    os);
1112         c->codes_index ^= 1;
1113 }
1114
1115 /* Tally the Huffman symbol for a literal and increment the literal run length.
1116  */
1117 static inline void
1118 lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
1119 {
1120         c->freqs.main[literal]++;
1121         ++*litrunlen_p;
1122 }
1123
1124 /* Tally the Huffman symbol for a match, save the match data and the length of
1125  * the preceding literal run in the next lzx_sequence, and update the recent
1126  * offsets queue.  */
1127 static inline void
1128 lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
1129                  u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
1130                  u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
1131 {
1132         u32 litrunlen = *litrunlen_p;
1133         struct lzx_sequence *next_seq = *next_seq_p;
1134         unsigned offset_slot;
1135         unsigned v;
1136
1137         v = length - LZX_MIN_MATCH_LEN;
1138
1139         /* Save the literal run length and adjusted length.  */
1140         next_seq->litrunlen = litrunlen;
1141         next_seq->adjusted_length = v;
1142
1143         /* Compute the length header and tally the length symbol if needed  */
1144         if (v >= LZX_NUM_PRIMARY_LENS) {
1145                 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1146                 v = LZX_NUM_PRIMARY_LENS;
1147         }
1148
1149         /* Compute the offset slot  */
1150         offset_slot = lzx_comp_get_offset_slot(c, offset_data);
1151
1152         /* Compute the match header.  */
1153         v += offset_slot * LZX_NUM_LEN_HEADERS;
1154
1155         /* Save the adjusted offset and match header.  */
1156         next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v;
1157
1158         /* Tally the main symbol.  */
1159         c->freqs.main[LZX_NUM_CHARS + v]++;
1160
1161         /* Update the recent offsets queue.  */
1162         if (offset_data < LZX_NUM_RECENT_OFFSETS) {
1163                 /* Repeat offset match  */
1164                 swap(recent_offsets[0], recent_offsets[offset_data]);
1165         } else {
1166                 /* Explicit offset match  */
1167
1168                 /* Tally the aligned offset symbol if needed  */
1169                 if (offset_data >= 16)
1170                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1171
1172                 recent_offsets[2] = recent_offsets[1];
1173                 recent_offsets[1] = recent_offsets[0];
1174                 recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT;
1175         }
1176
1177         /* Reset the literal run length and advance to the next sequence.  */
1178         *next_seq_p = next_seq + 1;
1179         *litrunlen_p = 0;
1180 }
1181
1182 /* Finish the last lzx_sequence.  The last lzx_sequence is just a literal run;
1183  * there is no match.  This literal run may be empty.  */
1184 static inline void
1185 lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
1186 {
1187         last_seq->litrunlen = litrunlen;
1188
1189         /* Special value to mark last sequence  */
1190         last_seq->adjusted_offset_and_match_hdr = 0x80000000;
1191 }
1192
1193 /*
1194  * Given the minimum-cost path computed through the item graph for the current
1195  * block, walk the path and count how many of each symbol in each Huffman-coded
1196  * alphabet would be required to output the items (matches and literals) along
1197  * the path.
1198  *
1199  * Note that the path will be walked backwards (from the end of the block to the
1200  * beginning of the block), but this doesn't matter because this function only
1201  * computes frequencies.
1202  */
1203 static void
1204 lzx_tally_item_list(struct lzx_compressor *c, u32 block_size)
1205 {
1206         u32 node_idx = block_size;
1207         for (;;) {
1208                 u32 len;
1209                 u32 offset_data;
1210                 unsigned v;
1211                 unsigned offset_slot;
1212
1213                 /* Tally literals until either a match or the beginning of the
1214                  * block is reached.  */
1215                 for (;;) {
1216                         u32 item = c->optimum_nodes[node_idx].item;
1217
1218                         len = item & OPTIMUM_LEN_MASK;
1219                         offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1220
1221                         if (len != 0)  /* Not a literal?  */
1222                                 break;
1223
1224                         /* Tally the main symbol for the literal.  */
1225                         c->freqs.main[offset_data]++;
1226
1227                         if (--node_idx == 0) /* Beginning of block was reached?  */
1228                                 return;
1229                 }
1230
1231                 node_idx -= len;
1232
1233                 /* Tally a match.  */
1234
1235                 /* Tally the aligned offset symbol if needed.  */
1236                 if (offset_data >= 16)
1237                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1238
1239                 /* Tally the length symbol if needed.  */
1240                 v = len - LZX_MIN_MATCH_LEN;
1241                 if (v >= LZX_NUM_PRIMARY_LENS) {
1242                         c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1243                         v = LZX_NUM_PRIMARY_LENS;
1244                 }
1245
1246                 /* Tally the main symbol.  */
1247                 offset_slot = lzx_comp_get_offset_slot(c, offset_data);
1248                 v += offset_slot * LZX_NUM_LEN_HEADERS;
1249                 c->freqs.main[LZX_NUM_CHARS + v]++;
1250
1251                 if (node_idx == 0) /* Beginning of block was reached?  */
1252                         return;
1253         }
1254 }
1255
1256 /*
1257  * Like lzx_tally_item_list(), but this function also generates the list of
1258  * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
1259  * ready to be output to the bitstream after the Huffman codes are computed.
1260  * The lzx_sequences will be written to decreasing memory addresses as the path
1261  * is walked backwards, which means they will end up in the expected
1262  * first-to-last order.  The return value is the index in c->chosen_sequences at
1263  * which the lzx_sequences begin.
1264  */
1265 static u32
1266 lzx_record_item_list(struct lzx_compressor *c, u32 block_size)
1267 {
1268         u32 node_idx = block_size;
1269         u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
1270         u32 lit_start_node;
1271
1272         /* Special value to mark last sequence  */
1273         c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000;
1274
1275         lit_start_node = node_idx;
1276         for (;;) {
1277                 u32 len;
1278                 u32 offset_data;
1279                 unsigned v;
1280                 unsigned offset_slot;
1281
1282                 /* Record literals until either a match or the beginning of the
1283                  * block is reached.  */
1284                 for (;;) {
1285                         u32 item = c->optimum_nodes[node_idx].item;
1286
1287                         len = item & OPTIMUM_LEN_MASK;
1288                         offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1289
1290                         if (len != 0) /* Not a literal?  */
1291                                 break;
1292
1293                         /* Tally the main symbol for the literal.  */
1294                         c->freqs.main[offset_data]++;
1295
1296                         if (--node_idx == 0) /* Beginning of block was reached?  */
1297                                 goto out;
1298                 }
1299
1300                 /* Save the literal run length for the next sequence (the
1301                  * "previous sequence" when walking backwards).  */
1302                 c->chosen_sequences[seq_idx--].litrunlen = lit_start_node - node_idx;
1303                 node_idx -= len;
1304                 lit_start_node = node_idx;
1305
1306                 /* Record a match.  */
1307
1308                 /* Tally the aligned offset symbol if needed.  */
1309                 if (offset_data >= 16)
1310                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1311
1312                 /* Save the adjusted length.  */
1313                 v = len - LZX_MIN_MATCH_LEN;
1314                 c->chosen_sequences[seq_idx].adjusted_length = v;
1315
1316                 /* Tally the length symbol if needed.  */
1317                 if (v >= LZX_NUM_PRIMARY_LENS) {
1318                         c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1319                         v = LZX_NUM_PRIMARY_LENS;
1320                 }
1321
1322                 /* Tally the main symbol.  */
1323                 offset_slot = lzx_comp_get_offset_slot(c, offset_data);
1324                 v += offset_slot * LZX_NUM_LEN_HEADERS;
1325                 c->freqs.main[LZX_NUM_CHARS + v]++;
1326
1327                 /* Save the adjusted offset and match header.  */
1328                 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr =
1329                                 (offset_data << 9) | v;
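                /* (The low 9 bits hold the match header 'v' -- the offset slot
                 * combined with the length header -- and the adjusted offset
                 * occupies the bits above.)  */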
1330
1331                 if (node_idx == 0) /* Beginning of block was reached?  */
1332                         goto out;
1333         }
1334
1335 out:
1336         /* Save the literal run length for the first sequence.  */
1337         c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
1338
1339         /* Return the index in c->chosen_sequences at which the lzx_sequences
1340          * begin.  */
1341         return seq_idx;
1342 }
1343
1344 /*
1345  * Find an inexpensive path through the graph of possible match/literal choices
1346  * for the current block.  The nodes of the graph are
1347  * c->optimum_nodes[0...block_size].  They correspond directly to the bytes in
1348  * the current block, plus one extra node for end-of-block.  The edges of the
1349  * graph are matches and literals.  The goal is to find the minimum cost path
1350  * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
1351  *
1352  * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1353  * proceeding forwards one node at a time.  At each node, a selection of matches
1354  * (len >= 2), as well as the literal byte (len = 1), is considered.  An item of
1355  * length 'len' provides a new path to reach the node 'len' bytes later.  If
1356  * such a path is the lowest cost found so far to reach that later node, then
1357  * that later node is updated with the new path.
1358  *
1359  * Note that although this algorithm is based on minimum cost path search, due
1360  * to various simplifying assumptions the result is not guaranteed to be the
1361  * true minimum cost, or "optimal", path over the graph of all valid LZX
1362  * representations of this block.
1363  *
1364  * Also, note that because of the presence of the recent offsets queue (which is
1365  * a type of adaptive state), the algorithm cannot work backwards and compute
1366  * "cost to end" instead of "cost to beginning".  Furthermore, the way the
1367  * algorithm handles this adaptive state in the "minimum cost" parse is actually
1368  * only an approximation.  It's possible for the globally optimal, minimum cost
1369  * path to contain a prefix, ending at some position, that is *not* the
1370  * minimum cost path to that position.  This can happen if such a path
1371  * prefix results in a different adaptive state which results in lower costs
1372  * later.  The algorithm does not solve this problem; it only considers the
1373  * lowest cost to reach each individual position.
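 *
 * As a concrete example of the update step: if node 10 has cost 300 and a
 * length-4 match with an estimated cost of 45 is available at that position,
 * then node 14 is given cost 345 and that match as its 'item', unless a
 * cheaper way to reach node 14 has already been found.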
1374  */
1375 static struct lzx_lru_queue
1376 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1377                        const u8 * const restrict block_begin,
1378                        const u32 block_size,
1379                        const struct lzx_lru_queue initial_queue)
1380 {
1381         struct lzx_optimum_node *cur_node = c->optimum_nodes;
1382         struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
1383         struct lz_match *cache_ptr = c->match_cache;
1384         const u8 *in_next = block_begin;
1385         const u8 * const block_end = block_begin + block_size;
1386
1387         /* Instead of storing the match offset LRU queues in the
1388          * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1389          * storing them in a smaller array.  This works because the algorithm
1390          * only requires a limited history of the adaptive state.  Once a given
1391          * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1392          * it is no longer needed.  */
1393         struct lzx_lru_queue queues[512];
1394
1395         STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
1396 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
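        /* The ring is indexed by the window pointer modulo ARRAY_LEN(queues),
         * so the queue saved for some position p remains readable while
         * processing any node up to p + LZX_MAX_MATCH_LEN; its slot is not
         * reused until position p + ARRAY_LEN(queues) is reached.  */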
1397
1398         /* Initially, the cost to reach each node is "infinity".  */
1399         memset(c->optimum_nodes, 0xFF,
1400                (block_size + 1) * sizeof(c->optimum_nodes[0]));
1401
1402         QUEUE(block_begin) = initial_queue;
1403
1404         /* The following loop runs 'block_size' iterations, one per node.  */
1405         do {
1406                 unsigned num_matches;
1407                 unsigned literal;
1408                 u32 cost;
1409
1410                 /*
1411                  * A selection of matches for the block was already saved in
1412                  * memory so that we don't have to run the uncompressed data
1413                  * through the matchfinder on every optimization pass.  However,
1414                  * we still search for repeat offset matches during each
1415                  * optimization pass because we cannot predict the state of the
1416                  * recent offsets queue.  But as a heuristic, we don't bother
1417                  * searching for repeat offset matches if the general-purpose
1418                  * matchfinder failed to find any matches.
1419                  *
1420                  * Note that a match of length n at some offset implies there is
1421                  * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1422                  * that same offset.  In other words, we don't necessarily need
1423                  * to use the full length of a match.  The key heuristic that
1424                  * saves a significant amount of time is that for each
1425                  * distinct length, we only consider the smallest offset for
1426                  * which that length is available.  This heuristic also applies
1427                  * to repeat offsets, which we order specially: R0 < R1 < R2 <
1428                  * any explicit offset.  Of course, this heuristic may
1429                  * produce suboptimal results because offset slots in LZX are
1430                  * subject to entropy encoding, but in practice this is a useful
1431                  * heuristic.
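                 *
                 * For example, if the repeat offsets provide matches covering
                 * lengths 2 through 4, and the cache then holds (length 5,
                 * offset 120) followed by (length 8, offset 4096), then length
                 * 5 is costed only at offset 120 and lengths 6 through 8 only
                 * at offset 4096.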
1432                  */
1433
1434                 num_matches = cache_ptr->length;
1435                 cache_ptr++;
1436
1437                 if (num_matches) {
1438                         struct lz_match *end_matches = cache_ptr + num_matches;
1439                         unsigned next_len = LZX_MIN_MATCH_LEN;
1440                         unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1441                         const u8 *matchptr;
1442
1443                         /* Consider R0 match  */
1444                         matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1445                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1446                                 goto R0_done;
1447                         STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
1448                         do {
1449                                 u32 cost = cur_node->cost +
1450                                            c->costs.match_cost[0][
1451                                                         next_len - LZX_MIN_MATCH_LEN];
1452                                 if (cost <= (cur_node + next_len)->cost) {
1453                                         (cur_node + next_len)->cost = cost;
1454                                         (cur_node + next_len)->item =
1455                                                 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1456                                 }
1457                                 if (unlikely(++next_len > max_len)) {
1458                                         cache_ptr = end_matches;
1459                                         goto done_matches;
1460                                 }
1461                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1462
1463                 R0_done:
1464
1465                         /* Consider R1 match  */
1466                         matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1467                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1468                                 goto R1_done;
1469                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1470                                 goto R1_done;
1471                         for (unsigned len = 2; len < next_len - 1; len++)
1472                                 if (matchptr[len] != in_next[len])
1473                                         goto R1_done;
1474                         do {
1475                                 u32 cost = cur_node->cost +
1476                                            c->costs.match_cost[1][
1477                                                         next_len - LZX_MIN_MATCH_LEN];
1478                                 if (cost <= (cur_node + next_len)->cost) {
1479                                         (cur_node + next_len)->cost = cost;
1480                                         (cur_node + next_len)->item =
1481                                                 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1482                                 }
1483                                 if (unlikely(++next_len > max_len)) {
1484                                         cache_ptr = end_matches;
1485                                         goto done_matches;
1486                                 }
1487                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1488
1489                 R1_done:
1490
1491                         /* Consider R2 match  */
1492                         matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1493                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1494                                 goto R2_done;
1495                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1496                                 goto R2_done;
1497                         for (unsigned len = 2; len < next_len - 1; len++)
1498                                 if (matchptr[len] != in_next[len])
1499                                         goto R2_done;
1500                         do {
1501                                 u32 cost = cur_node->cost +
1502                                            c->costs.match_cost[2][
1503                                                         next_len - LZX_MIN_MATCH_LEN];
1504                                 if (cost <= (cur_node + next_len)->cost) {
1505                                         (cur_node + next_len)->cost = cost;
1506                                         (cur_node + next_len)->item =
1507                                                 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1508                                 }
1509                                 if (unlikely(++next_len > max_len)) {
1510                                         cache_ptr = end_matches;
1511                                         goto done_matches;
1512                                 }
1513                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1514
1515                 R2_done:
1516
1517                         while (next_len > cache_ptr->length)
1518                                 if (++cache_ptr == end_matches)
1519                                         goto done_matches;
1520
1521                         /* Consider explicit offset matches  */
1522                         do {
1523                                 u32 offset = cache_ptr->offset;
1524                                 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1525                                 unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data);
1526                                 do {
1527                                         u32 cost = cur_node->cost +
1528                                                    c->costs.match_cost[offset_slot][
1529                                                                 next_len - LZX_MIN_MATCH_LEN];
1530                                 #if LZX_CONSIDER_ALIGNED_COSTS
1531                                         if (lzx_extra_offset_bits[offset_slot] >=
1532                                             LZX_NUM_ALIGNED_OFFSET_BITS)
1533                                                 cost += c->costs.aligned[offset_data &
1534                                                                          LZX_ALIGNED_OFFSET_BITMASK];
1535                                 #endif
1536                                         if (cost < (cur_node + next_len)->cost) {
1537                                                 (cur_node + next_len)->cost = cost;
1538                                                 (cur_node + next_len)->item =
1539                                                         (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1540                                         }
1541                                 } while (++next_len <= cache_ptr->length);
1542                         } while (++cache_ptr != end_matches);
1543                 }
1544
1545         done_matches:
1546
1547                 /* Consider coding a literal.
1548                  *
1549                  * To avoid an extra branch, actually checking the preferability
1550                  * of coding the literal is integrated into the queue update
1551                  * code below.  */
1552                 literal = *in_next++;
1553                 cost = cur_node->cost +
1554                        c->costs.main[lzx_main_symbol_for_literal(literal)];
1555
1556                 /* Advance to the next position.  */
1557                 cur_node++;
1558
1559                 /* The lowest-cost path to the current position is now known.
1560                  * Finalize the recent offsets queue that results from taking
1561                  * this lowest-cost path.  */
1562
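                /* Note that '<=' makes a cost tie go to the literal, in which
                 * case the queue is simply carried over unchanged.  */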
1563                 if (cost <= cur_node->cost) {
1564                         /* Literal: queue remains unchanged.  */
1565                         cur_node->cost = cost;
1566                         cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
1567                         QUEUE(in_next) = QUEUE(in_next - 1);
1568                 } else {
1569                         /* Match: queue update is needed.  */
1570                         unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1571                         u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1572                         if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1573                                 /* Explicit offset match: insert offset at front  */
1574                                 QUEUE(in_next) =
1575                                         lzx_lru_queue_push(QUEUE(in_next - len),
1576                                                            offset_data - LZX_OFFSET_ADJUSTMENT);
1577                         } else {
1578                                 /* Repeat offset match: swap offset to front  */
1579                                 QUEUE(in_next) =
1580                                         lzx_lru_queue_swap(QUEUE(in_next - len),
1581                                                            offset_data);
1582                         }
1583                 }
1584         } while (cur_node != end_node);
1585
1586         /* Return the match offset queue at the end of the minimum cost path. */
1587         return QUEUE(block_end);
1588 }
1589
1590 /* Given the costs for the main and length codewords, compute 'match_costs'.  */
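/*
 * Roughly: for adjusted lengths below LZX_NUM_PRIMARY_LENS,
 *
 *     match_cost[slot][adj_len] = cost of main symbol + cost of extra offset bits
 *
 * and for longer lengths, the main symbol whose length header is
 * LZX_NUM_PRIMARY_LENS is charged once (folded into 'extra_cost') plus the
 * appropriate length-code symbol.  When LZX_CONSIDER_ALIGNED_COSTS is enabled,
 * the aligned offset bits are excluded here because they are costed
 * separately.  Costs are scaled so that LZX_BIT_COST represents one bit.
 */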
1591 static void
1592 lzx_compute_match_costs(struct lzx_compressor *c)
1593 {
1594         unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
1595         struct lzx_costs *costs = &c->costs;
1596
1597         for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1598
1599                 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1600                 unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
1601                 unsigned i;
1602
1603         #if LZX_CONSIDER_ALIGNED_COSTS
1604                 if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
1605                         extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1606         #endif
1607
1608                 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1609                         costs->match_cost[offset_slot][i] =
1610                                 costs->main[main_symbol++] + extra_cost;
1611
1612                 extra_cost += costs->main[main_symbol];
1613
1614                 for (; i < LZX_NUM_LENS; i++)
1615                         costs->match_cost[offset_slot][i] =
1616                                 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
1617         }
1618 }
1619
1620 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
1621  * algorithm.  */
1622 static void
1623 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1624 {
1625         u32 i;
1626         bool have_byte[256];
1627         unsigned num_used_bytes;
1628
1629         /* The costs below are hard coded to use a scaling factor of 16.  */
1630         STATIC_ASSERT(LZX_BIT_COST == 16);
1631
1632         /*
1633          * Heuristics:
1634          *
1635          * - Use smaller initial costs for literal symbols when the input buffer
1636          *   contains fewer distinct bytes.
1637          *
1638          * - Assume that match symbols are more costly than literal symbols.
1639          *
1640          * - Assume that length symbols for shorter lengths are less costly than
1641          *   length symbols for longer lengths.
1642          */
1643
1644         for (i = 0; i < 256; i++)
1645                 have_byte[i] = false;
1646
1647         for (i = 0; i < block_size; i++)
1648                 have_byte[block[i]] = true;
1649
1650         num_used_bytes = 0;
1651         for (i = 0; i < 256; i++)
1652                 num_used_bytes += have_byte[i];
1653
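        /* For example, a block using 64 distinct byte values gives each
         * literal a default cost of 140 - 192/4 = 92, i.e. 92/16 = 5.75 bits.  */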
1654         for (i = 0; i < 256; i++)
1655                 c->costs.main[i] = 140 - (256 - num_used_bytes) / 4;
1656
1657         for (; i < c->num_main_syms; i++)
1658                 c->costs.main[i] = 170;
1659
1660         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1661                 c->costs.len[i] = 103 + (i / 4);
1662
1663 #if LZX_CONSIDER_ALIGNED_COSTS
1664         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1665                 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1666 #endif
1667
1668         lzx_compute_match_costs(c);
1669 }
1670
1671 /* Update the current cost model to reflect the computed Huffman codes.  */
1672 static void
1673 lzx_update_costs(struct lzx_compressor *c)
1674 {
1675         unsigned i;
1676         const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1677
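        /* A codeword length of 0 means the symbol was unused in the previous
         * pass; such symbols are charged a high default cost (15 bits for main
         * and length symbols, 7 bits for aligned symbols) rather than 0.  */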
1678         for (i = 0; i < c->num_main_syms; i++)
1679                 c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
1680
1681         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1682                 c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
1683
1684 #if LZX_CONSIDER_ALIGNED_COSTS
1685         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1686                 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
1687 #endif
1688
1689         lzx_compute_match_costs(c);
1690 }
1691
1692 static struct lzx_lru_queue
1693 lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
1694                              struct lzx_output_bitstream * const restrict os,
1695                              const u8 * const restrict block_begin,
1696                              const u32 block_size,
1697                              const struct lzx_lru_queue initial_queue)
1698 {
1699         unsigned num_passes_remaining = c->num_optim_passes;
1700         struct lzx_lru_queue new_queue;
1701         u32 seq_idx;
1702
1703         /* The first optimization pass uses a default cost model.  Each
1704          * additional optimization pass uses a cost model derived from the
1705          * Huffman code computed in the previous pass.  */
1706
1707         lzx_set_default_costs(c, block_begin, block_size);
1708         lzx_reset_symbol_frequencies(c);
1709         do {
1710                 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1711                                                    initial_queue);
1712                 if (num_passes_remaining > 1) {
1713                         lzx_tally_item_list(c, block_size);
1714                         lzx_make_huffman_codes(c);
1715                         lzx_update_costs(c);
1716                         lzx_reset_symbol_frequencies(c);
1717                 }
1718         } while (--num_passes_remaining);
1719
1720         seq_idx = lzx_record_item_list(c, block_size);
1721         lzx_finish_block(c, os, block_begin, block_size, seq_idx);
1722         return new_queue;
1723 }
1724
1725 /*
1726  * This is the "near-optimal" LZX compressor.
1727  *
1728  * For each block, it performs a relatively thorough graph search to find an
1729  * inexpensive (in terms of compressed size) way to output that block.
1730  *
1731  * Note: there are actually many things this algorithm leaves on the table in
1732  * terms of compression ratio.  So although it may be "near-optimal", it is
1733  * certainly not "optimal".  The goal is not to produce the optimal compression
1734  * ratio, which for LZX is probably impossible within any practical amount of
1735  * time, but rather to produce a compression ratio significantly better than a
1736  * simpler "greedy" or "lazy" parse while still being relatively fast.
1737  */
1738 static void
1739 lzx_compress_near_optimal(struct lzx_compressor *c,
1740                           struct lzx_output_bitstream *os)
1741 {
1742         const u8 * const in_begin = c->in_buffer;
1743         const u8 *       in_next = in_begin;
1744         const u8 * const in_end  = in_begin + c->in_nbytes;
1745         unsigned max_len = LZX_MAX_MATCH_LEN;
1746         unsigned nice_len = min(c->nice_match_length, max_len);
1747         u32 next_hash;
1748         struct lzx_lru_queue queue;
1749
1750         bt_matchfinder_init(&c->bt_mf);
1751         memset(c->hash2_tab, 0, sizeof(c->hash2_tab));
1752         next_hash = bt_matchfinder_hash_3_bytes(in_next);
1753         lzx_lru_queue_init(&queue);
1754
1755         do {
1756                 /* Starting a new block  */
1757                 const u8 * const in_block_begin = in_next;
1758                 const u8 * const in_block_end =
1759                         in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1760
1761                 /* Run the block through the matchfinder and cache the matches. */
1762                 struct lz_match *cache_ptr = c->match_cache;
1763                 do {
1764                         struct lz_match *lz_matchptr;
1765                         u32 hash2;
1766                         pos_t cur_match;
1767                         unsigned best_len;
1768
1769                         /* If approaching the end of the input buffer, adjust
1770                          * 'max_len' and 'nice_len' accordingly.  */
1771                         if (unlikely(max_len > in_end - in_next)) {
1772                                 max_len = in_end - in_next;
1773                                 nice_len = min(max_len, nice_len);
1774
1775                                 /* This extra check is needed to ensure that we
1776                                  * never output a length 2 match of the very
1777                                  * last two bytes with the very first two bytes,
1778                                  * since such a match has an offset too large to
1779                                  * be represented.  */
1780                                 if (unlikely(max_len < 3)) {
1781                                         in_next++;
1782                                         cache_ptr->length = 0;
1783                                         cache_ptr++;
1784                                         continue;
1785                                 }
1786                         }
1787
1788                         lz_matchptr = cache_ptr + 1;
1789
1790                         /* Check for a length 2 match.  */
1791                         hash2 = lz_hash_2_bytes(in_next, LZX_HASH2_ORDER);
1792                         cur_match = c->hash2_tab[hash2];
1793                         c->hash2_tab[hash2] = in_next - in_begin;
1794                         if (cur_match != 0 &&
1795                             (LZX_HASH2_ORDER == 16 ||
1796                              load_u16_unaligned(&in_begin[cur_match]) ==
1797                              load_u16_unaligned(in_next)))
1798                         {
1799                                 lz_matchptr->length = 2;
1800                                 lz_matchptr->offset = in_next - &in_begin[cur_match];
1801                                 lz_matchptr++;
1802                         }
1803
1804                         /* Check for matches of length >= 3.  */
1805                         lz_matchptr = bt_matchfinder_get_matches(&c->bt_mf,
1806                                                                  in_begin,
1807                                                                  in_next,
1808                                                                  3,
1809                                                                  max_len,
1810                                                                  nice_len,
1811                                                                  c->max_search_depth,
1812                                                                  &next_hash,
1813                                                                  &best_len,
1814                                                                  lz_matchptr);
1815                         in_next++;
1816                         cache_ptr->length = lz_matchptr - (cache_ptr + 1);
1817                         cache_ptr = lz_matchptr;
1818
1819                         /*
1820                          * If there was a very long match found, then don't
1821                          * cache any matches for the bytes covered by that
1822                          * match.  This avoids degenerate behavior when
1823                          * compressing highly redundant data, where the number
1824                          * of matches can be very large.
1825                          *
1826                          * This heuristic doesn't actually hurt the compression
1827                          * ratio very much.  If there's a long match, then the
1828                          * data must be highly compressible, so it doesn't
1829                          * matter as much what we do.
1830                          */
1831                         if (best_len >= nice_len) {
1832                                 --best_len;
1833                                 do {
1834                                         if (unlikely(max_len > in_end - in_next)) {
1835                                                 max_len = in_end - in_next;
1836                                                 nice_len = min(max_len, nice_len);
1837                                                 if (unlikely(max_len < 3)) {
1838                                                         in_next++;
1839                                                         cache_ptr->length = 0;
1840                                                         cache_ptr++;
1841                                                         continue;
1842                                                 }
1843                                         }
1844                                         c->hash2_tab[lz_hash_2_bytes(in_next, LZX_HASH2_ORDER)] =
1845                                                 in_next - in_begin;
1846                                         bt_matchfinder_skip_position(&c->bt_mf,
1847                                                                      in_begin,
1848                                                                      in_next,
1849                                                                      in_end,
1850                                                                      nice_len,
1851                                                                      c->max_search_depth,
1852                                                                      &next_hash);
1853                                         in_next++;
1854                                         cache_ptr->length = 0;
1855                                         cache_ptr++;
1856                                 } while (--best_len);
1857                         }
1858                 } while (in_next < in_block_end &&
1859                          likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
1860
1861                 /* We've finished running the block through the matchfinder.
1862                  * Now choose a match/literal sequence and write the block.  */
1863
1864                 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
1865                                                      in_next - in_block_begin,
1866                                                      queue);
1867         } while (in_next != in_end);
1868 }
1869
1870 /*
1871  * Given a pointer to the current byte sequence and the current list of recent
1872  * match offsets, find the longest repeat offset match.
1873  *
1874  * If no match of at least 2 bytes is found, then return 0.
1875  *
1876  * If a match of at least 2 bytes is found, then return its length and set
1877  * *rep_max_idx_ret to the index of its offset in @recent_offsets.
1878  */
1879 static unsigned
1880 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
1881                                      const u32 bytes_remaining,
1882                                      const u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
1883                                      unsigned *rep_max_idx_ret)
1884 {
1885         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
1886         LZX_ASSERT(bytes_remaining >= 2);
1887
1888         const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
1889         const u16 next_2_bytes = load_u16_unaligned(in_next);
1890         const u8 *matchptr;
1891         unsigned rep_max_len;
1892         unsigned rep_max_idx;
1893         unsigned rep_len;
1894
1895         matchptr = in_next - recent_offsets[0];
1896         if (load_u16_unaligned(matchptr) == next_2_bytes)
1897                 rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
1898         else
1899                 rep_max_len = 0;
1900         rep_max_idx = 0;
1901
1902         matchptr = in_next - recent_offsets[1];
1903         if (load_u16_unaligned(matchptr) == next_2_bytes) {
1904                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1905                 if (rep_len > rep_max_len) {
1906                         rep_max_len = rep_len;
1907                         rep_max_idx = 1;
1908                 }
1909         }
1910
1911         matchptr = in_next - recent_offsets[2];
1912         if (load_u16_unaligned(matchptr) == next_2_bytes) {
1913                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1914                 if (rep_len > rep_max_len) {
1915                         rep_max_len = rep_len;
1916                         rep_max_idx = 2;
1917                 }
1918         }
1919
1920         *rep_max_idx_ret = rep_max_idx;
1921         return rep_max_len;
1922 }
1923
1924 /* Fast heuristic scoring for lazy parsing: how "good" is this match?  */
1925 static inline unsigned
1926 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
1927 {
1928         unsigned score = len;
1929
1930         if (adjusted_offset < 4096)
1931                 score++;
1932
1933         if (adjusted_offset < 256)
1934                 score++;
1935
1936         return score;
1937 }
1938
1939 static inline unsigned
1940 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
1941 {
1942         return rep_len + 3;
1943 }
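
/*
 * For example, under these heuristics a length-4 match at adjusted offset 100
 * scores 4 + 1 + 1 = 6, whereas a length-4 repeat offset match scores
 * 4 + 3 = 7, so the repeat offset match is considered slightly preferable.
 */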
1944
1945 /* This is the "lazy" LZX compressor.  */
1946 static void
1947 lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
1948 {
1949         const u8 * const in_begin = c->in_buffer;
1950         const u8 *       in_next = in_begin;
1951         const u8 * const in_end  = in_begin + c->in_nbytes;
1952         unsigned max_len = LZX_MAX_MATCH_LEN;
1953         unsigned nice_len = min(c->nice_match_length, max_len);
1954         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
1955         u32 recent_offsets[3] = {1, 1, 1};
1956         u32 next_hashes[2] = {};
1957
1958         hc_matchfinder_init(&c->hc_mf);
1959
1960         do {
1961                 /* Starting a new block  */
1962
1963                 const u8 * const in_block_begin = in_next;
1964                 const u8 * const in_block_end =
1965                         in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1966                 struct lzx_sequence *next_seq = c->chosen_sequences;
1967                 unsigned cur_len;
1968                 u32 cur_offset;
1969                 u32 cur_offset_data;
1970                 unsigned cur_score;
1971                 unsigned next_len;
1972                 u32 next_offset;
1973                 u32 next_offset_data;
1974                 unsigned next_score;
1975                 unsigned rep_max_len;
1976                 unsigned rep_max_idx;
1977                 unsigned rep_score;
1978                 unsigned skip_len;
1979                 u32 litrunlen = 0;
1980
1981                 lzx_reset_symbol_frequencies(c);
1982
1983                 do {
1984                         if (unlikely(max_len > in_end - in_next)) {
1985                                 max_len = in_end - in_next;
1986                                 nice_len = min(max_len, nice_len);
1987                         }
1988
1989                         /* Find the longest match at the current position.  */
1990
1991                         cur_len = hc_matchfinder_longest_match(&c->hc_mf,
1992                                                                in_begin,
1993                                                                in_next - in_begin,
1994                                                                2,
1995                                                                max_len,
1996                                                                nice_len,
1997                                                                c->max_search_depth,
1998                                                                next_hashes,
1999                                                                &cur_offset);
2000                         if (cur_len < 3 ||
2001                             (cur_len == 3 &&
2002                              cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
2003                              cur_offset != recent_offsets[0] &&
2004                              cur_offset != recent_offsets[1] &&
2005                              cur_offset != recent_offsets[2]))
2006                         {
2007                                 /* There was no match found, or the only match found
2008                                  * was a distant length 3 match.  Output a literal.  */
2009                                 lzx_record_literal(c, *in_next++, &litrunlen);
2010                                 continue;
2011                         }
2012
2013                         if (cur_offset == recent_offsets[0]) {
2014                                 in_next++;
2015                                 cur_offset_data = 0;
2016                                 skip_len = cur_len - 1;
2017                                 goto choose_cur_match;
2018                         }
2019
2020                         cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
2021                         cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
2022
2023                         /* Consider a repeat offset match  */
2024                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2025                                                                            in_end - in_next,
2026                                                                            recent_offsets,
2027                                                                            &rep_max_idx);
2028                         in_next++;
2029
2030                         if (rep_max_len >= 3 &&
2031                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2032                                                                        rep_max_idx)) >= cur_score)
2033                         {
2034                                 cur_len = rep_max_len;
2035                                 cur_offset_data = rep_max_idx;
2036                                 skip_len = rep_max_len - 1;
2037                                 goto choose_cur_match;
2038                         }
2039
2040                 have_cur_match:
2041
2042                         /* We have a match at the current position.  */
2043
2044                         /* If we have a very long match, choose it immediately.  */
2045                         if (cur_len >= nice_len) {
2046                                 skip_len = cur_len - 1;
2047                                 goto choose_cur_match;
2048                         }
2049
2050                         /* See if there's a better match at the next position.  */
2051
2052                         if (unlikely(max_len > in_end - in_next)) {
2053                                 max_len = in_end - in_next;
2054                                 nice_len = min(max_len, nice_len);
2055                         }
2056
2057                         next_len = hc_matchfinder_longest_match(&c->hc_mf,
2058                                                                 in_begin,
2059                                                                 in_next - in_begin,
2060                                                                 cur_len - 2,
2061                                                                 max_len,
2062                                                                 nice_len,
2063                                                                 c->max_search_depth / 2,
2064                                                                 next_hashes,
2065                                                                 &next_offset);
2066
2067                         if (next_len <= cur_len - 2) {
2068                                 in_next++;
2069                                 skip_len = cur_len - 2;
2070                                 goto choose_cur_match;
2071                         }
2072
2073                         next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
2074                         next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
2075
2076                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2077                                                                            in_end - in_next,
2078                                                                            recent_offsets,
2079                                                                            &rep_max_idx);
2080                         in_next++;
2081
2082                         if (rep_max_len >= 3 &&
2083                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2084                                                                        rep_max_idx)) >= next_score)
2085                         {
2086
2087                                 if (rep_score > cur_score) {
2088                                         /* The next match is better, and it's a
2089                                          * repeat offset match.  */
2090                                         lzx_record_literal(c, *(in_next - 2),
2091                                                            &litrunlen);
2092                                         cur_len = rep_max_len;
2093                                         cur_offset_data = rep_max_idx;
2094                                         skip_len = cur_len - 1;
2095                                         goto choose_cur_match;
2096                                 }
2097                         } else {
2098                                 if (next_score > cur_score) {
2099                                         /* The next match is better, and it's an
2100                                          * explicit offset match.  */
2101                                         lzx_record_literal(c, *(in_next - 2),
2102                                                            &litrunlen);
2103                                         cur_len = next_len;
2104                                         cur_offset_data = next_offset_data;
2105                                         cur_score = next_score;
2106                                         goto have_cur_match;
2107                                 }
2108                         }
2109
2110                         /* The original match was better.  */
2111                         skip_len = cur_len - 2;
2112
2113                 choose_cur_match:
2114                         lzx_record_match(c, cur_len, cur_offset_data,
2115                                          recent_offsets, &litrunlen, &next_seq);
2116                         in_next = hc_matchfinder_skip_positions(&c->hc_mf,
2117                                                                 in_begin,
2118                                                                 in_next - in_begin,
2119                                                                 in_end - in_begin,
2120                                                                 skip_len,
2121                                                                 next_hashes);
2122                 } while (in_next < in_block_end);
2123
2124                 lzx_finish_sequence(next_seq, litrunlen);
2125
2126                 lzx_finish_block(c, os, in_block_begin, in_next - in_block_begin, 0);
2127
2128         } while (in_next != in_end);
2129 }
2130
2131 /* Generate the acceleration tables for offset slots.  */
2132 static void
2133 lzx_init_offset_slot_tabs(struct lzx_compressor *c)
2134 {
2135         u32 adjusted_offset = 0;
2136         unsigned slot = 0;
2137
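        /* offset_slot_tab_1 is indexed directly by small adjusted offsets,
         * while offset_slot_tab_2 is indexed by adjusted_offset >> 14 for
         * larger ones; the latter works because every slot boundary above the
         * range of the first table falls on a multiple of 2^14.  */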
2138         /* slots [0, 29]  */
2139         for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
2140              adjusted_offset++)
2141         {
2142                 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2143                         slot++;
2144                 c->offset_slot_tab_1[adjusted_offset] = slot;
2145         }
2146
2147         /* slots [30, 49]  */
2148         for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
2149              adjusted_offset += (u32)1 << 14)
2150         {
2151                 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2152                         slot++;
2153                 c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
2154         }
2155 }
2156
2157 static size_t
2158 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
2159 {
2160         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2161                 return offsetof(struct lzx_compressor, hc_mf) +
2162                         hc_matchfinder_size(max_bufsize);
2163         } else {
2164                 return offsetof(struct lzx_compressor, bt_mf) +
2165                         bt_matchfinder_size(max_bufsize);
2166         }
2167 }
2168
2169 static u64
2170 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
2171                       bool destructive)
2172 {
2173         u64 size = 0;
2174
2175         if (max_bufsize > LZX_MAX_WINDOW_SIZE)
2176                 return 0;
2177
2178         size += lzx_get_compressor_size(max_bufsize, compression_level);
2179         if (!destructive)
2180                 size += max_bufsize; /* in_buffer */
2181         return size;
2182 }
2183
2184 static int
2185 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
2186                       bool destructive, void **c_ret)
2187 {
2188         unsigned window_order;
2189         struct lzx_compressor *c;
2190
2191         window_order = lzx_get_window_order(max_bufsize);
2192         if (window_order == 0)
2193                 return WIMLIB_ERR_INVALID_PARAM;
2194
2195         c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
2196         if (!c)
2197                 goto oom0;
2198
2199         c->destructive = destructive;
2200
2201         c->num_main_syms = lzx_get_num_main_syms(window_order);
2202         c->window_order = window_order;
2203
2204         if (!c->destructive) {
2205                 c->in_buffer = MALLOC(max_bufsize);
2206                 if (!c->in_buffer)
2207                         goto oom1;
2208         }
2209
2210         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2211
2212                 /* Fast compression: Use lazy parsing.  */
2213
2214                 c->impl = lzx_compress_lazy;
2215                 c->max_search_depth = (36 * compression_level) / 20;
2216                 c->nice_match_length = (72 * compression_level) / 20;
2217
2218                 /* lzx_compress_lazy() needs max_search_depth >= 2 because it
2219                  * halves the max_search_depth when attempting a lazy match, and
2220                  * max_search_depth cannot be 0.  */
2221                 if (c->max_search_depth < 2)
2222                         c->max_search_depth = 2;
2223         } else {
2224
2225                 /* Normal / high compression: Use near-optimal parsing.  */
2226
2227                 c->impl = lzx_compress_near_optimal;
2228
2229                 /* Scale nice_match_length and max_search_depth with the
2230                  * compression level.  */
2231                 c->max_search_depth = (24 * compression_level) / 50;
2232                 c->nice_match_length = (32 * compression_level) / 50;
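                /* For example, compression level 50 gives max_search_depth ==
                 * 24 and nice_match_length == 32, with 2 optimization passes
                 * (see below).  */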
2233
2234                 /* Set a number of optimization passes appropriate for the
2235                  * compression level.  */
2236
2237                 c->num_optim_passes = 1;
2238
2239                 if (compression_level >= 45)
2240                         c->num_optim_passes++;
2241
2242                 /* Use more optimization passes for higher compression levels.
2243                  * But the more passes there are, the less they help --- so
2244                  * don't add them linearly.  */
2245                 if (compression_level >= 70) {
2246                         c->num_optim_passes++;
2247                         if (compression_level >= 100)
2248                                 c->num_optim_passes++;
2249                         if (compression_level >= 150)
2250                                 c->num_optim_passes++;
2251                         if (compression_level >= 200)
2252                                 c->num_optim_passes++;
2253                         if (compression_level >= 300)
2254                                 c->num_optim_passes++;
2255                 }
2256         }
2257
2258         /* max_search_depth == 0 is invalid.  */
2259         if (c->max_search_depth < 1)
2260                 c->max_search_depth = 1;
2261
2262         if (c->nice_match_length > LZX_MAX_MATCH_LEN)
2263                 c->nice_match_length = LZX_MAX_MATCH_LEN;
2264
2265         lzx_init_offset_slot_tabs(c);
2266         *c_ret = c;
2267         return 0;
2268
2269 oom1:
2270         FREE(c);
2271 oom0:
2272         return WIMLIB_ERR_NOMEM;
2273 }
2274
2275 static size_t
2276 lzx_compress(const void *restrict in, size_t in_nbytes,
2277              void *restrict out, size_t out_nbytes_avail, void *restrict _c)
2278 {
2279         struct lzx_compressor *c = _c;
2280         struct lzx_output_bitstream os;
2281         size_t result;
2282
2283         /* Don't bother trying to compress very small inputs.  */
2284         if (in_nbytes < 100)
2285                 return 0;
2286
2287         /* Copy the input data into the internal buffer and preprocess it.  */
2288         if (c->destructive)
2289                 c->in_buffer = (void *)in;
2290         else
2291                 memcpy(c->in_buffer, in, in_nbytes);
2292         c->in_nbytes = in_nbytes;
2293         lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
2294
2295         /* Initially, the previous Huffman codeword lengths are all zeroes.  */
2296         c->codes_index = 0;
2297         memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2298
2299         /* Initialize the output bitstream.  */
2300         lzx_init_output(&os, out, out_nbytes_avail);
2301
2302         /* Call the compression level-specific compress() function.  */
2303         (*c->impl)(c, &os);
2304
2305         /* Flush the output bitstream and return the compressed size or 0.  */
2306         result = lzx_flush_output(&os);
2307         if (!result && c->destructive)
2308                 lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes);
2309         return result;
2310 }
2311
2312 static void
2313 lzx_free_compressor(void *_c)
2314 {
2315         struct lzx_compressor *c = _c;
2316
2317         if (!c->destructive)
2318                 FREE(c->in_buffer);
2319         FREE(c);
2320 }
2321
2322 const struct compressor_ops lzx_compressor_ops = {
2323         .get_needed_memory  = lzx_get_needed_memory,
2324         .create_compressor  = lzx_create_compressor,
2325         .compress           = lzx_compress,
2326         .free_compressor    = lzx_free_compressor,
2327 };