1 /*
2  * lzx_compress.c
3  *
4  * A compressor for the LZX compression format, as used in WIM files.
5  */
6
7 /*
8  * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
9  *
10  * This file is free software; you can redistribute it and/or modify it under
11  * the terms of the GNU Lesser General Public License as published by the Free
12  * Software Foundation; either version 3 of the License, or (at your option) any
13  * later version.
14  *
15  * This file is distributed in the hope that it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
18  * details.
19  *
20  * You should have received a copy of the GNU Lesser General Public License
21  * along with this file; if not, see http://www.gnu.org/licenses/.
22  */
23
24
25 /*
26  * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27  * compression format, as used in the WIM (Windows IMaging) file format.
28  *
29  * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30  * "Near-optimal" is significantly slower than "lazy", but results in a better
31  * compression ratio.  The "near-optimal" algorithm is used at the default
32  * compression level.
33  *
34  * This file may need some slight modifications to be used outside of the WIM
35  * format.  In particular, in other situations the LZX block header might be
36  * slightly different, and sliding window support might be required.
37  *
38  * Note: LZX is a compression format derived from DEFLATE, the format used by
39  * zlib and gzip.  Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40  * Certain details are quite similar, such as the method for storing Huffman
41  * codes.  However, the main differences are:
42  *
43  * - LZX preprocesses the data in an attempt to make x86 machine code slightly
44  *   more compressible before it is actually compressed.
45  *
46  * - LZX uses a "main" alphabet which combines literals and matches, with the
47  *   match symbols containing a "length header" (giving all or part of the match
48  *   length) and an "offset slot" (giving, roughly speaking, the order of
49  *   magnitude of the match offset).
50  *
51  * - LZX does not have static Huffman blocks (that is, the kind with preset
52  *   Huffman codes); however it does have two types of dynamic Huffman blocks
53  *   ("verbatim" and "aligned").
54  *
55  * - LZX has a minimum match length of 2 rather than 3.  Length 2 matches can be
56  *   useful, but generally only if the parser is smart about choosing them.
57  *
58  * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59  *   of match offsets.  This is very useful for certain types of files, such as
60  *   binary files that have repeating records.
61  */
62
63 #ifdef HAVE_CONFIG_H
64 #  include "config.h"
65 #endif
66
67 /*
68  * Start a new LZX block (with new Huffman codes) after this many bytes.
69  *
70  * Note: actual block sizes may slightly exceed this value.
71  *
72  * TODO: recursive splitting and cost evaluation might be good for an extremely
73  * high compression mode, but otherwise it is almost always far too slow for how
74  * much it helps.  Perhaps some sort of heuristic would be useful?
75  */
76 #define LZX_DIV_BLOCK_SIZE      32768
77
78 /*
79  * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the
80  * match cache for each byte position.  This value should be high enough so that
81  * nearly all the time, all matches found in a given block can fit in the match
82  * cache.  However, fallback behavior (immediately terminating the block) on
83  * cache overflow is still required.
84  */
85 #define LZX_CACHE_PER_POS       7
86
87 /*
88  * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
89  * excluding the extra "overflow" entries.  The per-position multiplier is '1 +
90  * LZX_CACHE_PER_POS' instead of 'LZX_CACHE_PER_POS' because there is an
91  * overhead of one lz_match per position, used to hold the match count at that
92  * position.
93  */
94 #define LZX_CACHE_LENGTH        (LZX_DIV_BLOCK_SIZE * (1 + LZX_CACHE_PER_POS))
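/*
 * For illustration: with the values above, LZX_CACHE_LENGTH works out to
 * 32768 * (1 + 7) = 262144 'struct lz_match' slots per block, before the
 * extra overflow entries that are added where the match cache array itself
 * is declared.
 */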
95
96 /*
97  * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
98  * ever be saved in the match cache for a single position.  Since each match we
99  * save for a single position has a distinct length, we can use the number of
100  * possible match lengths in LZX as this bound.  This bound is guaranteed to be
101  * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
102  * it will never actually be reached.
103  */
104 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
105
106 /*
107  * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
108  * This makes it possible to consider fractional bit costs.
109  *
110  * Note: this is only useful as a statistical trick for when the true costs are
111  * unknown.  In reality, each token in LZX requires a whole number of bits to
112  * output.
113  */
114 #define LZX_BIT_COST            16
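/*
 * For illustration only (hypothetical helper, shown just to illustrate the
 * scaling; it is not used below): converting a whole number of bits into
 * the scaled cost domain.  A 5-bit codeword costs 5 * LZX_BIT_COST = 80,
 * and an estimate of "5.5 bits" can be represented as 88.
 */
static inline unsigned
lzx_whole_bits_to_cost_example(unsigned num_bits)
{
        return num_bits * LZX_BIT_COST;
}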
115
116 /*
117  * Consideration of aligned offset costs is disabled for now, due to
118  * insufficient benefit gained from the time spent.
119  */
120 #define LZX_CONSIDER_ALIGNED_COSTS      0
121
122 /*
123  * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
124  * faster algorithm.
125  */
126 #define LZX_MAX_FAST_LEVEL      34
127
128 /*
129  * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
130  * for finding length 2 matches.  This can be as high as 16 (in which case the
131  * hash function is trivial), but using a smaller hash table speeds up
132  * compression due to reduced cache pressure.
133  */
134 #define LZX_HASH2_ORDER         12
135 #define LZX_HASH2_LENGTH        (1UL << LZX_HASH2_ORDER)
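/*
 * For illustration only (hypothetical helper, not used below): one way a
 * 2-byte sequence could be reduced to an index into a table of
 * LZX_HASH2_LENGTH entries is a multiplicative hash that keeps the top
 * LZX_HASH2_ORDER bits of the 32-bit product.  The multiplier is an
 * arbitrary odd constant; the matchfinding code may hash differently.
 */
static inline unsigned long
lzx_hash2_example(const unsigned char *p)
{
        unsigned long seq = (unsigned long)p[0] | ((unsigned long)p[1] << 8);

        return ((seq * 0x9E3779B1UL) & 0xFFFFFFFFUL) >> (32 - LZX_HASH2_ORDER);
}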
136
137 #include "wimlib/lzx_common.h"
138
139 /*
140  * The maximum allowed window order for the matchfinder.
141  */
142 #define MATCHFINDER_MAX_WINDOW_ORDER    LZX_MAX_WINDOW_ORDER
143
144 #include <string.h>
145
146 #include "wimlib/bt_matchfinder.h"
147 #include "wimlib/compress_common.h"
148 #include "wimlib/compressor_ops.h"
149 #include "wimlib/error.h"
150 #include "wimlib/hc_matchfinder.h"
151 #include "wimlib/lz_extend.h"
152 #include "wimlib/unaligned.h"
153 #include "wimlib/util.h"
154
155 struct lzx_output_bitstream;
156
157 /* Codewords for the LZX Huffman codes.  */
158 struct lzx_codewords {
159         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
160         u32 len[LZX_LENCODE_NUM_SYMBOLS];
161         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
162 };
163
164 /* Codeword lengths (in bits) for the LZX Huffman codes.
165  * A zero length means the corresponding codeword has zero frequency.  */
166 struct lzx_lens {
167         u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
168         u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
169         u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
170 };
171
172 /* Cost model for near-optimal parsing  */
173 struct lzx_costs {
174
175         /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
176          * length 'len' match that has an offset belonging to 'offset_slot'.  */
177         u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
178
179         /* Cost for each symbol in the main code  */
180         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
181
182         /* Cost for each symbol in the length code  */
183         u32 len[LZX_LENCODE_NUM_SYMBOLS];
184
185 #if LZX_CONSIDER_ALIGNED_COSTS
186         /* Cost for each symbol in the aligned code  */
187         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
188 #endif
189 };
190
191 /* Codewords and lengths for the LZX Huffman codes.  */
192 struct lzx_codes {
193         struct lzx_codewords codewords;
194         struct lzx_lens lens;
195 };
196
197 /* Symbol frequency counters for the LZX Huffman codes.  */
198 struct lzx_freqs {
199         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
200         u32 len[LZX_LENCODE_NUM_SYMBOLS];
201         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
202 };
203
204 /*
205  * Represents a run of literals followed by a match or end-of-block.  This
206  * struct is needed to temporarily store items chosen by the parser, since items
207  * cannot be written until all items for the block have been chosen and the
208  * block's Huffman codes have been computed.
209  */
210 struct lzx_sequence {
211
212         /* The number of literals in the run.  This may be 0.  The literals are
213          * not stored explicitly in this structure; instead, they are read
214          * directly from the uncompressed data.  */
215         u16 litrunlen;
216
217         /* If the next field doesn't indicate end-of-block, then this is the
218          * match length minus LZX_MIN_MATCH_LEN.  */
219         u16 adjusted_length;
220
221         /* If bit 31 is clear, then this field contains the match header in bits
222          * 0-8 and the match offset plus LZX_OFFSET_ADJUSTMENT in bits 9-30.
223          * Otherwise, this sequence's literal run was the last literal run in
224          * the block, so there is no match that follows it.  */
225         u32 adjusted_offset_and_match_hdr;
226 };
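/*
 * For illustration only (hypothetical helper; lzx_record_match() and
 * lzx_write_sequences() below do the real packing and unpacking inline):
 * how the two bitfields of 'adjusted_offset_and_match_hdr' break down for a
 * sequence that is followed by a match.
 */
static inline void
lzx_sequence_unpack_example(const struct lzx_sequence *seq,
                            unsigned *match_hdr_ret, u32 *adjusted_offset_ret)
{
        *match_hdr_ret = seq->adjusted_offset_and_match_hdr & 0x1FF;    /* bits 0-8  */
        *adjusted_offset_ret = seq->adjusted_offset_and_match_hdr >> 9; /* bits 9-30 */
}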
227
228 /*
229  * This structure represents a byte position in the input buffer and a node in
230  * the graph of possible match/literal choices.
231  *
232  * Logically, each incoming edge to this node is labeled with a literal or a
233  * match that can be taken to reach this position from an earlier position; and
234  * each outgoing edge from this node is labeled with a literal or a match that
235  * can be taken to advance from this position to a later position.
236  */
237 struct lzx_optimum_node {
238
239         /* The cost, in bits, of the lowest-cost path that has been found to
240          * reach this position.  This can change as progressively lower cost
241          * paths are found to reach this position.  */
242         u32 cost;
243
244         /*
245          * The match or literal that was taken to reach this position.  This can
246          * change as progressively lower cost paths are found to reach this
247          * position.
248          *
249          * This variable is divided into two bitfields.
250          *
251          * Literals:
252          *      Low bits are 0, high bits are the literal.
253          *
254          * Explicit offset matches:
255          *      Low bits are the match length, high bits are the offset plus 2.
256          *
257          * Repeat offset matches:
258          *      Low bits are the match length, high bits are the queue index.
259          */
260         u32 item;
261 #define OPTIMUM_OFFSET_SHIFT 9
262 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
263 } _aligned_attribute(8);
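/*
 * For illustration only (hypothetical helpers, not used below; the parsing
 * code builds these values inline): how an 'item' value encodes each of the
 * three cases described above.
 */
static inline u32
lzx_optimum_item_for_literal_example(unsigned literal)
{
        return (u32)literal << OPTIMUM_OFFSET_SHIFT;    /* length bits are 0 */
}

static inline u32
lzx_optimum_item_for_match_example(u32 offset_data, unsigned len)
{
        /* 'offset_data' is the offset plus 2 for an explicit offset match,
         * or the queue index for a repeat offset match.  */
        return (offset_data << OPTIMUM_OFFSET_SHIFT) | len;
}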
264
265 /*
266  * Least-recently-used queue for match offsets.
267  *
268  * This is represented as a 64-bit integer for efficiency.  There are three
269  * offsets of 21 bits each.  Bit 64 is garbage.
270  */
271 struct lzx_lru_queue {
272         u64 R;
273 };
274
275 #define LZX_QUEUE64_OFFSET_SHIFT 21
276 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
277
278 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
279 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
280 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
281
282 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
283 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
284 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
285
286 static inline void
287 lzx_lru_queue_init(struct lzx_lru_queue *queue)
288 {
289         queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
290                    ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
291                    ((u64)1 << LZX_QUEUE64_R2_SHIFT);
292 }
293
294 static inline u64
295 lzx_lru_queue_R0(struct lzx_lru_queue queue)
296 {
297         return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
298 }
299
300 static inline u64
301 lzx_lru_queue_R1(struct lzx_lru_queue queue)
302 {
303         return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
304 }
305
306 static inline u64
307 lzx_lru_queue_R2(struct lzx_lru_queue queue)
308 {
309         return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
310 }
311
312 /* Push a match offset onto the front (most recently used) end of the queue.  */
313 static inline struct lzx_lru_queue
314 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
315 {
316         return (struct lzx_lru_queue) {
317                 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
318         };
319 }
320
321 /* Pop a match offset off the front (most recently used) end of the queue.  */
322 static inline u32
323 lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
324 {
325         u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
326         queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
327         return offset;
328 }
329
330 /* Swap a match offset to the front of the queue.  */
331 static inline struct lzx_lru_queue
332 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
333 {
334         if (idx == 0)
335                 return queue;
336
337         if (idx == 1)
338                 return (struct lzx_lru_queue) {
339                         .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
340                              (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
341                              (queue.R & LZX_QUEUE64_R2_MASK),
342                 };
343
344         return (struct lzx_lru_queue) {
345                 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
346                      (queue.R & LZX_QUEUE64_R1_MASK) |
347                      (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
348         };
349 }
350
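/*
 * For illustration only (hypothetical helper, not used below): the queue
 * semantics in action.  Pushing shifts R0 -> R1 -> R2, while swapping index
 * 2 moves R2 to the front and leaves R1 in place, matching LZX repeat
 * offset behavior.
 */
static inline void
lzx_lru_queue_usage_example(void)
{
        struct lzx_lru_queue q;

        lzx_lru_queue_init(&q);                 /* R0 = R1 = R2 = 1           */
        q = lzx_lru_queue_push(q, 100);         /* R0 = 100, R1 = 1,   R2 = 1 */
        q = lzx_lru_queue_push(q, 200);         /* R0 = 200, R1 = 100, R2 = 1 */
        q = lzx_lru_queue_swap(q, 2);           /* R0 = 1,   R1 = 100, R2 = 200 */
        (void)lzx_lru_queue_R0(q);              /* == 1 */
}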
351 /* The main LZX compressor structure  */
352 struct lzx_compressor {
353
354         /* The "nice" match length: if a match of this length is found, then
355          * choose it immediately without further consideration.  */
356         unsigned nice_match_length;
357
358         /* The maximum search depth: consider at most this many potential
359          * matches at each position.  */
360         unsigned max_search_depth;
361
362         /* The log base 2 of the LZX window size for LZ match offset encoding
363          * purposes.  This will be >= LZX_MIN_WINDOW_ORDER and <=
364          * LZX_MAX_WINDOW_ORDER.  */
365         unsigned window_order;
366
367         /* The number of symbols in the main alphabet.  This depends on
368          * @window_order, since @window_order determines the maximum possible
369          * offset.  */
370         unsigned num_main_syms;
371
372         /* Number of optimization passes per block  */
373         unsigned num_optim_passes;
374
375         /* The preprocessed buffer of data being compressed  */
376         u8 *in_buffer;
377
378         /* The number of bytes of data to be compressed, which is the number of
379          * bytes of data in @in_buffer that are actually valid.  */
380         size_t in_nbytes;
381
382         /* Pointer to the compress() implementation chosen at allocation time */
383         void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
384
385         /* If true, the compressor need not preserve the input buffer if it
386          * compresses the data successfully.  */
387         bool destructive;
388
389         /* The Huffman symbol frequency counters for the current block.  */
390         struct lzx_freqs freqs;
391
392         /* The Huffman codes for the current and previous blocks.  The one with
393          * index 'codes_index' is for the current block, and the other one is
394          * for the previous block.  */
395         struct lzx_codes codes[2];
396         unsigned codes_index;
397
398         /* The matches and literals that the parser has chosen for the current
399          * block.  The required length of this array is limited by the maximum
400          * number of matches that can ever be chosen for a single block.  */
401         struct lzx_sequence chosen_sequences[DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN)];
402
403         /* Tables for mapping adjusted offsets to offset slots  */
404
405         /* offset slots [0, 29]  */
406         u8 offset_slot_tab_1[32768];
407
408         /* offset slots [30, 49]  */
409         u8 offset_slot_tab_2[128];
410
411         union {
412                 /* Data for greedy or lazy parsing  */
413                 struct {
414                         /* Hash chains matchfinder (MUST BE LAST!!!)  */
415                         struct hc_matchfinder hc_mf;
416                 };
417
418                 /* Data for near-optimal parsing  */
419                 struct {
420                         /*
421                          * The graph nodes for the current block.
422                          *
423                          * We need at least 'LZX_DIV_BLOCK_SIZE +
424                          * LZX_MAX_MATCH_LEN - 1' nodes because that is the
425                          * maximum block size that may be used.  Add 1 because
426                          * we need a node to represent end-of-block.
427                          *
428                          * It is possible that nodes past end-of-block are
429                          * accessed during match consideration, but this can
430                          * only occur if the block was truncated at
431                          * LZX_DIV_BLOCK_SIZE.  So the same bound still applies.
432                          * Note that since nodes past the end of the block will
433                          * never actually have an effect on the items that are
434                          * chosen for the block, it makes no difference what
435                          * their costs are initialized to (if anything).
436                          */
437                         struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE +
438                                                               LZX_MAX_MATCH_LEN - 1 + 1];
439
440                         /* The cost model for the current block  */
441                         struct lzx_costs costs;
442
443                         /*
444                          * Cached matches for the current block.  This array
445                          * contains the matches that were found at each position
446                          * in the block.  Specifically, for each position, there
447                          * is a special 'struct lz_match' whose 'length' field
448                          * contains the number of matches that were found at
449                          * that position; this is followed by the matches
450                          * themselves, if any, sorted by strictly increasing
451                          * length.
452                          *
453                          * Note: in rare cases, there will be a very high number
454                          * of matches in the block and this array will overflow.
455                          * If this happens, we force the end of the current
456                          * block.  LZX_CACHE_LENGTH is the length at which we
457                          * actually check for overflow.  The extra slots beyond
458                          * this are enough to absorb the worst case overflow,
459                          * which occurs if starting at
460                          * &match_cache[LZX_CACHE_LENGTH - 1], we write the
461                          * match count header, then write
462                          * LZX_MAX_MATCHES_PER_POS matches, then skip searching
463                          * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
464                          * write the match count header for each.
465                          */
466                         struct lz_match match_cache[LZX_CACHE_LENGTH +
467                                                     LZX_MAX_MATCHES_PER_POS +
468                                                     LZX_MAX_MATCH_LEN - 1];
469
470                         /* Hash table for finding length 2 matches  */
471                         pos_t hash2_tab[LZX_HASH2_LENGTH];
472
473                         /* Binary trees matchfinder (MUST BE LAST!!!)  */
474                         struct bt_matchfinder bt_mf;
475                 };
476         };
477 };
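/*
 * For illustration only (hypothetical helper; the near-optimal parsing code
 * below walks the cache directly): stepping over one position's worth of
 * entries in the match cache laid out as described above.  'cache_ptr'
 * points at that position's count header; the matches, if any, follow it.
 */
static inline const struct lz_match *
lzx_match_cache_read_example(const struct lz_match *cache_ptr,
                             unsigned *num_matches_ret)
{
        *num_matches_ret = cache_ptr->length;   /* the count header */
        return cache_ptr + 1;                   /* the first cached match */
}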
478
479 /*
480  * Structure to keep track of the current state of sending bits to the
481  * compressed output buffer.
482  *
483  * The LZX bitstream is encoded as a sequence of 16-bit coding units.
484  */
485 struct lzx_output_bitstream {
486
487         /* Bits that haven't yet been written to the output buffer.  */
488         u32 bitbuf;
489
490         /* Number of bits currently held in @bitbuf.  */
491         u32 bitcount;
492
493         /* Pointer to the start of the output buffer.  */
494         u8 *start;
495
496         /* Pointer to the position in the output buffer at which the next coding
497          * unit should be written.  */
498         u8 *next;
499
500         /* Pointer just past the end of the output buffer, rounded down to a
501          * 2-byte boundary.  */
502         u8 *end;
503 };
504
505 /*
506  * Initialize the output bitstream.
507  *
508  * @os
509  *      The output bitstream structure to initialize.
510  * @buffer
511  *      The buffer being written to.
512  * @size
513  *      Size of @buffer, in bytes.
514  */
515 static void
516 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
517 {
518         os->bitbuf = 0;
519         os->bitcount = 0;
520         os->start = buffer;
521         os->next = os->start;
522         os->end = os->start + (size & ~1);
523 }
524
525 /*
526  * Write some bits to the output bitstream.
527  *
528  * The bits are given by the low-order @num_bits bits of @bits.  Higher-order
529  * bits in @bits must not be set.  At most 17 bits can be written at once.
530  *
531  * @max_num_bits is a compile-time constant that specifies the maximum number of
532  * bits that can ever be written at the call site.  It is used to optimize away
533  * the conditional code for writing a second 16-bit coding unit when writing
534  * fewer than 17 bits.
535  *
536  * If the output buffer space is exhausted, then the bits will be ignored, and
537  * lzx_flush_output() will return 0 when it gets called.
538  */
539 static inline void
540 lzx_write_varbits(struct lzx_output_bitstream *os,
541                   const u32 bits, const unsigned num_bits,
542                   const unsigned max_num_bits)
543 {
544         /* This code is optimized for LZX, which never needs to write more than
545          * 17 bits at once.  */
546         LZX_ASSERT(num_bits <= 17);
547         LZX_ASSERT(num_bits <= max_num_bits);
548         LZX_ASSERT(os->bitcount <= 15);
549
550         /* Add the bits to the bit buffer variable.  @bitcount will be at most
551          * 15, so there will be just enough space for the maximum possible
552          * @num_bits of 17.  */
553         os->bitcount += num_bits;
554         os->bitbuf = (os->bitbuf << num_bits) | bits;
555
556         /* Check whether any coding units need to be written.  */
557         if (os->bitcount >= 16) {
558
559                 os->bitcount -= 16;
560
561                 /* Write a coding unit, unless it would overflow the buffer.  */
562                 if (os->next != os->end) {
563                         put_unaligned_u16_le(os->bitbuf >> os->bitcount, os->next);
564                         os->next += 2;
565                 }
566
567                 /* If writing 17 bits, a second coding unit might need to be
568                  * written.  But because 'max_num_bits' is a compile-time
569                  * constant, the compiler will optimize away this code at most
570                  * call sites.  */
571                 if (max_num_bits == 17 && os->bitcount == 16) {
572                         if (os->next != os->end) {
573                                 put_unaligned_u16_le(os->bitbuf, os->next);
574                                 os->next += 2;
575                         }
576                         os->bitcount = 0;
577                 }
578         }
579 }
580
581 /* Use when @num_bits is a compile-time constant.  Otherwise use
582  * lzx_write_varbits().  */
583 static inline void
584 lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
585 {
586         lzx_write_varbits(os, bits, num_bits, num_bits);
587 }
588
589 /*
590  * Flush the last coding unit to the output buffer if needed.  Return the total
591  * number of bytes written to the output buffer, or 0 if an overflow occurred.
592  */
593 static u32
594 lzx_flush_output(struct lzx_output_bitstream *os)
595 {
596         if (os->next == os->end)
597                 return 0;
598
599         if (os->bitcount != 0) {
600                 put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next);
601                 os->next += 2;
602         }
603
604         return os->next - os->start;
605 }
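/*
 * For illustration only (hypothetical helper, not used below): the typical
 * lifecycle of the output bitstream as driven by the block-writing code
 * further down: initialize, write fields, then flush and check for
 * overflow.
 */
static inline u32
lzx_output_bitstream_usage_example(void *buffer, size_t size)
{
        struct lzx_output_bitstream os;

        lzx_init_output(&os, buffer, size);
        lzx_write_bits(&os, LZX_BLOCKTYPE_VERBATIM, 3); /* e.g. a block type field */
        return lzx_flush_output(&os);   /* 0 means the buffer overflowed */
}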
606
607 /* Build the main, length, and aligned offset Huffman codes used in LZX.
608  *
609  * This takes as input the frequency tables for each code and produces as output
610  * a set of tables that map symbols to codewords and codeword lengths.  */
611 static void
612 lzx_make_huffman_codes(struct lzx_compressor *c)
613 {
614         const struct lzx_freqs *freqs = &c->freqs;
615         struct lzx_codes *codes = &c->codes[c->codes_index];
616
617         make_canonical_huffman_code(c->num_main_syms,
618                                     LZX_MAX_MAIN_CODEWORD_LEN,
619                                     freqs->main,
620                                     codes->lens.main,
621                                     codes->codewords.main);
622
623         make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
624                                     LZX_MAX_LEN_CODEWORD_LEN,
625                                     freqs->len,
626                                     codes->lens.len,
627                                     codes->codewords.len);
628
629         make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
630                                     LZX_MAX_ALIGNED_CODEWORD_LEN,
631                                     freqs->aligned,
632                                     codes->lens.aligned,
633                                     codes->codewords.aligned);
634 }
635
636 /* Reset the symbol frequencies for the LZX Huffman codes.  */
637 static void
638 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
639 {
640         memset(&c->freqs, 0, sizeof(c->freqs));
641 }
642
643 static unsigned
644 lzx_compute_precode_items(const u8 lens[restrict],
645                           const u8 prev_lens[restrict],
646                           u32 precode_freqs[restrict],
647                           unsigned precode_items[restrict])
648 {
649         unsigned *itemptr;
650         unsigned run_start;
651         unsigned run_end;
652         unsigned extra_bits;
653         int delta;
654         u8 len;
655
656         itemptr = precode_items;
657         run_start = 0;
658
659         while (!((len = lens[run_start]) & 0x80)) {
660
661                 /* len = the length being repeated  */
662
663                 /* Find the next run of codeword lengths.  */
664
665                 run_end = run_start + 1;
666
667                 /* Fast case for a single length.  */
668                 if (likely(len != lens[run_end])) {
669                         delta = prev_lens[run_start] - len;
670                         if (delta < 0)
671                                 delta += 17;
672                         precode_freqs[delta]++;
673                         *itemptr++ = delta;
674                         run_start++;
675                         continue;
676                 }
677
678                 /* Extend the run.  */
679                 do {
680                         run_end++;
681                 } while (len == lens[run_end]);
682
683                 if (len == 0) {
684                         /* Run of zeroes.  */
685
686                         /* Symbol 18: RLE 20 to 51 zeroes at a time.  */
687                         while ((run_end - run_start) >= 20) {
688                                 extra_bits = min((run_end - run_start) - 20, 0x1f);
689                                 precode_freqs[18]++;
690                                 *itemptr++ = 18 | (extra_bits << 5);
691                                 run_start += 20 + extra_bits;
692                         }
693
694                         /* Symbol 17: RLE 4 to 19 zeroes at a time.  */
695                         if ((run_end - run_start) >= 4) {
696                                 extra_bits = min((run_end - run_start) - 4, 0xf);
697                                 precode_freqs[17]++;
698                                 *itemptr++ = 17 | (extra_bits << 5);
699                                 run_start += 4 + extra_bits;
700                         }
701                 } else {
702
703                         /* A run of nonzero lengths. */
704
705                         /* Symbol 19: RLE 4 to 5 of any length at a time.  */
706                         while ((run_end - run_start) >= 4) {
707                                 extra_bits = (run_end - run_start) > 4;
708                                 delta = prev_lens[run_start] - len;
709                                 if (delta < 0)
710                                         delta += 17;
711                                 precode_freqs[19]++;
712                                 precode_freqs[delta]++;
713                                 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
714                                 run_start += 4 + extra_bits;
715                         }
716                 }
717
718                 /* Output any remaining lengths without RLE.  */
719                 while (run_start != run_end) {
720                         delta = prev_lens[run_start] - len;
721                         if (delta < 0)
722                                 delta += 17;
723                         precode_freqs[delta]++;
724                         *itemptr++ = delta;
725                         run_start++;
726                 }
727         }
728
729         return itemptr - precode_items;
730 }
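/*
 * For illustration: a run of 25 zero lengths is handled by the "symbol 18"
 * branch above in a single item.  extra_bits = min(25 - 20, 0x1f) = 5, so
 * the item emitted is 18 | (5 << 5), and all 25 zeroes are consumed at once.
 */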
731
732 /*
733  * Output a Huffman code in the compressed form used in LZX.
734  *
735  * The Huffman code is represented in the output as a logical series of codeword
736  * lengths from which the Huffman code, which must be in canonical form, can be
737  * reconstructed.
738  *
739  * The codeword lengths are themselves compressed using a separate Huffman code,
740  * the "precode", which contains a symbol for each possible codeword length in
741  * the larger code as well as several special symbols to represent repeated
742  * codeword lengths (a form of run-length encoding).  The precode is itself
743  * constructed in canonical form, and its codeword lengths are represented
744  * literally in 20 4-bit fields that immediately precede the compressed codeword
745  * lengths of the larger code.
746  *
747  * Furthermore, the codeword lengths of the larger code are actually represented
748  * as deltas from the codeword lengths of the corresponding code in the previous
749  * block.
750  *
751  * @os:
752  *      Bitstream to which to write the compressed Huffman code.
753  * @lens:
754  *      The codeword lengths, indexed by symbol, in the Huffman code.
755  * @prev_lens:
756  *      The codeword lengths, indexed by symbol, in the corresponding Huffman
757  *      code in the previous block, or all zeroes if this is the first block.
758  * @num_lens:
759  *      The number of symbols in the Huffman code.
760  */
761 static void
762 lzx_write_compressed_code(struct lzx_output_bitstream *os,
763                           const u8 lens[restrict],
764                           const u8 prev_lens[restrict],
765                           unsigned num_lens)
766 {
767         u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
768         u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
769         u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
770         unsigned precode_items[num_lens];
771         unsigned num_precode_items;
772         unsigned precode_item;
773         unsigned precode_sym;
774         unsigned i;
775         u8 saved = lens[num_lens];
776         *(u8 *)(lens + num_lens) = 0x80; /* sentinel for lzx_compute_precode_items() */
777
778         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
779                 precode_freqs[i] = 0;
780
781         /* Compute the "items" (RLE / literal tokens and extra bits) with which
782          * the codeword lengths in the larger code will be output.  */
783         num_precode_items = lzx_compute_precode_items(lens,
784                                                       prev_lens,
785                                                       precode_freqs,
786                                                       precode_items);
787
788         /* Build the precode.  */
789         make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
790                                     LZX_MAX_PRE_CODEWORD_LEN,
791                                     precode_freqs, precode_lens,
792                                     precode_codewords);
793
794         /* Output the lengths of the codewords in the precode.  */
795         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
796                 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
797
798         /* Output the encoded lengths of the codewords in the larger code.  */
799         for (i = 0; i < num_precode_items; i++) {
800                 precode_item = precode_items[i];
801                 precode_sym = precode_item & 0x1F;
802                 lzx_write_varbits(os, precode_codewords[precode_sym],
803                                   precode_lens[precode_sym],
804                                   LZX_MAX_PRE_CODEWORD_LEN);
805                 if (precode_sym >= 17) {
806                         if (precode_sym == 17) {
807                                 lzx_write_bits(os, precode_item >> 5, 4);
808                         } else if (precode_sym == 18) {
809                                 lzx_write_bits(os, precode_item >> 5, 5);
810                         } else {
811                                 lzx_write_bits(os, (precode_item >> 5) & 1, 1);
812                                 precode_sym = precode_item >> 6;
813                                 lzx_write_varbits(os, precode_codewords[precode_sym],
814                                                   precode_lens[precode_sym],
815                                                   LZX_MAX_PRE_CODEWORD_LEN);
816                         }
817                 }
818         }
819
820         *(u8 *)(lens + num_lens) = saved;
821 }
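/*
 * For illustration: if a symbol's codeword was 6 bits long in the previous
 * block and is 4 bits long now, lzx_compute_precode_items() emits the delta
 * 6 - 4 = 2; in the reverse case the delta is 4 - 6 = -2, stored as
 * -2 + 17 = 15.  A decoder recovers the new length as
 * (previous length - delta) mod 17.
 */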
822
823 /*
824  * Write all matches and literal bytes (which were precomputed) in an LZX
825  * compressed block to the output bitstream in the final compressed
826  * representation.
827  *
828  * @os
829  *      The output bitstream.
830  * @block_type
831  *      The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
832  *      LZX_BLOCKTYPE_VERBATIM).
833  * @block_data
834  *      The uncompressed data of the block.
835  * @sequences
836  *      The matches and literals to output, given as a series of sequences.
837  * @codes
838  *      The main, length, and aligned offset Huffman codes for the current
839  *      LZX compressed block.
840  */
841 static void
842 lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
843                     const u8 *block_data, const struct lzx_sequence sequences[],
844                     const struct lzx_codes *codes)
845 {
846         const struct lzx_sequence *seq = sequences;
847         u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED);
848
849         for (;;) {
850                 /* Output the next sequence.  */
851
852                 unsigned litrunlen = seq->litrunlen;
853                 unsigned match_hdr;
854                 unsigned main_symbol;
855                 unsigned adjusted_length;
856                 u32 adjusted_offset;
857                 unsigned offset_slot;
858                 unsigned num_extra_bits;
859                 u32 extra_bits;
860
861                 /* Output the literal run of the sequence.  */
862
863                 if (litrunlen) {
864                         do {
865                                 unsigned lit = *block_data++;
866                                 lzx_write_varbits(os, codes->codewords.main[lit],
867                                                   codes->lens.main[lit],
868                                                   LZX_MAX_MAIN_CODEWORD_LEN);
869                         } while (--litrunlen);
870                 }
871
872                 /* Was this the last literal run?  */
873                 if (seq->adjusted_offset_and_match_hdr & 0x80000000)
874                         return;
875
876                 /* Nope; output the match.  */
877
878                 match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
879                 main_symbol = LZX_NUM_CHARS + match_hdr;
880                 adjusted_length = seq->adjusted_length;
881
882                 block_data += adjusted_length + LZX_MIN_MATCH_LEN;
883
884                 offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
885                 adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
886
887                 num_extra_bits = lzx_extra_offset_bits[offset_slot];
888                 extra_bits = adjusted_offset - lzx_offset_slot_base[offset_slot];
889
890                 /* Output the main symbol for the match.  */
891                 lzx_write_varbits(os, codes->codewords.main[main_symbol],
892                                   codes->lens.main[main_symbol],
893                                   LZX_MAX_MAIN_CODEWORD_LEN);
894
895                 /* If needed, output the length symbol for the match.  */
896
897                 if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
898                         lzx_write_varbits(os, codes->codewords.len[adjusted_length - LZX_NUM_PRIMARY_LENS],
899                                           codes->lens.len[adjusted_length - LZX_NUM_PRIMARY_LENS],
900                                           LZX_MAX_LEN_CODEWORD_LEN);
901                 }
902
903                 /* Output the extra offset bits for the match.  In aligned
904                  * offset blocks, the lowest 3 bits of the adjusted offset are
905                  * Huffman-encoded using the aligned offset code, provided that
906                  * at least 3 extra offset bits are required.  All other
907                  * extra offset bits are output verbatim.  */
908
909                 if ((adjusted_offset & ones_if_aligned) >= 16) {
910
911                         lzx_write_varbits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
912                                           num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS,
913                                           14);
914
915                         lzx_write_varbits(os, codes->codewords.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK],
916                                           codes->lens.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK],
917                                           LZX_MAX_ALIGNED_CODEWORD_LEN);
918                 } else {
919                         lzx_write_varbits(os, extra_bits, num_extra_bits, 17);
920                 }
921
922                 /* Advance to the next sequence.  */
923                 seq++;
924         }
925 }
926
927 static void
928 lzx_write_compressed_block(const u8 *block_begin,
929                            int block_type,
930                            u32 block_size,
931                            unsigned window_order,
932                            unsigned num_main_syms,
933                            const struct lzx_sequence sequences[],
934                            const struct lzx_codes * codes,
935                            const struct lzx_lens * prev_lens,
936                            struct lzx_output_bitstream * os)
937 {
938         LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
939                    block_type == LZX_BLOCKTYPE_VERBATIM);
940
941         /* The first three bits indicate the type of block and are one of the
942          * LZX_BLOCKTYPE_* constants.  */
943         lzx_write_bits(os, block_type, 3);
944
945         /* Output the block size.
946          *
947          * The original LZX format seemed to always encode the block size in 3
948          * bytes.  However, the implementation in WIMGAPI, as used in WIM files,
949          * uses the first bit to indicate whether the block is the default size
950          * (32768) or a different size given explicitly by the next 16 bits.
951          *
952          * By default, this compressor uses a window size of 32768 and therefore
953          * follows the WIMGAPI behavior.  However, this compressor also supports
954          * window sizes greater than 32768 bytes, which do not appear to be
955          * supported by WIMGAPI.  In such cases, we retain the default size bit
956          * to mean a size of 32768 bytes but output the non-default block size in 24
957          * bits rather than 16.  The compatibility of this behavior is unknown
958          * because WIMs created with chunk size greater than 32768 can seemingly
959          * only be opened by wimlib anyway.  */
960         if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
961                 lzx_write_bits(os, 1, 1);
962         } else {
963                 lzx_write_bits(os, 0, 1);
964
965                 if (window_order >= 16)
966                         lzx_write_bits(os, block_size >> 16, 8);
967
968                 lzx_write_bits(os, block_size & 0xFFFF, 16);
969         }
970
971         /* If it's an aligned offset block, output the aligned offset code.  */
972         if (block_type == LZX_BLOCKTYPE_ALIGNED) {
973                 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
974                         lzx_write_bits(os, codes->lens.aligned[i],
975                                        LZX_ALIGNEDCODE_ELEMENT_SIZE);
976                 }
977         }
978
979         /* Output the main code (two parts).  */
980         lzx_write_compressed_code(os, codes->lens.main,
981                                   prev_lens->main,
982                                   LZX_NUM_CHARS);
983         lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
984                                   prev_lens->main + LZX_NUM_CHARS,
985                                   num_main_syms - LZX_NUM_CHARS);
986
987         /* Output the length code.  */
988         lzx_write_compressed_code(os, codes->lens.len,
989                                   prev_lens->len,
990                                   LZX_LENCODE_NUM_SYMBOLS);
991
992         /* Output the compressed matches and literals.  */
993         lzx_write_sequences(os, block_type, block_begin, sequences, codes);
994 }
995
996 /* Given the frequencies of symbols in an LZX-compressed block and the
997  * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
998  * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
999  * will take fewer bits to output.  */
1000 static int
1001 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1002                                const struct lzx_codes * codes)
1003 {
1004         u32 aligned_cost = 0;
1005         u32 verbatim_cost = 0;
1006
1007         /* A verbatim block requires 3 bits in each place that an aligned symbol
1008          * would be used in an aligned offset block.  */
1009         for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1010                 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
1011                 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1012         }
1013
1014         /* Account for output of the aligned offset code.  */
1015         aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1016
1017         if (aligned_cost < verbatim_cost)
1018                 return LZX_BLOCKTYPE_ALIGNED;
1019         else
1020                 return LZX_BLOCKTYPE_VERBATIM;
1021 }
1022
1023 /*
1024  * Return the offset slot for the specified adjusted match offset, using the
1025  * compressor's acceleration tables to speed up the mapping.
1026  */
1027 static inline unsigned
1028 lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset)
1029 {
1030         if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
1031                 return c->offset_slot_tab_1[adjusted_offset];
1032         return c->offset_slot_tab_2[adjusted_offset >> 14];
1033 }
1034
1035 /*
1036  * Finish an LZX block:
1037  *
1038  * - build the Huffman codes
1039  * - decide whether to output the block as VERBATIM or ALIGNED
1040  * - output the block
1041  * - swap the indices of the current and previous Huffman codes
1042  */
1043 static void
1044 lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
1045                  const u8 *block_begin, u32 block_size, u32 seq_idx)
1046 {
1047         int block_type;
1048
1049         lzx_make_huffman_codes(c);
1050
1051         block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
1052                                                     &c->codes[c->codes_index]);
1053         lzx_write_compressed_block(block_begin,
1054                                    block_type,
1055                                    block_size,
1056                                    c->window_order,
1057                                    c->num_main_syms,
1058                                    &c->chosen_sequences[seq_idx],
1059                                    &c->codes[c->codes_index],
1060                                    &c->codes[c->codes_index ^ 1].lens,
1061                                    os);
1062         c->codes_index ^= 1;
1063 }
1064
1065 /* Tally the Huffman symbol for a literal and increment the literal run length.
1066  */
1067 static inline void
1068 lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
1069 {
1070         c->freqs.main[literal]++;
1071         ++*litrunlen_p;
1072 }
1073
1074 /* Tally the Huffman symbol for a match, save the match data and the length of
1075  * the preceding literal run in the next lzx_sequence, and update the recent
1076  * offsets queue.  */
1077 static inline void
1078 lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
1079                  u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
1080                  u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
1081 {
1082         u32 litrunlen = *litrunlen_p;
1083         struct lzx_sequence *next_seq = *next_seq_p;
1084         unsigned offset_slot;
1085         unsigned v;
1086
1087         v = length - LZX_MIN_MATCH_LEN;
1088
1089         /* Save the literal run length and adjusted length.  */
1090         next_seq->litrunlen = litrunlen;
1091         next_seq->adjusted_length = v;
1092
1093         /* Compute the length header and tally the length symbol if needed  */
1094         if (v >= LZX_NUM_PRIMARY_LENS) {
1095                 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1096                 v = LZX_NUM_PRIMARY_LENS;
1097         }
1098
1099         /* Compute the offset slot  */
1100         offset_slot = lzx_comp_get_offset_slot(c, offset_data);
1101
1102         /* Compute the match header.  */
1103         v += offset_slot * LZX_NUM_LEN_HEADERS;
1104
1105         /* Save the adjusted offset and match header.  */
1106         next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v;
1107
1108         /* Tally the main symbol.  */
1109         c->freqs.main[LZX_NUM_CHARS + v]++;
1110
1111         /* Update the recent offsets queue.  */
1112         if (offset_data < LZX_NUM_RECENT_OFFSETS) {
1113                 /* Repeat offset match  */
1114                 swap(recent_offsets[0], recent_offsets[offset_data]);
1115         } else {
1116                 /* Explicit offset match  */
1117
1118                 /* Tally the aligned offset symbol if needed  */
1119                 if (offset_data >= 16)
1120                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1121
1122                 recent_offsets[2] = recent_offsets[1];
1123                 recent_offsets[1] = recent_offsets[0];
1124                 recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT;
1125         }
1126
1127         /* Reset the literal run length and advance to the next sequence.  */
1128         *next_seq_p = next_seq + 1;
1129         *litrunlen_p = 0;
1130 }
1131
1132 /* Finish the last lzx_sequence.  The last lzx_sequence is just a literal run;
1133  * there is no match.  This literal run may be empty.  */
1134 static inline void
1135 lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
1136 {
1137         last_seq->litrunlen = litrunlen;
1138
1139         /* Special value to mark last sequence  */
1140         last_seq->adjusted_offset_and_match_hdr = 0x80000000;
1141 }
1142
1143 /*
1144  * Given the minimum-cost path computed through the item graph for the current
1145  * block, walk the path and count how many of each symbol in each Huffman-coded
1146  * alphabet would be required to output the items (matches and literals) along
1147  * the path.
1148  *
1149  * Note that the path will be walked backwards (from the end of the block to the
1150  * beginning of the block), but this doesn't matter because this function only
1151  * computes frequencies.
1152  */
1153 static void
1154 lzx_tally_item_list(struct lzx_compressor *c, u32 block_size)
1155 {
1156         u32 node_idx = block_size;
1157         for (;;) {
1158                 u32 len;
1159                 u32 offset_data;
1160                 unsigned v;
1161                 unsigned offset_slot;
1162
1163                 /* Tally literals until either a match or the beginning of the
1164                  * block is reached.  */
1165                 for (;;) {
1166                         u32 item = c->optimum_nodes[node_idx].item;
1167
1168                         len = item & OPTIMUM_LEN_MASK;
1169                         offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1170
1171                         if (len != 0)  /* Not a literal?  */
1172                                 break;
1173
1174                         /* Tally the main symbol for the literal.  */
1175                         c->freqs.main[offset_data]++;
1176
1177                         if (--node_idx == 0) /* Beginning of block was reached?  */
1178                                 return;
1179                 }
1180
1181                 node_idx -= len;
1182
1183                 /* Tally a match.  */
1184
1185                 /* Tally the aligned offset symbol if needed.  */
1186                 if (offset_data >= 16)
1187                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1188
1189                 /* Tally the length symbol if needed.  */
1190                 v = len - LZX_MIN_MATCH_LEN;
1191                 if (v >= LZX_NUM_PRIMARY_LENS) {
1192                         c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1193                         v = LZX_NUM_PRIMARY_LENS;
1194                 }
1195
1196                 /* Tally the main symbol.  */
1197                 offset_slot = lzx_comp_get_offset_slot(c, offset_data);
1198                 v += offset_slot * LZX_NUM_LEN_HEADERS;
1199                 c->freqs.main[LZX_NUM_CHARS + v]++;
1200
1201                 if (node_idx == 0) /* Beginning of block was reached?  */
1202                         return;
1203         }
1204 }
1205
1206 /*
1207  * Like lzx_tally_item_list(), but this function also generates the list of
1208  * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
1209  * ready to be output to the bitstream after the Huffman codes are computed.
1210  * The lzx_sequences will be written to decreasing memory addresses as the path
1211  * is walked backwards, which means they will end up in the expected
1212  * first-to-last order.  The return value is the index in c->chosen_sequences at
1213  * which the lzx_sequences begin.
1214  */
1215 static u32
1216 lzx_record_item_list(struct lzx_compressor *c, u32 block_size)
1217 {
1218         u32 node_idx = block_size;
1219         u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
1220         u32 lit_start_node;
1221
1222         /* Special value to mark last sequence  */
1223         c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000;
1224
1225         lit_start_node = node_idx;
1226         for (;;) {
1227                 u32 len;
1228                 u32 offset_data;
1229                 unsigned v;
1230                 unsigned offset_slot;
1231
1232                 /* Record literals until either a match or the beginning of the
1233                  * block is reached.  */
1234                 for (;;) {
1235                         u32 item = c->optimum_nodes[node_idx].item;
1236
1237                         len = item & OPTIMUM_LEN_MASK;
1238                         offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1239
1240                         if (len != 0) /* Not a literal?  */
1241                                 break;
1242
1243                         /* Tally the main symbol for the literal.  */
1244                         c->freqs.main[offset_data]++;
1245
1246                         if (--node_idx == 0) /* Beginning of block was reached?  */
1247                                 goto out;
1248                 }
1249
1250                 /* Save the literal run length for the next sequence (the
1251                  * "previous sequence" when walking backwards).  */
1252                 c->chosen_sequences[seq_idx--].litrunlen = lit_start_node - node_idx;
1253                 node_idx -= len;
1254                 lit_start_node = node_idx;
1255
1256                 /* Record a match.  */
1257
1258                 /* Tally the aligned offset symbol if needed.  */
1259                 if (offset_data >= 16)
1260                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1261
1262                 /* Save the adjusted length.  */
1263                 v = len - LZX_MIN_MATCH_LEN;
1264                 c->chosen_sequences[seq_idx].adjusted_length = v;
1265
1266                 /* Tally the length symbol if needed.  */
1267                 if (v >= LZX_NUM_PRIMARY_LENS) {
1268                         c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1269                         v = LZX_NUM_PRIMARY_LENS;
1270                 }
1271
1272                 /* Tally the main symbol.  */
1273                 offset_slot = lzx_comp_get_offset_slot(c, offset_data);
1274                 v += offset_slot * LZX_NUM_LEN_HEADERS;
1275                 c->freqs.main[LZX_NUM_CHARS + v]++;
1276
1277                 /* Save the adjusted offset and match header.  */
1278                 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr =
1279                                 (offset_data << 9) | v;
1280
1281                 if (node_idx == 0) /* Beginning of block was reached?  */
1282                         goto out;
1283         }
1284
1285 out:
1286         /* Save the literal run length for the first sequence.  */
1287         c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
1288
1289         /* Return the index in c->chosen_sequences at which the lzx_sequences
1290          * begin.  */
1291         return seq_idx;
1292 }
1293
1294 /*
1295  * Find an inexpensive path through the graph of possible match/literal choices
1296  * for the current block.  The nodes of the graph are
1297  * c->optimum_nodes[0...block_size].  They correspond directly to the bytes in
1298  * the current block, plus one extra node for end-of-block.  The edges of the
1299  * graph are matches and literals.  The goal is to find the minimum cost path
1300  * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
1301  *
1302  * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1303  * proceeding forwards one node at a time.  At each node, a selection of matches
1304  * (len >= 2), as well as the literal byte (len = 1), is considered.  An item of
1305  * length 'len' provides a new path to reach the node 'len' bytes later.  If
1306  * such a path is the lowest cost found so far to reach that later node, then
1307  * that later node is updated with the new path.
1308  *
1309  * Note that although this algorithm is based on minimum cost path search, due
1310  * to various simplifying assumptions the result is not guaranteed to be the
1311  * true minimum cost, or "optimal", path over the graph of all valid LZX
1312  * representations of this block.
1313  *
1314  * Also, note that because of the presence of the recent offsets queue (which is
1315  * a type of adaptive state), the algorithm cannot work backwards and compute
1316  * "cost to end" instead of "cost to beginning".  Furthermore, the way the
1317  * algorithm handles this adaptive state in the "minimum cost" parse is actually
1318  * only an approximation.  It's possible for the globally optimal, minimum cost
1319  * path to contain a prefix, ending at a position, where that path prefix is
1320  * *not* the minimum cost path to that position.  This can happen if such a path
1321  * prefix results in a different adaptive state which results in lower costs
1322  * later.  The algorithm does not solve this problem; it only considers the
1323  * lowest cost to reach each individual position.
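      *
      * As a small worked example of the forward update: if the cheapest
      * known path to node 100 costs 1000 bits, and a length-4 match costing
      * 30 bits is considered there, then node 104 is updated to a cost of
      * 1030 bits (recording that match as its incoming item), unless a
      * cheaper path to node 104 has already been found.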
1324  */
1325 static struct lzx_lru_queue
1326 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1327                        const u8 * const restrict block_begin,
1328                        const u32 block_size,
1329                        const struct lzx_lru_queue initial_queue)
1330 {
1331         struct lzx_optimum_node *cur_node = c->optimum_nodes;
1332         struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
1333         struct lz_match *cache_ptr = c->match_cache;
1334         const u8 *in_next = block_begin;
1335         const u8 * const block_end = block_begin + block_size;
1336
1337         /* Instead of storing the match offset LRU queues in the
1338          * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1339          * storing them in a smaller array.  This works because the algorithm
1340          * only requires a limited history of the adaptive state.  Once a given
1341          * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1342          * it is no longer needed.  */
1343         struct lzx_lru_queue queues[512];
1344
1345         STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
1346 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
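             /* QUEUE(in) maps a window pointer to a slot in queues[] via the
              * pointer value modulo 512.  The loop below never reads a queue
              * saved more than LZX_MAX_MATCH_LEN positions behind the one
              * being written, so (as the STATIC_ASSERT above checks) no slot
              * is overwritten while its contents are still needed.  */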
1347
1348         /* Initially, the cost to reach each node is "infinity".  */
1349         memset(c->optimum_nodes, 0xFF,
1350                (block_size + 1) * sizeof(c->optimum_nodes[0]));
1351
1352         QUEUE(block_begin) = initial_queue;
1353
1354         /* The following loop runs 'block_size' iterations, one per node.  */
1355         do {
1356                 unsigned num_matches;
1357                 unsigned literal;
1358                 u32 cost;
1359
1360                 /*
1361                  * A selection of matches for the block was already saved in
1362                  * memory so that we don't have to run the uncompressed data
1363                  * through the matchfinder on every optimization pass.  However,
1364                  * we still search for repeat offset matches during each
1365                  * optimization pass because we cannot predict the state of the
1366                  * recent offsets queue.  But as a heuristic, we don't bother
1367                  * searching for repeat offset matches if the general-purpose
1368                  * matchfinder failed to find any matches.
1369                  *
1370                  * Note that a match of length n at some offset implies there is
1371                  * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1372                  * that same offset.  In other words, we don't necessarily need
1373                  * to use the full length of a match.  The key heuristic that
1374                  * saves a significant amount of time is that for each
1375                  * distinct length, we only consider the smallest offset for
1376                  * which that length is available.  This heuristic also applies
1377                  * to repeat offsets, which we order specially: R0 < R1 < R2 <
1378                  * any explicit offset.  Of course, this heuristic may
1379                  * produce suboptimal results because offset slots in LZX are
1380                  * subject to entropy encoding, but in practice this is a useful
1381                  * heuristic.
1382                  */
1383
1384                 num_matches = cache_ptr->length;
1385                 cache_ptr++;
1386
1387                 if (num_matches) {
1388                         struct lz_match *end_matches = cache_ptr + num_matches;
1389                         unsigned next_len = LZX_MIN_MATCH_LEN;
1390                         unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1391                         const u8 *matchptr;
1392
1393                         /* Consider R0 match  */
1394                         matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1395                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1396                                 goto R0_done;
1397                         STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
1398                         do {
1399                                 u32 cost = cur_node->cost +
1400                                            c->costs.match_cost[0][
1401                                                         next_len - LZX_MIN_MATCH_LEN];
1402                                 if (cost <= (cur_node + next_len)->cost) {
1403                                         (cur_node + next_len)->cost = cost;
1404                                         (cur_node + next_len)->item =
1405                                                 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1406                                 }
1407                                 if (unlikely(++next_len > max_len)) {
1408                                         cache_ptr = end_matches;
1409                                         goto done_matches;
1410                                 }
1411                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1412
1413                 R0_done:
1414
1415                         /* Consider R1 match  */
1416                         matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1417                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1418                                 goto R1_done;
1419                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1420                                 goto R1_done;
1421                         for (unsigned len = 2; len < next_len - 1; len++)
1422                                 if (matchptr[len] != in_next[len])
1423                                         goto R1_done;
1424                         do {
1425                                 u32 cost = cur_node->cost +
1426                                            c->costs.match_cost[1][
1427                                                         next_len - LZX_MIN_MATCH_LEN];
1428                                 if (cost <= (cur_node + next_len)->cost) {
1429                                         (cur_node + next_len)->cost = cost;
1430                                         (cur_node + next_len)->item =
1431                                                 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1432                                 }
1433                                 if (unlikely(++next_len > max_len)) {
1434                                         cache_ptr = end_matches;
1435                                         goto done_matches;
1436                                 }
1437                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1438
1439                 R1_done:
1440
1441                         /* Consider R2 match  */
1442                         matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1443                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1444                                 goto R2_done;
1445                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1446                                 goto R2_done;
1447                         for (unsigned len = 2; len < next_len - 1; len++)
1448                                 if (matchptr[len] != in_next[len])
1449                                         goto R2_done;
1450                         do {
1451                                 u32 cost = cur_node->cost +
1452                                            c->costs.match_cost[2][
1453                                                         next_len - LZX_MIN_MATCH_LEN];
1454                                 if (cost <= (cur_node + next_len)->cost) {
1455                                         (cur_node + next_len)->cost = cost;
1456                                         (cur_node + next_len)->item =
1457                                                 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1458                                 }
1459                                 if (unlikely(++next_len > max_len)) {
1460                                         cache_ptr = end_matches;
1461                                         goto done_matches;
1462                                 }
1463                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1464
1465                 R2_done:
1466
1467                         while (next_len > cache_ptr->length)
1468                                 if (++cache_ptr == end_matches)
1469                                         goto done_matches;
1470
1471                         /* Consider explicit offset matches  */
1472                         do {
1473                                 u32 offset = cache_ptr->offset;
1474                                 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1475                                 unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data);
1476                                 do {
1477                                         u32 cost = cur_node->cost +
1478                                                    c->costs.match_cost[offset_slot][
1479                                                                 next_len - LZX_MIN_MATCH_LEN];
1480                                 #if LZX_CONSIDER_ALIGNED_COSTS
1481                                         if (lzx_extra_offset_bits[offset_slot] >=
1482                                             LZX_NUM_ALIGNED_OFFSET_BITS)
1483                                                 cost += c->costs.aligned[offset_data &
1484                                                                          LZX_ALIGNED_OFFSET_BITMASK];
1485                                 #endif
1486                                         if (cost < (cur_node + next_len)->cost) {
1487                                                 (cur_node + next_len)->cost = cost;
1488                                                 (cur_node + next_len)->item =
1489                                                         (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1490                                         }
1491                                 } while (++next_len <= cache_ptr->length);
1492                         } while (++cache_ptr != end_matches);
1493                 }
1494
1495         done_matches:
1496
1497                 /* Consider coding a literal.
1498                  *
1499                  * To avoid an extra branch, actually checking the preferability
1500                  * of coding the literal is integrated into the queue update
1501                  * code below.  */
1502                 literal = *in_next++;
1503                 cost = cur_node->cost +
1504                        c->costs.main[lzx_main_symbol_for_literal(literal)];
1505
1506                 /* Advance to the next position.  */
1507                 cur_node++;
1508
1509                 /* The lowest-cost path to the current position is now known.
1510                  * Finalize the recent offsets queue that results from taking
1511                  * this lowest-cost path.  */
1512
1513                 if (cost <= cur_node->cost) {
1514                         /* Literal: queue remains unchanged.  */
1515                         cur_node->cost = cost;
1516                         cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
1517                         QUEUE(in_next) = QUEUE(in_next - 1);
1518                 } else {
1519                         /* Match: queue update is needed.  */
1520                         unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1521                         u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1522                         if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1523                                 /* Explicit offset match: insert offset at front  */
1524                                 QUEUE(in_next) =
1525                                         lzx_lru_queue_push(QUEUE(in_next - len),
1526                                                            offset_data - LZX_OFFSET_ADJUSTMENT);
1527                         } else {
1528                                 /* Repeat offset match: swap offset to front  */
1529                                 QUEUE(in_next) =
1530                                         lzx_lru_queue_swap(QUEUE(in_next - len),
1531                                                            offset_data);
1532                         }
1533                 }
1534         } while (cur_node != end_node);
1535
1536         /* Return the match offset queue at the end of the minimum cost path. */
1537         return QUEUE(block_end);
1538 }
1539
1540 /* Given the costs for the main and length codewords, compute 'match_costs'.  */
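     /* For each offset slot, match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]
      * becomes the estimated cost of the main symbol, plus the length symbol
      * for lengths that need one, plus the slot's extra offset bits.  (When
      * LZX_CONSIDER_ALIGNED_COSTS is enabled, the aligned offset bits are
      * excluded here and charged separately during the path search.)  This
      * lets the path search price an entire match with one table lookup.  */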
1541 static void
1542 lzx_compute_match_costs(struct lzx_compressor *c)
1543 {
1544         unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
1545         struct lzx_costs *costs = &c->costs;
1546
1547         for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1548
1549                 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1550                 unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
1551                 unsigned i;
1552
1553         #if LZX_CONSIDER_ALIGNED_COSTS
1554                 if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
1555                         extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1556         #endif
1557
1558                 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1559                         costs->match_cost[offset_slot][i] =
1560                                 costs->main[main_symbol++] + extra_cost;
1561
1562                 extra_cost += costs->main[main_symbol];
1563
1564                 for (; i < LZX_NUM_LENS; i++)
1565                         costs->match_cost[offset_slot][i] =
1566                                 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
1567         }
1568 }
1569
1570 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
1571  * algorithm.  */
1572 static void
1573 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1574 {
1575         u32 i;
1576         bool have_byte[256];
1577         unsigned num_used_bytes;
1578
1579         /* The costs below are hard coded to use a scaling factor of 16.  */
1580         STATIC_ASSERT(LZX_BIT_COST == 16);
1581
1582         /*
1583          * Heuristics:
1584          *
1585          * - Use smaller initial costs for literal symbols when the input buffer
1586          *   contains fewer distinct bytes.
1587          *
1588          * - Assume that match symbols are more costly than literal symbols.
1589          *
1590          * - Assume that length symbols for shorter lengths are less costly than
1591          *   length symbols for longer lengths.
1592          */
1593
1594         for (i = 0; i < 256; i++)
1595                 have_byte[i] = false;
1596
1597         for (i = 0; i < block_size; i++)
1598                 have_byte[block[i]] = true;
1599
1600         num_used_bytes = 0;
1601         for (i = 0; i < 256; i++)
1602                 num_used_bytes += have_byte[i];
1603
1604         for (i = 0; i < 256; i++)
1605                 c->costs.main[i] = 140 - (256 - num_used_bytes) / 4;
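             /* For example, with LZX_BIT_COST == 16: if all 256 byte values
              * occur in the block, each literal starts at cost 140 (8.75
              * bits); if only 64 distinct byte values occur, the cost drops
              * to 92 (5.75 bits).  */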
1606
1607         for (; i < c->num_main_syms; i++)
1608                 c->costs.main[i] = 170;
1609
1610         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1611                 c->costs.len[i] = 103 + (i / 4);
1612
1613 #if LZX_CONSIDER_ALIGNED_COSTS
1614         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1615                 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1616 #endif
1617
1618         lzx_compute_match_costs(c);
1619 }
1620
1621 /* Update the current cost model to reflect the computed Huffman codes.  */
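     /* Symbols that received no codeword in the previous pass (length 0) are
      * assigned a nominal high cost --- 15 bits for main and length symbols,
      * 7 bits for aligned offset symbols --- so that unused symbols remain
      * representable but are not treated as free.  */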
1622 static void
1623 lzx_update_costs(struct lzx_compressor *c)
1624 {
1625         unsigned i;
1626         const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1627
1628         for (i = 0; i < c->num_main_syms; i++)
1629                 c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
1630
1631         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1632                 c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
1633
1634 #if LZX_CONSIDER_ALIGNED_COSTS
1635         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1636                 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
1637 #endif
1638
1639         lzx_compute_match_costs(c);
1640 }
1641
1642 static struct lzx_lru_queue
1643 lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
1644                              struct lzx_output_bitstream * const restrict os,
1645                              const u8 * const restrict block_begin,
1646                              const u32 block_size,
1647                              const struct lzx_lru_queue initial_queue)
1648 {
1649         unsigned num_passes_remaining = c->num_optim_passes;
1650         struct lzx_lru_queue new_queue;
1651         u32 seq_idx;
1652
1653         /* The first optimization pass uses a default cost model.  Each
1654          * additional optimization pass uses a cost model derived from the
1655          * Huffman code computed in the previous pass.  */
1656
1657         lzx_set_default_costs(c, block_begin, block_size);
1658         lzx_reset_symbol_frequencies(c);
1659         do {
1660                 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1661                                                    initial_queue);
1662                 if (num_passes_remaining > 1) {
1663                         lzx_tally_item_list(c, block_size);
1664                         lzx_make_huffman_codes(c);
1665                         lzx_update_costs(c);
1666                         lzx_reset_symbol_frequencies(c);
1667                 }
1668         } while (--num_passes_remaining);
1669
1670         seq_idx = lzx_record_item_list(c, block_size);
1671         lzx_finish_block(c, os, block_begin, block_size, seq_idx);
1672         return new_queue;
1673 }
1674
1675 /*
1676  * This is the "near-optimal" LZX compressor.
1677  *
1678  * For each block, it performs a relatively thorough graph search to find an
1679  * inexpensive (in terms of compressed size) way to output that block.
1680  *
1681  * Note: there are actually many things this algorithm leaves on the table in
1682  * terms of compression ratio.  So although it may be "near-optimal", it is
1683  * certainly not "optimal".  The goal is not to produce the optimal compression
1684  * ratio, which for LZX is probably impossible within any practical amount of
1685  * time, but rather to produce a compression ratio significantly better than a
1686  * simpler "greedy" or "lazy" parse while still being relatively fast.
1687  */
1688 static void
1689 lzx_compress_near_optimal(struct lzx_compressor *c,
1690                           struct lzx_output_bitstream *os)
1691 {
1692         const u8 * const in_begin = c->in_buffer;
1693         const u8 *       in_next = in_begin;
1694         const u8 * const in_end  = in_begin + c->in_nbytes;
1695         unsigned max_len = LZX_MAX_MATCH_LEN;
1696         unsigned nice_len = min(c->nice_match_length, max_len);
1697         u32 next_hash;
1698         struct lzx_lru_queue queue;
1699
1700         bt_matchfinder_init(&c->bt_mf);
1701         memset(c->hash2_tab, 0, sizeof(c->hash2_tab));
1702         next_hash = bt_matchfinder_hash_3_bytes(in_next);
1703         lzx_lru_queue_init(&queue);
1704
1705         do {
1706                 /* Starting a new block  */
1707                 const u8 * const in_block_begin = in_next;
1708                 const u8 * const in_block_end =
1709                         in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1710
1711                 /* Run the block through the matchfinder and cache the matches. */
1712                 struct lz_match *cache_ptr = c->match_cache;
1713                 do {
1714                         struct lz_match *lz_matchptr;
1715                         u32 hash2;
1716                         pos_t cur_match;
1717                         unsigned best_len;
1718
1719                         /* If approaching the end of the input buffer, adjust
1720                          * 'max_len' and 'nice_len' accordingly.  */
1721                         if (unlikely(max_len > in_end - in_next)) {
1722                                 max_len = in_end - in_next;
1723                                 nice_len = min(max_len, nice_len);
1724
1725                                 /* This extra check is needed to ensure that we
1726                                  * never output a length 2 match of the very
1727                                  * last two bytes with the very first two bytes,
1728                                  * since such a match has an offset too large to
1729                                  * be represented.  */
1730                                 if (unlikely(max_len < 3)) {
1731                                         in_next++;
1732                                         cache_ptr->length = 0;
1733                                         cache_ptr++;
1734                                         continue;
1735                                 }
1736                         }
1737
1738                         lz_matchptr = cache_ptr + 1;
1739
1740                         /* Check for a length 2 match.  */
1741                         hash2 = lz_hash_2_bytes(in_next, LZX_HASH2_ORDER);
1742                         cur_match = c->hash2_tab[hash2];
1743                         c->hash2_tab[hash2] = in_next - in_begin;
1744                         if (cur_match != 0 &&
1745                             (LZX_HASH2_ORDER == 16 ||
1746                              load_u16_unaligned(&in_begin[cur_match]) ==
1747                              load_u16_unaligned(in_next)))
1748                         {
1749                                 lz_matchptr->length = 2;
1750                                 lz_matchptr->offset = in_next - &in_begin[cur_match];
1751                                 lz_matchptr++;
1752                         }
1753
1754                         /* Check for matches of length >= 3.  */
1755                         lz_matchptr = bt_matchfinder_get_matches(&c->bt_mf,
1756                                                                  in_begin,
1757                                                                  in_next,
1758                                                                  3,
1759                                                                  max_len,
1760                                                                  nice_len,
1761                                                                  c->max_search_depth,
1762                                                                  &next_hash,
1763                                                                  &best_len,
1764                                                                  lz_matchptr);
1765                         in_next++;
1766                         cache_ptr->length = lz_matchptr - (cache_ptr + 1);
1767                         cache_ptr = lz_matchptr;
1768
1769                         /*
1770                          * If there was a very long match found, then don't
1771                          * cache any matches for the bytes covered by that
1772                          * match.  This avoids degenerate behavior when
1773                          * compressing highly redundant data, where the number
1774                          * of matches can be very large.
1775                          *
1776                          * This heuristic doesn't actually hurt the compression
1777                          * ratio very much.  If there's a long match, then the
1778                          * data must be highly compressible, so it doesn't
1779                          * matter as much what we do.
1780                          */
1781                         if (best_len >= nice_len) {
1782                                 --best_len;
1783                                 do {
1784                                         if (unlikely(max_len > in_end - in_next)) {
1785                                                 max_len = in_end - in_next;
1786                                                 nice_len = min(max_len, nice_len);
1787                                                 if (unlikely(max_len < 3)) {
1788                                                         in_next++;
1789                                                         cache_ptr->length = 0;
1790                                                         cache_ptr++;
1791                                                         continue;
1792                                                 }
1793                                         }
1794                                         c->hash2_tab[lz_hash_2_bytes(in_next, LZX_HASH2_ORDER)] =
1795                                                 in_next - in_begin;
1796                                         bt_matchfinder_skip_position(&c->bt_mf,
1797                                                                      in_begin,
1798                                                                      in_next,
1799                                                                      in_end,
1800                                                                      nice_len,
1801                                                                      c->max_search_depth,
1802                                                                      &next_hash);
1803                                         in_next++;
1804                                         cache_ptr->length = 0;
1805                                         cache_ptr++;
1806                                 } while (--best_len);
1807                         }
1808                 } while (in_next < in_block_end &&
1809                          likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
1810
1811                 /* We've finished running the block through the matchfinder.
1812                  * Now choose a match/literal sequence and write the block.  */
1813
1814                 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
1815                                                      in_next - in_block_begin,
1816                                                      queue);
1817         } while (in_next != in_end);
1818 }
1819
1820 /*
1821  * Given a pointer to the current byte sequence and the current list of recent
1822  * match offsets, find the longest repeat offset match.
1823  *
1824  * If no match of at least 2 bytes is found, then return 0.
1825  *
1826  * If a match of at least 2 bytes is found, then return its length and set
1827  * *rep_max_idx_ret to the index of its offset in @recent_offsets.
1828  */
1829 static unsigned
1830 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
1831                                      const u32 bytes_remaining,
1832                                      const u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
1833                                      unsigned *rep_max_idx_ret)
1834 {
1835         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
1836         LZX_ASSERT(bytes_remaining >= 2);
1837
1838         const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
1839         const u16 next_2_bytes = load_u16_unaligned(in_next);
1840         const u8 *matchptr;
1841         unsigned rep_max_len;
1842         unsigned rep_max_idx;
1843         unsigned rep_len;
1844
1845         matchptr = in_next - recent_offsets[0];
1846         if (load_u16_unaligned(matchptr) == next_2_bytes)
1847                 rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
1848         else
1849                 rep_max_len = 0;
1850         rep_max_idx = 0;
1851
1852         matchptr = in_next - recent_offsets[1];
1853         if (load_u16_unaligned(matchptr) == next_2_bytes) {
1854                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1855                 if (rep_len > rep_max_len) {
1856                         rep_max_len = rep_len;
1857                         rep_max_idx = 1;
1858                 }
1859         }
1860
1861         matchptr = in_next - recent_offsets[2];
1862         if (load_u16_unaligned(matchptr) == next_2_bytes) {
1863                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1864                 if (rep_len > rep_max_len) {
1865                         rep_max_len = rep_len;
1866                         rep_max_idx = 2;
1867                 }
1868         }
1869
1870         *rep_max_idx_ret = rep_max_idx;
1871         return rep_max_len;
1872 }
1873
1874 /* Fast heuristic scoring for lazy parsing: how "good" is this match?  */
1875 static inline unsigned
1876 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
1877 {
1878         unsigned score = len;
1879
1880         if (adjusted_offset < 4096)
1881                 score++;
1882
1883         if (adjusted_offset < 256)
1884                 score++;
1885
1886         return score;
1887 }
1888
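     /* Score a repeat offset match.  The fixed bonus of 3 over an explicit
      * offset match of the same length favors repeat offsets, which are cheap
      * to encode (offset slots 0-2 need no extra offset bits); 'rep_idx' does
      * not currently affect the score.  */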
1889 static inline unsigned
1890 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
1891 {
1892         return rep_len + 3;
1893 }
1894
1895 /* This is the "lazy" LZX compressor.  */
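     /* At each position it finds the longest match, but before committing it
      * also checks the next position; if a sufficiently better match starts
      * there, it emits a literal instead and defers the decision (hence
      * "lazy").  Repeat offset matches are scored preferentially since they
      * are cheap to encode.  */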
1896 static void
1897 lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
1898 {
1899         const u8 * const in_begin = c->in_buffer;
1900         const u8 *       in_next = in_begin;
1901         const u8 * const in_end  = in_begin + c->in_nbytes;
1902         unsigned max_len = LZX_MAX_MATCH_LEN;
1903         unsigned nice_len = min(c->nice_match_length, max_len);
1904         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
1905         u32 recent_offsets[3] = {1, 1, 1};
1906         u32 next_hashes[2] = {};
1907
1908         hc_matchfinder_init(&c->hc_mf);
1909
1910         do {
1911                 /* Starting a new block  */
1912
1913                 const u8 * const in_block_begin = in_next;
1914                 const u8 * const in_block_end =
1915                         in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1916                 struct lzx_sequence *next_seq = c->chosen_sequences;
1917                 unsigned cur_len;
1918                 u32 cur_offset;
1919                 u32 cur_offset_data;
1920                 unsigned cur_score;
1921                 unsigned next_len;
1922                 u32 next_offset;
1923                 u32 next_offset_data;
1924                 unsigned next_score;
1925                 unsigned rep_max_len;
1926                 unsigned rep_max_idx;
1927                 unsigned rep_score;
1928                 unsigned skip_len;
1929                 u32 litrunlen = 0;
1930
1931                 lzx_reset_symbol_frequencies(c);
1932
1933                 do {
1934                         if (unlikely(max_len > in_end - in_next)) {
1935                                 max_len = in_end - in_next;
1936                                 nice_len = min(max_len, nice_len);
1937                         }
1938
1939                         /* Find the longest match at the current position.  */
1940
1941                         cur_len = hc_matchfinder_longest_match(&c->hc_mf,
1942                                                                in_begin,
1943                                                                in_next - in_begin,
1944                                                                2,
1945                                                                max_len,
1946                                                                nice_len,
1947                                                                c->max_search_depth,
1948                                                                next_hashes,
1949                                                                &cur_offset);
1950                         if (cur_len < 3 ||
1951                             (cur_len == 3 &&
1952                              cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
1953                              cur_offset != recent_offsets[0] &&
1954                              cur_offset != recent_offsets[1] &&
1955                              cur_offset != recent_offsets[2]))
1956                         {
1957                                 /* There was no match found, or the only match found
1958                                  * was a distant length 3 match.  Output a literal.  */
1959                                 lzx_record_literal(c, *in_next++, &litrunlen);
1960                                 continue;
1961                         }
1962
1963                         if (cur_offset == recent_offsets[0]) {
1964                                 in_next++;
1965                                 cur_offset_data = 0;
1966                                 skip_len = cur_len - 1;
1967                                 goto choose_cur_match;
1968                         }
1969
1970                         cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
1971                         cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
1972
1973                         /* Consider a repeat offset match  */
1974                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
1975                                                                            in_end - in_next,
1976                                                                            recent_offsets,
1977                                                                            &rep_max_idx);
1978                         in_next++;
1979
1980                         if (rep_max_len >= 3 &&
1981                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
1982                                                                        rep_max_idx)) >= cur_score)
1983                         {
1984                                 cur_len = rep_max_len;
1985                                 cur_offset_data = rep_max_idx;
1986                                 skip_len = rep_max_len - 1;
1987                                 goto choose_cur_match;
1988                         }
1989
1990                 have_cur_match:
1991
1992                         /* We have a match at the current position.  */
1993
1994                         /* If we have a very long match, choose it immediately.  */
1995                         if (cur_len >= nice_len) {
1996                                 skip_len = cur_len - 1;
1997                                 goto choose_cur_match;
1998                         }
1999
2000                         /* See if there's a better match at the next position.  */
2001
2002                         if (unlikely(max_len > in_end - in_next)) {
2003                                 max_len = in_end - in_next;
2004                                 nice_len = min(max_len, nice_len);
2005                         }
2006
2007                         next_len = hc_matchfinder_longest_match(&c->hc_mf,
2008                                                                 in_begin,
2009                                                                 in_next - in_begin,
2010                                                                 cur_len - 2,
2011                                                                 max_len,
2012                                                                 nice_len,
2013                                                                 c->max_search_depth / 2,
2014                                                                 next_hashes,
2015                                                                 &next_offset);
2016
2017                         if (next_len <= cur_len - 2) {
2018                                 in_next++;
2019                                 skip_len = cur_len - 2;
2020                                 goto choose_cur_match;
2021                         }
2022
2023                         next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
2024                         next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
2025
2026                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2027                                                                            in_end - in_next,
2028                                                                            recent_offsets,
2029                                                                            &rep_max_idx);
2030                         in_next++;
2031
2032                         if (rep_max_len >= 3 &&
2033                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2034                                                                        rep_max_idx)) >= next_score)
2035                         {
2036
2037                                 if (rep_score > cur_score) {
2038                                         /* The next match is better, and it's a
2039                                          * repeat offset match.  */
2040                                         lzx_record_literal(c, *(in_next - 2),
2041                                                            &litrunlen);
2042                                         cur_len = rep_max_len;
2043                                         cur_offset_data = rep_max_idx;
2044                                         skip_len = cur_len - 1;
2045                                         goto choose_cur_match;
2046                                 }
2047                         } else {
2048                                 if (next_score > cur_score) {
2049                                         /* The next match is better, and it's an
2050                                          * explicit offset match.  */
2051                                         lzx_record_literal(c, *(in_next - 2),
2052                                                            &litrunlen);
2053                                         cur_len = next_len;
2054                                         cur_offset_data = next_offset_data;
2055                                         cur_score = next_score;
2056                                         goto have_cur_match;
2057                                 }
2058                         }
2059
2060                         /* The original match was better.  */
2061                         skip_len = cur_len - 2;
2062
2063                 choose_cur_match:
2064                         lzx_record_match(c, cur_len, cur_offset_data,
2065                                          recent_offsets, &litrunlen, &next_seq);
2066                         in_next = hc_matchfinder_skip_positions(&c->hc_mf,
2067                                                                 in_begin,
2068                                                                 in_next - in_begin,
2069                                                                 in_end - in_begin,
2070                                                                 skip_len,
2071                                                                 next_hashes);
2072                 } while (in_next < in_block_end);
2073
2074                 lzx_finish_sequence(next_seq, litrunlen);
2075
2076                 lzx_finish_block(c, os, in_block_begin, in_next - in_block_begin, 0);
2077
2078         } while (in_next != in_end);
2079 }
2080
2081 /* Generate the acceleration tables for offset slots.  */
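     /* Two lookup tables are filled in: offset_slot_tab_1 is indexed directly
      * by small adjusted offsets, while offset_slot_tab_2 is indexed by
      * adjusted_offset >> 14 and covers the remaining offsets up to
      * LZX_MAX_WINDOW_SIZE, keeping offset-slot lookup constant-time without
      * one huge table.  */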
2082 static void
2083 lzx_init_offset_slot_tabs(struct lzx_compressor *c)
2084 {
2085         u32 adjusted_offset = 0;
2086         unsigned slot = 0;
2087
2088         /* slots [0, 29]  */
2089         for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
2090              adjusted_offset++)
2091         {
2092                 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2093                         slot++;
2094                 c->offset_slot_tab_1[adjusted_offset] = slot;
2095         }
2096
2097         /* slots [30, 49]  */
2098         for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
2099              adjusted_offset += (u32)1 << 14)
2100         {
2101                 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2102                         slot++;
2103                 c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
2104         }
2105 }
2106
2107 static size_t
2108 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
2109 {
2110         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2111                 return offsetof(struct lzx_compressor, hc_mf) +
2112                         hc_matchfinder_size(max_bufsize);
2113         } else {
2114                 return offsetof(struct lzx_compressor, bt_mf) +
2115                         bt_matchfinder_size(max_bufsize);
2116         }
2117 }
2118
2119 static u64
2120 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
2121                       bool destructive)
2122 {
2123         u64 size = 0;
2124
2125         if (max_bufsize > LZX_MAX_WINDOW_SIZE)
2126                 return 0;
2127
2128         size += lzx_get_compressor_size(max_bufsize, compression_level);
2129         if (!destructive)
2130                 size += max_bufsize; /* in_buffer */
2131         return size;
2132 }
2133
2134 static int
2135 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
2136                       bool destructive, void **c_ret)
2137 {
2138         unsigned window_order;
2139         struct lzx_compressor *c;
2140
2141         window_order = lzx_get_window_order(max_bufsize);
2142         if (window_order == 0)
2143                 return WIMLIB_ERR_INVALID_PARAM;
2144
2145         c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
2146         if (!c)
2147                 goto oom0;
2148
2149         c->destructive = destructive;
2150
2151         c->num_main_syms = lzx_get_num_main_syms(window_order);
2152         c->window_order = window_order;
2153
2154         if (!c->destructive) {
2155                 c->in_buffer = MALLOC(max_bufsize);
2156                 if (!c->in_buffer)
2157                         goto oom1;
2158         }
2159
2160         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2161
2162                 /* Fast compression: Use lazy parsing.  */
2163
2164                 c->impl = lzx_compress_lazy;
2165                 c->max_search_depth = (36 * compression_level) / 20;
2166                 c->nice_match_length = (72 * compression_level) / 20;
2167
2168                 /* lzx_compress_lazy() needs max_search_depth >= 2 because it
2169                  * halves the max_search_depth when attempting a lazy match, and
2170                  * max_search_depth cannot be 0.  */
2171                 if (c->max_search_depth < 2)
2172                         c->max_search_depth = 2;
2173         } else {
2174
2175                 /* Normal / high compression: Use near-optimal parsing.  */
2176
2177                 c->impl = lzx_compress_near_optimal;
2178
2179                 /* Scale nice_match_length and max_search_depth with the
2180                  * compression level.  */
2181                 c->max_search_depth = (24 * compression_level) / 50;
2182                 c->nice_match_length = (32 * compression_level) / 50;
2183
2184                 /* Set a number of optimization passes appropriate for the
2185                  * compression level.  */
2186
2187                 c->num_optim_passes = 1;
2188
2189                 if (compression_level >= 45)
2190                         c->num_optim_passes++;
2191
2192                 /* Use more optimization passes for higher compression levels.
2193                  * But the more passes there are, the less they help --- so
2194                  * don't add them linearly.  */
2195                 if (compression_level >= 70) {
2196                         c->num_optim_passes++;
2197                         if (compression_level >= 100)
2198                                 c->num_optim_passes++;
2199                         if (compression_level >= 150)
2200                                 c->num_optim_passes++;
2201                         if (compression_level >= 200)
2202                                 c->num_optim_passes++;
2203                         if (compression_level >= 300)
2204                                 c->num_optim_passes++;
2205                 }
2206         }
2207
2208         /* max_search_depth == 0 is invalid.  */
2209         if (c->max_search_depth < 1)
2210                 c->max_search_depth = 1;
2211
2212         if (c->nice_match_length > LZX_MAX_MATCH_LEN)
2213                 c->nice_match_length = LZX_MAX_MATCH_LEN;
2214
2215         lzx_init_offset_slot_tabs(c);
2216         *c_ret = c;
2217         return 0;
2218
2219 oom1:
2220         FREE(c);
2221 oom0:
2222         return WIMLIB_ERR_NOMEM;
2223 }
2224
2225 static size_t
2226 lzx_compress(const void *restrict in, size_t in_nbytes,
2227              void *restrict out, size_t out_nbytes_avail, void *restrict _c)
2228 {
2229         struct lzx_compressor *c = _c;
2230         struct lzx_output_bitstream os;
2231         size_t result;
2232
2233         /* Don't bother trying to compress very small inputs.  */
2234         if (in_nbytes < 100)
2235                 return 0;
2236
2237         /* Copy the input data into the internal buffer and preprocess it.  */
2238         if (c->destructive)
2239                 c->in_buffer = (void *)in;
2240         else
2241                 memcpy(c->in_buffer, in, in_nbytes);
2242         c->in_nbytes = in_nbytes;
2243         lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
2244
2245         /* Initially, the previous Huffman codeword lengths are all zeroes.  */
2246         c->codes_index = 0;
2247         memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2248
2249         /* Initialize the output bitstream.  */
2250         lzx_init_output(&os, out, out_nbytes_avail);
2251
2252         /* Call the compression level-specific compress() function.  */
2253         (*c->impl)(c, &os);
2254
2255         /* Flush the output bitstream and return the compressed size or 0.  */
2256         result = lzx_flush_output(&os);
2257         if (!result && c->destructive)
2258                 lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes);
2259         return result;
2260 }
2261
2262 static void
2263 lzx_free_compressor(void *_c)
2264 {
2265         struct lzx_compressor *c = _c;
2266
2267         if (!c->destructive)
2268                 FREE(c->in_buffer);
2269         FREE(c);
2270 }
2271
2272 const struct compressor_ops lzx_compressor_ops = {
2273         .get_needed_memory  = lzx_get_needed_memory,
2274         .create_compressor  = lzx_create_compressor,
2275         .compress           = lzx_compress,
2276         .free_compressor    = lzx_free_compressor,
2277 };