1 /*
2  * lzx_compress.c
3  *
4  * A compressor for the LZX compression format, as used in WIM files.
5  */
6
7 /*
8  * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
9  *
10  * This file is free software; you can redistribute it and/or modify it under
11  * the terms of the GNU Lesser General Public License as published by the Free
12  * Software Foundation; either version 3 of the License, or (at your option) any
13  * later version.
14  *
15  * This file is distributed in the hope that it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
18  * details.
19  *
20  * You should have received a copy of the GNU Lesser General Public License
21  * along with this file; if not, see http://www.gnu.org/licenses/.
22  */
23
24
25 /*
26  * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27  * compression format, as used in the WIM (Windows IMaging) file format.
28  *
29  * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30  * "Near-optimal" is significantly slower than "lazy", but results in a better
31  * compression ratio.  The "near-optimal" algorithm is used at the default
32  * compression level.
33  *
34  * This file may need some slight modifications to be used outside of the WIM
35  * format.  In particular, in other situations the LZX block header might be
36  * slightly different, and sliding window support might be required.
37  *
38  * Note: LZX is a compression format derived from DEFLATE, the format used by
39  * zlib and gzip.  Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40  * Certain details are quite similar, such as the method for storing Huffman
41  * codes.  However, the main differences are:
42  *
43  * - LZX preprocesses the data in an attempt to make x86 machine code slightly
44  *   more compressible before the actual compression is performed.
45  *
46  * - LZX uses a "main" alphabet which combines literals and matches, with the
47  *   match symbols containing a "length header" (giving all or part of the match
48  *   length) and an "offset slot" (giving, roughly speaking, the order of
49  *   magnitude of the match offset).
50  *
51  * - LZX does not have static Huffman blocks (that is, the kind with preset
52  *   Huffman codes); however it does have two types of dynamic Huffman blocks
53  *   ("verbatim" and "aligned").
54  *
55  * - LZX has a minimum match length of 2 rather than 3.  Length 2 matches can be
56  *   useful, but generally only if the parser is smart about choosing them.
57  *
58  * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59  *   of match offsets.  This is very useful for certain types of files, such as
60  *   binary files that have repeating records.
61  */
62
63 #ifdef HAVE_CONFIG_H
64 #  include "config.h"
65 #endif
66
67 /*
68  * Start a new LZX block (with new Huffman codes) after this many bytes.
69  *
70  * Note: actual block sizes may slightly exceed this value.
71  *
72  * TODO: recursive splitting and cost evaluation might be good for an extremely
73  * high compression mode, but otherwise it is almost always far too slow for how
74  * much it helps.  Perhaps some sort of heuristic would be useful?
75  */
76 #define LZX_DIV_BLOCK_SIZE      32768
77
78 /*
79  * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the
80  * match cache for each byte position.  This value should be high enough so that
81  * nearly all of the time, all matches found in a given block can fit in the match
82  * cache.  However, fallback behavior (immediately terminating the block) on
83  * cache overflow is still required.
84  */
85 #define LZX_CACHE_PER_POS       7
86
87 /*
88  * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
89  * excluding the extra "overflow" entries.  The per-position multiplier is '1 +
90  * LZX_CACHE_PER_POS' instead of 'LZX_CACHE_PER_POS' because there is an
91  * overhead of one lz_match per position, used to hold the match count at that
92  * position.
93  */
94 #define LZX_CACHE_LENGTH        (LZX_DIV_BLOCK_SIZE * (1 + LZX_CACHE_PER_POS))
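
/*
 * For example, with LZX_DIV_BLOCK_SIZE of 32768 and LZX_CACHE_PER_POS of 7,
 * the cache proper holds 32768 * (1 + 7) = 262144 lz_match structures, not
 * counting the extra overflow slots added where 'match_cache' is declared
 * below.
 */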
95
96 /*
97  * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
98  * ever be saved in the match cache for a single position.  Since each match we
99  * save for a single position has a distinct length, we can use the number of
100  * possible match lengths in LZX as this bound.  This bound is guaranteed to be
101  * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
102  * it will never actually be reached.
103  */
104 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
105
106 /*
107  * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
108  * This makes it possible to consider fractional bit costs.
109  *
110  * Note: this is only useful as a statistical trick for when the true costs are
111  * unknown.  In reality, each token in LZX requires a whole number of bits to
112  * output.
113  */
114 #define LZX_BIT_COST            16
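
/*
 * For example, with LZX_BIT_COST of 16, a cost of 16 represents exactly one
 * bit and a cost of 24 represents 1.5 bits.  Since costs are only ever summed
 * and compared against other costs, the scale factor itself cancels out.
 */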
115
116 /*
117  * Consideration of aligned offset costs is disabled for now, due to
118  * insufficient benefit gained from the time spent.
119  */
120 #define LZX_CONSIDER_ALIGNED_COSTS      0
121
122 /*
123  * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
124  * faster algorithm.
125  */
126 #define LZX_MAX_FAST_LEVEL      34
127
128 /*
129  * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
130  * for finding length 2 matches.  This can be as high as 16 (in which case the
131  * hash function is trivial), but using a smaller hash table speeds up
132  * compression due to reduced cache pressure.
133  */
134 #define LZX_HASH2_ORDER         12
135 #define LZX_HASH2_LENGTH        (1UL << LZX_HASH2_ORDER)
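
/*
 * Illustrative sketch only (the hash actually used by the matchfinding code
 * later in this file may differ): a 2-byte hash for this table could combine
 * the two bytes into a 16-bit value and apply a multiplicative hash, keeping
 * the top LZX_HASH2_ORDER bits, e.g.:
 *
 *      u32 v    = p[0] | ((u32)p[1] << 8);
 *      u32 hash = (v * 0x9E3779B1) >> (32 - LZX_HASH2_ORDER);
 *
 * With LZX_HASH2_ORDER equal to 16, the 16-bit value itself could serve
 * directly as the index, which is the "trivial" hash mentioned above.
 */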
136
137 /*
138  * These are the compressor-side limits on the codeword lengths for each Huffman
139  * code.  To make outputting bits slightly faster, some of these limits are
140  * lower than the limits defined by the LZX format.  This does not significantly
141  * affect the compression ratio, at least for the block sizes we use.
142  */
143 #define MAIN_CODEWORD_LIMIT     12      /* 64-bit: can buffer 4 main symbols  */
144 #define LENGTH_CODEWORD_LIMIT   12
145 #define ALIGNED_CODEWORD_LIMIT  7
146 #define PRE_CODEWORD_LIMIT      7
147
148 #include "wimlib/compress_common.h"
149 #include "wimlib/compressor_ops.h"
150 #include "wimlib/error.h"
151 #include "wimlib/lz_extend.h"
152 #include "wimlib/lzx_common.h"
153 #include "wimlib/unaligned.h"
154 #include "wimlib/util.h"
155
156 /* Matchfinders with 16-bit positions  */
157 #define pos_t   u16
158 #define MF_SUFFIX _16
159 #include "wimlib/bt_matchfinder.h"
160 #include "wimlib/hc_matchfinder.h"
161
162 /* Matchfinders with 32-bit positions  */
163 #undef pos_t
164 #undef MF_SUFFIX
165 #define pos_t   u32
166 #define MF_SUFFIX _32
167 #include "wimlib/bt_matchfinder.h"
168 #include "wimlib/hc_matchfinder.h"
169
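/*
 * The double inclusion above is a simple form of templating: each pass
 * defines 'pos_t' and 'MF_SUFFIX' differently, so the headers generate one
 * set of matchfinder structures and functions whose names end in _16 and
 * store positions as u16, and another ending in _32 storing positions as u32,
 * e.g. (naming illustration only):
 *
 *      struct hc_matchfinder_16 / struct hc_matchfinder_32
 *      struct bt_matchfinder_16 / struct bt_matchfinder_32
 *
 * The CALL_HC_MF() and CALL_BT_MF() macros defined later select between the
 * two variants at compile time.
 */
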
170 struct lzx_output_bitstream;
171
172 /* Codewords for the LZX Huffman codes.  */
173 struct lzx_codewords {
174         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
175         u32 len[LZX_LENCODE_NUM_SYMBOLS];
176         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
177 };
178
179 /* Codeword lengths (in bits) for the LZX Huffman codes.
180  * A zero length means the corresponding codeword has zero frequency.  */
181 struct lzx_lens {
182         u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
183         u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
184         u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
185 };
186
187 /* Cost model for near-optimal parsing  */
188 struct lzx_costs {
189
190         /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
191          * length 'len' match that has an offset belonging to 'offset_slot'.  */
192         u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
193
194         /* Cost for each symbol in the main code  */
195         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
196
197         /* Cost for each symbol in the length code  */
198         u32 len[LZX_LENCODE_NUM_SYMBOLS];
199
200 #if LZX_CONSIDER_ALIGNED_COSTS
201         /* Cost for each symbol in the aligned code  */
202         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
203 #endif
204 };
205
206 /* Codewords and lengths for the LZX Huffman codes.  */
207 struct lzx_codes {
208         struct lzx_codewords codewords;
209         struct lzx_lens lens;
210 };
211
212 /* Symbol frequency counters for the LZX Huffman codes.  */
213 struct lzx_freqs {
214         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
215         u32 len[LZX_LENCODE_NUM_SYMBOLS];
216         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
217 };
218
219 /*
220  * Represents a run of literals followed by a match or end-of-block.  This
221  * struct is needed to temporarily store items chosen by the parser, since items
222  * cannot be written until all items for the block have been chosen and the
223  * block's Huffman codes have been computed.
224  */
225 struct lzx_sequence {
226
227         /* The number of literals in the run.  This may be 0.  The literals are
228          * not stored explicitly in this structure; instead, they are read
229          * directly from the uncompressed data.  */
230         u16 litrunlen;
231
232         /* If the next field doesn't indicate end-of-block, then this is the
233          * match length minus LZX_MIN_MATCH_LEN.  */
234         u16 adjusted_length;
235
236         /* If bit 31 is clear, then this field contains the match header in bits
237          * 0-8 and the match offset minus LZX_OFFSET_ADJUSTMENT in bits 9-30.
238          * Otherwise, this sequence's literal run was the last literal run in
239          * the block, so there is no match that follows it.  */
240         u32 adjusted_offset_and_match_hdr;
241 };
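
/*
 * Illustrative example (see lzx_record_match() and lzx_finish_sequence()
 * later in this file): a length-5 match preceded by 3 literals is stored as
 *
 *      seq->litrunlen = 3;
 *      seq->adjusted_length = 5 - LZX_MIN_MATCH_LEN;
 *      seq->adjusted_offset_and_match_hdr = (adjusted_offset << 9) | match_hdr;
 *
 * whereas the final sequence of a block sets bit 31 of
 * 'adjusted_offset_and_match_hdr' (0x80000000) to indicate that no match
 * follows its literal run.
 */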
242
243 /*
244  * This structure represents a byte position in the input buffer and a node in
245  * the graph of possible match/literal choices.
246  *
247  * Logically, each incoming edge to this node is labeled with a literal or a
248  * match that can be taken to reach this position from an earlier position; and
249  * each outgoing edge from this node is labeled with a literal or a match that
250  * can be taken to advance from this position to a later position.
251  */
252 struct lzx_optimum_node {
253
254         /* The cost, in bits, of the lowest-cost path that has been found to
255          * reach this position.  This can change as progressively lower cost
256          * paths are found to reach this position.  */
257         u32 cost;
258
259         /*
260          * The match or literal that was taken to reach this position.  This can
261          * change as progressively lower cost paths are found to reach this
262          * position.
263          *
264          * This variable is divided into two bitfields.
265          *
266          * Literals:
267          *      Low bits are 0, high bits are the literal.
268          *
269          * Explicit offset matches:
270          *      Low bits are the match length, high bits are the offset plus 2.
271          *
272          * Repeat offset matches:
273          *      Low bits are the match length, high bits are the queue index.
274          */
275         u32 item;
276 #define OPTIMUM_OFFSET_SHIFT 9
277 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
278 } _aligned_attribute(8);
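
/*
 * Illustrative 'item' encodings that follow the bitfield description above:
 *
 *      literal 'A' (0x41):
 *              item = ((u32)0x41 << OPTIMUM_OFFSET_SHIFT) | 0;
 *
 *      explicit offset match, length 10, offset 1000:
 *              item = ((u32)(1000 + 2) << OPTIMUM_OFFSET_SHIFT) | 10;
 *
 *      repeat offset match, length 10, queue index 1:
 *              item = ((u32)1 << OPTIMUM_OFFSET_SHIFT) | 10;
 */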
279
280 /*
281  * Least-recently-used queue for match offsets.
282  *
283  * This is represented as a 64-bit integer for efficiency.  There are three
284  * offsets of 21 bits each.  Bit 64 is garbage.
285  */
286 struct lzx_lru_queue {
287         u64 R;
288 };
289
290 #define LZX_QUEUE64_OFFSET_SHIFT 21
291 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
292
293 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
294 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
295 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
296
297 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
298 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
299 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
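
/*
 * Resulting bit layout of the 64-bit queue value (most recently used offset
 * in the lowest bits):
 *
 *      bits  0..20  R0 (most recently used offset)
 *      bits 21..41  R1
 *      bits 42..62  R2
 *      bit  63      unused
 */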
300
301 static inline void
302 lzx_lru_queue_init(struct lzx_lru_queue *queue)
303 {
304         queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
305                    ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
306                    ((u64)1 << LZX_QUEUE64_R2_SHIFT);
307 }
308
309 static inline u64
310 lzx_lru_queue_R0(struct lzx_lru_queue queue)
311 {
312         return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
313 }
314
315 static inline u64
316 lzx_lru_queue_R1(struct lzx_lru_queue queue)
317 {
318         return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
319 }
320
321 static inline u64
322 lzx_lru_queue_R2(struct lzx_lru_queue queue)
323 {
324         return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
325 }
326
327 /* Push a match offset onto the front (most recently used) end of the queue.  */
328 static inline struct lzx_lru_queue
329 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
330 {
331         return (struct lzx_lru_queue) {
332                 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
333         };
334 }
335
336 /* Pop a match offset off the front (most recently used) end of the queue.  */
337 static inline u32
338 lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
339 {
340         u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
341         queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
342         return offset;
343 }
344
345 /* Swap a match offset to the front of the queue.  */
346 static inline struct lzx_lru_queue
347 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
348 {
349         if (idx == 0)
350                 return queue;
351
352         if (idx == 1)
353                 return (struct lzx_lru_queue) {
354                         .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
355                              (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
356                              (queue.R & LZX_QUEUE64_R2_MASK),
357                 };
358
359         return (struct lzx_lru_queue) {
360                 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
361                      (queue.R & LZX_QUEUE64_R1_MASK) |
362                      (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
363         };
364 }
365
366 /* The main LZX compressor structure  */
367 struct lzx_compressor {
368
369         /* The "nice" match length: if a match of this length is found, then
370          * choose it immediately without further consideration.  */
371         unsigned nice_match_length;
372
373         /* The maximum search depth: consider at most this many potential
374          * matches at each position.  */
375         unsigned max_search_depth;
376
377         /* The log base 2 of the LZX window size for LZ match offset encoding
378          * purposes.  This will be >= LZX_MIN_WINDOW_ORDER and <=
379          * LZX_MAX_WINDOW_ORDER.  */
380         unsigned window_order;
381
382         /* The number of symbols in the main alphabet.  This depends on
383          * @window_order, since @window_order determines the maximum possible
384          * offset.  */
385         unsigned num_main_syms;
386
387         /* Number of optimization passes per block  */
388         unsigned num_optim_passes;
389
390         /* The preprocessed buffer of data being compressed  */
391         u8 *in_buffer;
392
393         /* The number of bytes of data to be compressed, which is the number of
394          * bytes of data in @in_buffer that are actually valid.  */
395         size_t in_nbytes;
396
397         /* Pointer to the compress() implementation chosen at allocation time */
398         void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
399
400         /* If true, the compressor need not preserve the input buffer if it
401          * compresses the data successfully.  */
402         bool destructive;
403
404         /* The Huffman symbol frequency counters for the current block.  */
405         struct lzx_freqs freqs;
406
407         /* The Huffman codes for the current and previous blocks.  The one with
408          * index 'codes_index' is for the current block, and the other one is
409          * for the previous block.  */
410         struct lzx_codes codes[2];
411         unsigned codes_index;
412
413         /* The matches and literals that the parser has chosen for the current
414          * block.  The required length of this array is limited by the maximum
415          * number of matches that can ever be chosen for a single block.  */
416         struct lzx_sequence chosen_sequences[DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN)];
417
418         /* Tables for mapping adjusted offsets to offset slots  */
419
420         /* offset slots [0, 29]  */
421         u8 offset_slot_tab_1[32768];
422
423         /* offset slots [30, 49]  */
424         u8 offset_slot_tab_2[128];
425
426         union {
427                 /* Data for greedy or lazy parsing  */
428                 struct {
429                         /* Hash chains matchfinder (MUST BE LAST!!!)  */
430                         union {
431                                 struct hc_matchfinder_16 hc_mf_16;
432                                 struct hc_matchfinder_32 hc_mf_32;
433                         };
434                 };
435
436                 /* Data for near-optimal parsing  */
437                 struct {
438                         /*
439                          * The graph nodes for the current block.
440                          *
441                          * We need at least 'LZX_DIV_BLOCK_SIZE +
442                          * LZX_MAX_MATCH_LEN - 1' nodes because that is the
443                          * maximum block size that may be used.  Add 1 because
444                          * we need a node to represent end-of-block.
445                          *
446                          * It is possible that nodes past end-of-block are
447                          * accessed during match consideration, but this can
448                          * only occur if the block was truncated at
449                          * LZX_DIV_BLOCK_SIZE.  So the same bound still applies.
450                          * Note that since nodes past the end of the block will
451                          * never actually have an effect on the items that are
452                          * chosen for the block, it makes no difference what
453                          * their costs are initialized to (if anything).
454                          */
455                         struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE +
456                                                               LZX_MAX_MATCH_LEN - 1 + 1];
457
458                         /* The cost model for the current block  */
459                         struct lzx_costs costs;
460
461                         /*
462                          * Cached matches for the current block.  This array
463                          * contains the matches that were found at each position
464                          * in the block.  Specifically, for each position, there
465                          * is a special 'struct lz_match' whose 'length' field
466                          * contains the number of matches that were found at
467                          * that position; this is followed by the matches
468                          * themselves, if any, sorted by strictly increasing
469                          * length.
470                          *
471                          * Note: in rare cases, there will be a very high number
472                          * of matches in the block and this array will overflow.
473                          * If this happens, we force the end of the current
474                          * block.  LZX_CACHE_LENGTH is the length at which we
475                          * actually check for overflow.  The extra slots beyond
476                          * this are enough to absorb the worst case overflow,
477                          * which occurs if starting at
478                          * &match_cache[LZX_CACHE_LENGTH - 1], we write the
479                          * match count header, then write
480                          * LZX_MAX_MATCHES_PER_POS matches, then skip searching
481                          * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
482                          * write the match count header for each.
483                          */
484                         struct lz_match match_cache[LZX_CACHE_LENGTH +
485                                                     LZX_MAX_MATCHES_PER_POS +
486                                                     LZX_MAX_MATCH_LEN - 1];
487
488                         /* Hash table for finding length 2 matches  */
489                         u32 hash2_tab[LZX_HASH2_LENGTH];
490
491                         /* Binary trees matchfinder (MUST BE LAST!!!)  */
492                         union {
493                                 struct bt_matchfinder_16 bt_mf_16;
494                                 struct bt_matchfinder_32 bt_mf_32;
495                         };
496                 };
497         };
498 };
499
500 /*
501  * Will a matchfinder using 16-bit positions be sufficient for compressing
502  * buffers of up to the specified size?  The limit could be 65536 bytes, but we
503  * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
504  * This requires that the limit be no more than the length of offset_slot_tab_1
505  * (currently 32768).
506  */
507 static inline bool
508 lzx_is_16_bit(size_t max_bufsize)
509 {
510         STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
511         return max_bufsize <= 32768;
512 }
513
514 /*
515  * The following macros call either the 16-bit or the 32-bit version of a
516  * matchfinder function based on the value of 'is_16_bit', which will be known
517  * at compilation time.
518  */
519
520 #define CALL_HC_MF(is_16_bit, c, funcname, ...)                               \
521         ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
522                        CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__));
523
524 #define CALL_BT_MF(is_16_bit, c, funcname, ...)                               \
525         ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \
526                        CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__));
527
528 /*
529  * Structure to keep track of the current state of sending bits to the
530  * compressed output buffer.
531  *
532  * The LZX bitstream is encoded as a sequence of 16-bit coding units.
533  */
534 struct lzx_output_bitstream {
535
536         /* Bits that haven't yet been written to the output buffer.  */
537         machine_word_t bitbuf;
538
539         /* Number of bits currently held in @bitbuf.  */
540         u32 bitcount;
541
542         /* Pointer to the start of the output buffer.  */
543         u8 *start;
544
545         /* Pointer to the position in the output buffer at which the next coding
546          * unit should be written.  */
547         u8 *next;
548
549         /* Pointer just past the end of the output buffer, rounded down to a
550          * 2-byte boundary.  */
551         u8 *end;
552 };
553
554 /* Can the specified number of bits always be added to 'bitbuf' after any
555  * pending 16-bit coding units have been flushed?  */
556 #define CAN_BUFFER(n)   ((n) <= (8 * sizeof(machine_word_t)) - 16)
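
/*
 * For example, with a 64-bit machine_word_t, CAN_BUFFER(n) holds for any
 * n <= 48, so four 12-bit main codewords (4 * MAIN_CODEWORD_LIMIT = 48 bits)
 * can be accumulated between flushes; with a 32-bit machine_word_t it holds
 * only for n <= 16, so at most one codeword is buffered at a time.
 */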
557
558 /*
559  * Initialize the output bitstream.
560  *
561  * @os
562  *      The output bitstream structure to initialize.
563  * @buffer
564  *      The buffer being written to.
565  * @size
566  *      Size of @buffer, in bytes.
567  */
568 static void
569 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
570 {
571         os->bitbuf = 0;
572         os->bitcount = 0;
573         os->start = buffer;
574         os->next = os->start;
575         os->end = os->start + (size & ~1);
576 }
577
578 /* Add some bits to the bitbuffer variable of the output bitstream.  The caller
579  * must make sure there is enough room.  */
580 static inline void
581 lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
582 {
583         os->bitbuf = (os->bitbuf << num_bits) | bits;
584         os->bitcount += num_bits;
585 }
586
587 /* Flush bits from the bitbuffer variable to the output buffer.  'max_num_bits'
588  * specifies the maximum number of bits that may have been added since the last
589  * flush.  */
590 static inline void
591 lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
592 {
593         if (os->end - os->next < 6)
594                 return;
595         put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 16), os->next + 0);
596         if (max_num_bits > 16)
597                 put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 32), os->next + 2);
598         if (max_num_bits > 32)
599                 put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 48), os->next + 4);
600         os->next += (os->bitcount >> 4) << 1;
601         os->bitcount &= 15;
602 }
603
604 /* Add at most 16 bits to the bitbuffer and flush it.  */
605 static inline void
606 lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
607 {
608         lzx_add_bits(os, bits, num_bits);
609         lzx_flush_bits(os, 16);
610 }
611
612 /*
613  * Flush the last coding unit to the output buffer if needed.  Return the total
614  * number of bytes written to the output buffer, or 0 if an overflow occurred.
615  */
616 static u32
617 lzx_flush_output(struct lzx_output_bitstream *os)
618 {
619         if (os->end - os->next < 6)
620                 return 0;
621
622         if (os->bitcount != 0) {
623                 put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next);
624                 os->next += 2;
625         }
626
627         return os->next - os->start;
628 }
629
630 /* Build the main, length, and aligned offset Huffman codes used in LZX.
631  *
632  * This takes as input the frequency tables for each code and produces as output
633  * a set of tables that map symbols to codewords and codeword lengths.  */
634 static void
635 lzx_make_huffman_codes(struct lzx_compressor *c)
636 {
637         const struct lzx_freqs *freqs = &c->freqs;
638         struct lzx_codes *codes = &c->codes[c->codes_index];
639
640         STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
641                       MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
642         STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 9 &&
643                       LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
644         STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
645                       ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
646
647         make_canonical_huffman_code(c->num_main_syms,
648                                     MAIN_CODEWORD_LIMIT,
649                                     freqs->main,
650                                     codes->lens.main,
651                                     codes->codewords.main);
652
653         make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
654                                     LENGTH_CODEWORD_LIMIT,
655                                     freqs->len,
656                                     codes->lens.len,
657                                     codes->codewords.len);
658
659         make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
660                                     ALIGNED_CODEWORD_LIMIT,
661                                     freqs->aligned,
662                                     codes->lens.aligned,
663                                     codes->codewords.aligned);
664 }
665
666 /* Reset the symbol frequencies for the LZX Huffman codes.  */
667 static void
668 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
669 {
670         memset(&c->freqs, 0, sizeof(c->freqs));
671 }
672
673 static unsigned
674 lzx_compute_precode_items(const u8 lens[restrict],
675                           const u8 prev_lens[restrict],
676                           u32 precode_freqs[restrict],
677                           unsigned precode_items[restrict])
678 {
679         unsigned *itemptr;
680         unsigned run_start;
681         unsigned run_end;
682         unsigned extra_bits;
683         int delta;
684         u8 len;
685
686         itemptr = precode_items;
687         run_start = 0;
688
689         while (!((len = lens[run_start]) & 0x80)) {
690
691                 /* len = the length being repeated  */
692
693                 /* Find the next run of codeword lengths.  */
694
695                 run_end = run_start + 1;
696
697                 /* Fast case for a single length.  */
698                 if (likely(len != lens[run_end])) {
699                         delta = prev_lens[run_start] - len;
700                         if (delta < 0)
701                                 delta += 17;
702                         precode_freqs[delta]++;
703                         *itemptr++ = delta;
704                         run_start++;
705                         continue;
706                 }
707
708                 /* Extend the run.  */
709                 do {
710                         run_end++;
711                 } while (len == lens[run_end]);
712
713                 if (len == 0) {
714                         /* Run of zeroes.  */
715
716                         /* Symbol 18: RLE 20 to 51 zeroes at a time.  */
717                         while ((run_end - run_start) >= 20) {
718                                 extra_bits = min((run_end - run_start) - 20, 0x1f);
719                                 precode_freqs[18]++;
720                                 *itemptr++ = 18 | (extra_bits << 5);
721                                 run_start += 20 + extra_bits;
722                         }
723
724                         /* Symbol 17: RLE 4 to 19 zeroes at a time.  */
725                         if ((run_end - run_start) >= 4) {
726                                 extra_bits = min((run_end - run_start) - 4, 0xf);
727                                 precode_freqs[17]++;
728                                 *itemptr++ = 17 | (extra_bits << 5);
729                                 run_start += 4 + extra_bits;
730                         }
731                 } else {
732
733                         /* A run of nonzero lengths. */
734
735                         /* Symbol 19: RLE 4 to 5 of any length at a time.  */
736                         while ((run_end - run_start) >= 4) {
737                                 extra_bits = (run_end - run_start) > 4;
738                                 delta = prev_lens[run_start] - len;
739                                 if (delta < 0)
740                                         delta += 17;
741                                 precode_freqs[19]++;
742                                 precode_freqs[delta]++;
743                                 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
744                                 run_start += 4 + extra_bits;
745                         }
746                 }
747
748                 /* Output any remaining lengths without RLE.  */
749                 while (run_start != run_end) {
750                         delta = prev_lens[run_start] - len;
751                         if (delta < 0)
752                                 delta += 17;
753                         precode_freqs[delta]++;
754                         *itemptr++ = delta;
755                         run_start++;
756                 }
757         }
758
759         return itemptr - precode_items;
760 }
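
/*
 * Worked examples of the item encoding produced above:
 *
 * - A run of 30 zero lengths becomes precode symbol 18 with 10 extra repeat
 *   bits, packed as the single item '18 | (10 << 5)' (20 + 10 = 30 zeroes).
 *
 * - A run of 5 identical nonzero lengths becomes precode symbol 19 with 1
 *   extra bit and the delta-coded length in the bits above it, packed as
 *   '19 | (1 << 5) | (delta << 6)' (4 + 1 = 5 lengths).
 */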
761
762 /*
763  * Output a Huffman code in the compressed form used in LZX.
764  *
765  * The Huffman code is represented in the output as a logical series of codeword
766  * lengths from which the Huffman code, which must be in canonical form, can be
767  * reconstructed.
768  *
769  * The codeword lengths are themselves compressed using a separate Huffman code,
770  * the "precode", which contains a symbol for each possible codeword length in
771  * the larger code as well as several special symbols to represent repeated
772  * codeword lengths (a form of run-length encoding).  The precode is itself
773  * constructed in canonical form, and its codeword lengths are represented
774  * literally in 20 4-bit fields that immediately precede the compressed codeword
775  * lengths of the larger code.
776  *
777  * Furthermore, the codeword lengths of the larger code are actually represented
778  * as deltas from the codeword lengths of the corresponding code in the previous
779  * block.
780  *
781  * @os:
782  *      Bitstream to which to write the compressed Huffman code.
783  * @lens:
784  *      The codeword lengths, indexed by symbol, in the Huffman code.
785  * @prev_lens:
786  *      The codeword lengths, indexed by symbol, in the corresponding Huffman
787  *      code in the previous block, or all zeroes if this is the first block.
788  * @num_lens:
789  *      The number of symbols in the Huffman code.
790  */
791 static void
792 lzx_write_compressed_code(struct lzx_output_bitstream *os,
793                           const u8 lens[restrict],
794                           const u8 prev_lens[restrict],
795                           unsigned num_lens)
796 {
797         u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
798         u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
799         u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
800         unsigned precode_items[num_lens];
801         unsigned num_precode_items;
802         unsigned precode_item;
803         unsigned precode_sym;
804         unsigned i;
805         u8 saved = lens[num_lens];
806         *(u8 *)(lens + num_lens) = 0x80;
807
808         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
809                 precode_freqs[i] = 0;
810
811         /* Compute the "items" (RLE / literal tokens and extra bits) with which
812          * the codeword lengths in the larger code will be output.  */
813         num_precode_items = lzx_compute_precode_items(lens,
814                                                       prev_lens,
815                                                       precode_freqs,
816                                                       precode_items);
817
818         /* Build the precode.  */
819         STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 &&
820                       PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN);
821         make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
822                                     PRE_CODEWORD_LIMIT,
823                                     precode_freqs, precode_lens,
824                                     precode_codewords);
825
826         /* Output the lengths of the codewords in the precode.  */
827         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
828                 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
829
830         /* Output the encoded lengths of the codewords in the larger code.  */
831         for (i = 0; i < num_precode_items; i++) {
832                 precode_item = precode_items[i];
833                 precode_sym = precode_item & 0x1F;
834                 lzx_add_bits(os, precode_codewords[precode_sym],
835                              precode_lens[precode_sym]);
836                 if (precode_sym >= 17) {
837                         if (precode_sym == 17) {
838                                 lzx_add_bits(os, precode_item >> 5, 4);
839                         } else if (precode_sym == 18) {
840                                 lzx_add_bits(os, precode_item >> 5, 5);
841                         } else {
842                                 lzx_add_bits(os, (precode_item >> 5) & 1, 1);
843                                 precode_sym = precode_item >> 6;
844                                 lzx_add_bits(os, precode_codewords[precode_sym],
845                                              precode_lens[precode_sym]);
846                         }
847                 }
848                 STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1));
849                 lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1);
850         }
851
852         *(u8 *)(lens + num_lens) = saved;
853 }
854
855 /*
856  * Write all matches and literal bytes (which were precomputed) in an LZX
857  * compressed block to the output bitstream in the final compressed
858  * representation.
859  *
860  * @os
861  *      The output bitstream.
862  * @block_type
863  *      The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
864  *      LZX_BLOCKTYPE_VERBATIM).
865  * @block_data
866  *      The uncompressed data of the block.
867  * @sequences
868  *      The matches and literals to output, given as a series of sequences.
869  * @codes
870  *      The main, length, and aligned offset Huffman codes for the current
871  *      LZX compressed block.
872  */
873 static void
874 lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
875                     const u8 *block_data, const struct lzx_sequence sequences[],
876                     const struct lzx_codes *codes)
877 {
878         const struct lzx_sequence *seq = sequences;
879         u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED);
880
881         for (;;) {
882                 /* Output the next sequence.  */
883
884                 unsigned litrunlen = seq->litrunlen;
885                 unsigned match_hdr;
886                 unsigned main_symbol;
887                 unsigned adjusted_length;
888                 u32 adjusted_offset;
889                 unsigned offset_slot;
890                 unsigned num_extra_bits;
891                 u32 extra_bits;
892
893                 /* Output the literal run of the sequence.  */
894
895                 if (litrunlen) {  /* Is the literal run nonempty?  */
896
897                         /* Verify optimization is enabled on 64-bit  */
898                         STATIC_ASSERT(sizeof(machine_word_t) < 8 ||
899                                       CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT));
900
901                         if (CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT)) {
902
903                                 /* 64-bit: write 4 literals at a time.  */
904                                 while (litrunlen >= 4) {
905                                         unsigned lit0 = block_data[0];
906                                         unsigned lit1 = block_data[1];
907                                         unsigned lit2 = block_data[2];
908                                         unsigned lit3 = block_data[3];
909                                         lzx_add_bits(os, codes->codewords.main[lit0], codes->lens.main[lit0]);
910                                         lzx_add_bits(os, codes->codewords.main[lit1], codes->lens.main[lit1]);
911                                         lzx_add_bits(os, codes->codewords.main[lit2], codes->lens.main[lit2]);
912                                         lzx_add_bits(os, codes->codewords.main[lit3], codes->lens.main[lit3]);
913                                         lzx_flush_bits(os, 4 * MAIN_CODEWORD_LIMIT);
914                                         block_data += 4;
915                                         litrunlen -= 4;
916                                 }
917                                 if (litrunlen--) {
918                                         unsigned lit = *block_data++;
919                                         lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
920                                         if (litrunlen--) {
921                                                 unsigned lit = *block_data++;
922                                                 lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
923                                                 if (litrunlen--) {
924                                                         unsigned lit = *block_data++;
925                                                         lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
926                                                         lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
927                                                 } else {
928                                                         lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
929                                                 }
930                                         } else {
931                                                 lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT);
932                                         }
933                                 }
934                         } else {
935                                 /* 32-bit: write 1 literal at a time.  */
936                                 do {
937                                         unsigned lit = *block_data++;
938                                         lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
939                                         lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
940                                 } while (--litrunlen);
941                         }
942                 }
943
944                 /* Was this the last literal run?  */
945                 if (seq->adjusted_offset_and_match_hdr & 0x80000000)
946                         return;
947
948                 /* Nope; output the match.  */
949
950                 match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
951                 main_symbol = LZX_NUM_CHARS + match_hdr;
952                 adjusted_length = seq->adjusted_length;
953
954                 block_data += adjusted_length + LZX_MIN_MATCH_LEN;
955
956                 offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
957                 adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
958
959                 num_extra_bits = lzx_extra_offset_bits[offset_slot];
960                 extra_bits = adjusted_offset - lzx_offset_slot_base[offset_slot];
961
962         #define MAX_MATCH_BITS  (MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
963                                  14 + ALIGNED_CODEWORD_LIMIT)
964
965                 /* Verify optimization is enabled on 64-bit  */
966                 STATIC_ASSERT(sizeof(machine_word_t) < 8 || CAN_BUFFER(MAX_MATCH_BITS));
967
968                 /* Output the main symbol for the match.  */
969
970                 lzx_add_bits(os, codes->codewords.main[main_symbol],
971                              codes->lens.main[main_symbol]);
972                 if (!CAN_BUFFER(MAX_MATCH_BITS))
973                         lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
974
975                 /* If needed, output the length symbol for the match.  */
976
977                 if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
978                         lzx_add_bits(os, codes->codewords.len[adjusted_length - LZX_NUM_PRIMARY_LENS],
979                                      codes->lens.len[adjusted_length - LZX_NUM_PRIMARY_LENS]);
980                         if (!CAN_BUFFER(MAX_MATCH_BITS))
981                                 lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
982                 }
983
984                 /* Output the extra offset bits for the match.  In aligned
985                  * offset blocks, the lowest 3 bits of the adjusted offset are
986                  * Huffman-encoded using the aligned offset code, provided that
987                  * at least 3 extra offset bits are required.  All other
988                  * extra offset bits are output verbatim.  */
989
990                 if ((adjusted_offset & ones_if_aligned) >= 16) {
991
992                         lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
993                                      num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
994                         if (!CAN_BUFFER(MAX_MATCH_BITS))
995                                 lzx_flush_bits(os, 14);
996
997                         lzx_add_bits(os, codes->codewords.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK],
998                                      codes->lens.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]);
999                         if (!CAN_BUFFER(MAX_MATCH_BITS))
1000                                 lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
1001                 } else {
1002                         lzx_add_bits(os, extra_bits, num_extra_bits);
1003                         if (!CAN_BUFFER(MAX_MATCH_BITS))
1004                                 lzx_flush_bits(os, 17);
1005                 }
1006
1007                 if (CAN_BUFFER(MAX_MATCH_BITS))
1008                         lzx_flush_bits(os, MAX_MATCH_BITS);
1009
1010                 /* Advance to the next sequence.  */
1011                 seq++;
1012         }
1013 }
1014
1015 static void
1016 lzx_write_compressed_block(const u8 *block_begin,
1017                            int block_type,
1018                            u32 block_size,
1019                            unsigned window_order,
1020                            unsigned num_main_syms,
1021                            const struct lzx_sequence sequences[],
1022                            const struct lzx_codes * codes,
1023                            const struct lzx_lens * prev_lens,
1024                            struct lzx_output_bitstream * os)
1025 {
1026         LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
1027                    block_type == LZX_BLOCKTYPE_VERBATIM);
1028
1029         /* The first three bits indicate the type of block and are one of the
1030          * LZX_BLOCKTYPE_* constants.  */
1031         lzx_write_bits(os, block_type, 3);
1032
1033         /* Output the block size.
1034          *
1035          * The original LZX format seemed to always encode the block size in 3
1036          * bytes.  However, the implementation in WIMGAPI, as used in WIM files,
1037          * uses the first bit to indicate whether the block is the default size
1038          * (32768) or a different size given explicitly by the next 16 bits.
1039          *
1040          * By default, this compressor uses a window size of 32768 and therefore
1041          * follows the WIMGAPI behavior.  However, this compressor also supports
1042          * window sizes greater than 32768 bytes, which do not appear to be
1043          * supported by WIMGAPI.  In such cases, we retain the default size bit
1044          * to mean a size of 32768 bytes but output non-default block sizes in 24
1045          * bits rather than 16.  The compatibility of this behavior is unknown
1046          * because WIMs created with chunk size greater than 32768 can seemingly
1047          * only be opened by wimlib anyway.  */
1048         if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
1049                 lzx_write_bits(os, 1, 1);
1050         } else {
1051                 lzx_write_bits(os, 0, 1);
1052
1053                 if (window_order >= 16)
1054                         lzx_write_bits(os, block_size >> 16, 8);
1055
1056                 lzx_write_bits(os, block_size & 0xFFFF, 16);
1057         }
1058
1059         /* If it's an aligned offset block, output the aligned offset code.  */
1060         if (block_type == LZX_BLOCKTYPE_ALIGNED) {
1061                 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1062                         lzx_write_bits(os, codes->lens.aligned[i],
1063                                        LZX_ALIGNEDCODE_ELEMENT_SIZE);
1064                 }
1065         }
1066
1067         /* Output the main code (two parts).  */
1068         lzx_write_compressed_code(os, codes->lens.main,
1069                                   prev_lens->main,
1070                                   LZX_NUM_CHARS);
1071         lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
1072                                   prev_lens->main + LZX_NUM_CHARS,
1073                                   num_main_syms - LZX_NUM_CHARS);
1074
1075         /* Output the length code.  */
1076         lzx_write_compressed_code(os, codes->lens.len,
1077                                   prev_lens->len,
1078                                   LZX_LENCODE_NUM_SYMBOLS);
1079
1080         /* Output the compressed matches and literals.  */
1081         lzx_write_sequences(os, block_type, block_begin, sequences, codes);
1082 }
1083
1084 /* Given the frequencies of symbols in an LZX-compressed block and the
1085  * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
1086  * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
1087  * will take fewer bits to output.  */
1088 static int
1089 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1090                                const struct lzx_codes * codes)
1091 {
1092         u32 aligned_cost = 0;
1093         u32 verbatim_cost = 0;
1094
1095         /* A verbatim block requires 3 bits in each place that an aligned symbol
1096          * would be used in an aligned offset block.  */
1097         for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1098                 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
1099                 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1100         }
1101
1102         /* Account for output of the aligned offset code.  */
1103         aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1104
1105         if (aligned_cost < verbatim_cost)
1106                 return LZX_BLOCKTYPE_ALIGNED;
1107         else
1108                 return LZX_BLOCKTYPE_VERBATIM;
1109 }
1110
1111 /*
1112  * Return the offset slot for the specified adjusted match offset, using the
1113  * compressor's acceleration tables to speed up the mapping.
1114  */
1115 static inline unsigned
1116 lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
1117                          bool is_16_bit)
1118 {
1119         if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
1120                 return c->offset_slot_tab_1[adjusted_offset];
1121         return c->offset_slot_tab_2[adjusted_offset >> 14];
1122 }
1123
1124 /*
1125  * Finish an LZX block:
1126  *
1127  * - build the Huffman codes
1128  * - decide whether to output the block as VERBATIM or ALIGNED
1129  * - output the block
1130  * - swap the indices of the current and previous Huffman codes
1131  */
1132 static void
1133 lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
1134                  const u8 *block_begin, u32 block_size, u32 seq_idx)
1135 {
1136         int block_type;
1137
1138         lzx_make_huffman_codes(c);
1139
1140         block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
1141                                                     &c->codes[c->codes_index]);
1142         lzx_write_compressed_block(block_begin,
1143                                    block_type,
1144                                    block_size,
1145                                    c->window_order,
1146                                    c->num_main_syms,
1147                                    &c->chosen_sequences[seq_idx],
1148                                    &c->codes[c->codes_index],
1149                                    &c->codes[c->codes_index ^ 1].lens,
1150                                    os);
1151         c->codes_index ^= 1;
1152 }
1153
1154 /* Tally the Huffman symbol for a literal and increment the literal run length.
1155  */
1156 static inline void
1157 lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
1158 {
1159         c->freqs.main[literal]++;
1160         ++*litrunlen_p;
1161 }
1162
1163 /* Tally the Huffman symbol for a match, save the match data and the length of
1164  * the preceding literal run in the next lzx_sequence, and update the recent
1165  * offsets queue.  */
1166 static inline void
1167 lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
1168                  u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
1169                  u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
1170 {
1171         u32 litrunlen = *litrunlen_p;
1172         struct lzx_sequence *next_seq = *next_seq_p;
1173         unsigned offset_slot;
1174         unsigned v;
1175
1176         v = length - LZX_MIN_MATCH_LEN;
1177
1178         /* Save the literal run length and adjusted length.  */
1179         next_seq->litrunlen = litrunlen;
1180         next_seq->adjusted_length = v;
1181
1182         /* Compute the length header and tally the length symbol if needed  */
1183         if (v >= LZX_NUM_PRIMARY_LENS) {
1184                 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1185                 v = LZX_NUM_PRIMARY_LENS;
1186         }
1187
1188         /* Compute the offset slot  */
1189         offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1190
1191         /* Compute the match header.  */
1192         v += offset_slot * LZX_NUM_LEN_HEADERS;
1193
1194         /* Save the adjusted offset and match header.  */
1195         next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v;
1196
1197         /* Tally the main symbol.  */
1198         c->freqs.main[LZX_NUM_CHARS + v]++;
1199
1200         /* Update the recent offsets queue.  */
1201         if (offset_data < LZX_NUM_RECENT_OFFSETS) {
1202                 /* Repeat offset match  */
1203                 swap(recent_offsets[0], recent_offsets[offset_data]);
1204         } else {
1205                 /* Explicit offset match  */
1206
1207                 /* Tally the aligned offset symbol if needed  */
1208                 if (offset_data >= 16)
1209                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1210
1211                 recent_offsets[2] = recent_offsets[1];
1212                 recent_offsets[1] = recent_offsets[0];
1213                 recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT;
1214         }
1215
1216         /* Reset the literal run length and advance to the next sequence.  */
1217         *next_seq_p = next_seq + 1;
1218         *litrunlen_p = 0;
1219 }
1220
1221 /* Finish the last lzx_sequence.  The last lzx_sequence is just a literal run;
1222  * there is no match.  This literal run may be empty.  */
1223 static inline void
1224 lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
1225 {
1226         last_seq->litrunlen = litrunlen;
1227
1228         /* Special value to mark last sequence  */
1229         last_seq->adjusted_offset_and_match_hdr = 0x80000000;
1230 }
1231
1232 /*
1233  * Given the minimum-cost path computed through the item graph for the current
1234  * block, walk the path and count how many of each symbol in each Huffman-coded
1235  * alphabet would be required to output the items (matches and literals) along
1236  * the path.
1237  *
1238  * Note that the path will be walked backwards (from the end of the block to the
1239  * beginning of the block), but this doesn't matter because this function only
1240  * computes frequencies.
1241  */
1242 static inline void
1243 lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1244 {
1245         u32 node_idx = block_size;
1246         for (;;) {
1247                 u32 len;
1248                 u32 offset_data;
1249                 unsigned v;
1250                 unsigned offset_slot;
1251
1252                 /* Tally literals until either a match or the beginning of the
1253                  * block is reached.  */
1254                 for (;;) {
1255                         u32 item = c->optimum_nodes[node_idx].item;
1256
1257                         len = item & OPTIMUM_LEN_MASK;
1258                         offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1259
1260                         if (len != 0)  /* Not a literal?  */
1261                                 break;
1262
1263                         /* Tally the main symbol for the literal.  */
1264                         c->freqs.main[offset_data]++;
1265
1266                         if (--node_idx == 0) /* Beginning of block was reached?  */
1267                                 return;
1268                 }
1269
1270                 node_idx -= len;
1271
1272                 /* Tally a match.  */
1273
1274                 /* Tally the aligned offset symbol if needed.  */
1275                 if (offset_data >= 16)
1276                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1277
1278                 /* Tally the length symbol if needed.  */
1279                 v = len - LZX_MIN_MATCH_LEN;
1280                 if (v >= LZX_NUM_PRIMARY_LENS) {
1281                         c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1282                         v = LZX_NUM_PRIMARY_LENS;
1283                 }
1284
1285                 /* Tally the main symbol.  */
1286                 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1287                 v += offset_slot * LZX_NUM_LEN_HEADERS;
1288                 c->freqs.main[LZX_NUM_CHARS + v]++;
1289
1290                 if (node_idx == 0) /* Beginning of block was reached?  */
1291                         return;
1292         }
1293 }
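
/*
 * Illustrative example of the 'item' encoding used above: each node's item
 * packs (offset_data << OPTIMUM_OFFSET_SHIFT) | length.  A length of 0 marks
 * a literal, with offset_data holding the literal byte (which equals its main
 * symbol).  For matches, offset_data values 0, 1, and 2 denote the recent
 * offsets R0, R1, and R2, while larger values are explicit offsets plus
 * LZX_OFFSET_ADJUSTMENT.  For example, a length-5 match against R2 would be
 * stored as (2 << OPTIMUM_OFFSET_SHIFT) | 5.
 */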
1294
1295 /*
1296  * Like lzx_tally_item_list(), but this function also generates the list of
1297  * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
1298  * ready to be output to the bitstream after the Huffman codes are computed.
1299  * The lzx_sequences will be written to decreasing memory addresses as the path
1300  * is walked backwards, which means they will end up in the expected
1301  * first-to-last order.  The return value is the index in c->chosen_sequences at
1302  * which the lzx_sequences begin.
1303  */
1304 static inline u32
1305 lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1306 {
1307         u32 node_idx = block_size;
1308         u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
1309         u32 lit_start_node;
1310
1311         /* Special value to mark last sequence  */
1312         c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000;
1313
1314         lit_start_node = node_idx;
1315         for (;;) {
1316                 u32 len;
1317                 u32 offset_data;
1318                 unsigned v;
1319                 unsigned offset_slot;
1320
1321                 /* Record literals until either a match or the beginning of the
1322                  * block is reached.  */
1323                 for (;;) {
1324                         u32 item = c->optimum_nodes[node_idx].item;
1325
1326                         len = item & OPTIMUM_LEN_MASK;
1327                         offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1328
1329                         if (len != 0) /* Not a literal?  */
1330                                 break;
1331
1332                         /* Tally the main symbol for the literal.  */
1333                         c->freqs.main[offset_data]++;
1334
1335                         if (--node_idx == 0) /* Beginning of block was reached?  */
1336                                 goto out;
1337                 }
1338
1339                 /* Save the literal run length for the next sequence (the
1340                  * "previous sequence" when walking backwards).  */
1341                 c->chosen_sequences[seq_idx--].litrunlen = lit_start_node - node_idx;
1342                 node_idx -= len;
1343                 lit_start_node = node_idx;
1344
1345                 /* Record a match.  */
1346
1347                 /* Tally the aligned offset symbol if needed.  */
1348                 if (offset_data >= 16)
1349                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1350
1351                 /* Save the adjusted length.  */
1352                 v = len - LZX_MIN_MATCH_LEN;
1353                 c->chosen_sequences[seq_idx].adjusted_length = v;
1354
1355                 /* Tally the length symbol if needed.  */
1356                 if (v >= LZX_NUM_PRIMARY_LENS) {
1357                         c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1358                         v = LZX_NUM_PRIMARY_LENS;
1359                 }
1360
1361                 /* Tally the main symbol.  */
1362                 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1363                 v += offset_slot * LZX_NUM_LEN_HEADERS;
1364                 c->freqs.main[LZX_NUM_CHARS + v]++;
1365
1366                 /* Save the adjusted offset and match header.  */
1367                 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr =
1368                                 (offset_data << 9) | v;
1369
1370                 if (node_idx == 0) /* Beginning of block was reached?  */
1371                         goto out;
1372         }
1373
1374 out:
1375         /* Save the literal run length for the first sequence.  */
1376         c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
1377
1378         /* Return the index in c->chosen_sequences at which the lzx_sequences
1379          * begin.  */
1380         return seq_idx;
1381 }
1382
1383 /*
1384  * Find an inexpensive path through the graph of possible match/literal choices
1385  * for the current block.  The nodes of the graph are
1386  * c->optimum_nodes[0...block_size].  They correspond directly to the bytes in
1387  * the current block, plus one extra node for end-of-block.  The edges of the
1388  * graph are matches and literals.  The goal is to find the minimum cost path
1389  * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
1390  *
1391  * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1392  * proceeding forwards one node at a time.  At each node, a selection of matches
1393  * (len >= 2), as well as the literal byte (len = 1), is considered.  An item of
1394  * length 'len' provides a new path to reach the node 'len' bytes later.  If
1395  * such a path is the lowest cost found so far to reach that later node, then
1396  * that later node is updated with the new path.
1397  *
1398  * Note that although this algorithm is based on minimum cost path search, due
1399  * to various simplifying assumptions the result is not guaranteed to be the
1400  * true minimum cost, or "optimal", path over the graph of all valid LZX
1401  * representations of this block.
1402  *
1403  * Also, note that because of the presence of the recent offsets queue (which is
1404  * a type of adaptive state), the algorithm cannot work backwards and compute
1405  * "cost to end" instead of "cost to beginning".  Furthermore, the way the
1406  * algorithm handles this adaptive state in the "minimum cost" parse is actually
1407  * only an approximation.  It's possible for the globally optimal, minimum cost
1408  * path to contain a prefix, ending at a position, where that path prefix is
1409  * *not* the minimum cost path to that position.  This can happen if such a path
1410  * prefix results in a different adaptive state which results in lower costs
1411  * later.  The algorithm does not solve this problem; it only considers the
1412  * lowest cost to reach each individual position.
1413  */
1414 static inline struct lzx_lru_queue
1415 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1416                        const u8 * const restrict block_begin,
1417                        const u32 block_size,
1418                        const struct lzx_lru_queue initial_queue,
1419                        bool is_16_bit)
1420 {
1421         struct lzx_optimum_node *cur_node = c->optimum_nodes;
1422         struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
1423         struct lz_match *cache_ptr = c->match_cache;
1424         const u8 *in_next = block_begin;
1425         const u8 * const block_end = block_begin + block_size;
1426
1427         /* Instead of storing the match offset LRU queues in the
1428          * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1429          * storing them in a smaller array.  This works because the algorithm
1430          * only requires a limited history of the adaptive state.  Once a given
1431          * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1432          * it is no longer needed.  */
1433         struct lzx_lru_queue queues[512];
1434
1435         STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
1436 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
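        /* Since ARRAY_LEN(queues) is a power of two (512), the modulo in
         * QUEUE() reduces to a cheap bitwise AND.  Only the queues for the
         * most recent LZX_MAX_MATCH_LEN + 1 positions are ever read, so the
         * wrap-around never aliases a state that is still needed; that is what
         * the STATIC_ASSERT above guarantees.  */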
1437
1438         /* Initially, the cost to reach each node is "infinity".  */
1439         memset(c->optimum_nodes, 0xFF,
1440                (block_size + 1) * sizeof(c->optimum_nodes[0]));
1441
1442         QUEUE(block_begin) = initial_queue;
1443
1444         /* The following loop runs 'block_size' iterations, one per node.  */
1445         do {
1446                 unsigned num_matches;
1447                 unsigned literal;
1448                 u32 cost;
1449
1450                 /*
1451                  * A selection of matches for the block was already saved in
1452                  * memory so that we don't have to run the uncompressed data
1453                  * through the matchfinder on every optimization pass.  However,
1454                  * we still search for repeat offset matches during each
1455                  * optimization pass because we cannot predict the state of the
1456                  * recent offsets queue.  But as a heuristic, we don't bother
1457                  * searching for repeat offset matches if the general-purpose
1458                  * matchfinder failed to find any matches.
1459                  *
1460                  * Note that a match of length n at some offset implies there is
1461                  * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1462                  * that same offset.  In other words, we don't necessarily need
1463                  * to use the full length of a match.  The key heuristic that
1464                  * saves a significant amount of time is that for each
1465                  * distinct length, we only consider the smallest offset for
1466                  * which that length is available.  This heuristic also applies
1467                  * to repeat offsets, which we order specially: R0 < R1 < R2 <
1468                  * any explicit offset.  Of course, this heuristic may
1469                  * produce suboptimal results because offset slots in LZX are
1470                  * subject to entropy encoding, but in practice it is a useful
1471                  * approximation.
1472                  */
1473
1474                 num_matches = cache_ptr->length;
1475                 cache_ptr++;
1476
1477                 if (num_matches) {
1478                         struct lz_match *end_matches = cache_ptr + num_matches;
1479                         unsigned next_len = LZX_MIN_MATCH_LEN;
1480                         unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1481                         const u8 *matchptr;
1482
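                        /* Each repeat offset below is only considered for
                         * lengths not already covered by a more recently used
                         * offset: 'next_len' only increases, so R1 is tried
                         * only for lengths beyond what R0 matched, and R2 only
                         * beyond what R1 matched.  This implements the
                         * "smallest offset per distinct length" heuristic
                         * described above, with R0 < R1 < R2.  */
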
1483                         /* Consider R0 match  */
1484                         matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1485                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1486                                 goto R0_done;
1487                         STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
1488                         do {
1489                                 u32 cost = cur_node->cost +
1490                                            c->costs.match_cost[0][
1491                                                         next_len - LZX_MIN_MATCH_LEN];
1492                                 if (cost <= (cur_node + next_len)->cost) {
1493                                         (cur_node + next_len)->cost = cost;
1494                                         (cur_node + next_len)->item =
1495                                                 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1496                                 }
1497                                 if (unlikely(++next_len > max_len)) {
1498                                         cache_ptr = end_matches;
1499                                         goto done_matches;
1500                                 }
1501                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1502
1503                 R0_done:
1504
1505                         /* Consider R1 match  */
1506                         matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1507                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1508                                 goto R1_done;
1509                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1510                                 goto R1_done;
1511                         for (unsigned len = 2; len < next_len - 1; len++)
1512                                 if (matchptr[len] != in_next[len])
1513                                         goto R1_done;
1514                         do {
1515                                 u32 cost = cur_node->cost +
1516                                            c->costs.match_cost[1][
1517                                                         next_len - LZX_MIN_MATCH_LEN];
1518                                 if (cost <= (cur_node + next_len)->cost) {
1519                                         (cur_node + next_len)->cost = cost;
1520                                         (cur_node + next_len)->item =
1521                                                 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1522                                 }
1523                                 if (unlikely(++next_len > max_len)) {
1524                                         cache_ptr = end_matches;
1525                                         goto done_matches;
1526                                 }
1527                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1528
1529                 R1_done:
1530
1531                         /* Consider R2 match  */
1532                         matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1533                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1534                                 goto R2_done;
1535                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1536                                 goto R2_done;
1537                         for (unsigned len = 2; len < next_len - 1; len++)
1538                                 if (matchptr[len] != in_next[len])
1539                                         goto R2_done;
1540                         do {
1541                                 u32 cost = cur_node->cost +
1542                                            c->costs.match_cost[2][
1543                                                         next_len - LZX_MIN_MATCH_LEN];
1544                                 if (cost <= (cur_node + next_len)->cost) {
1545                                         (cur_node + next_len)->cost = cost;
1546                                         (cur_node + next_len)->item =
1547                                                 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1548                                 }
1549                                 if (unlikely(++next_len > max_len)) {
1550                                         cache_ptr = end_matches;
1551                                         goto done_matches;
1552                                 }
1553                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1554
1555                 R2_done:
1556
1557                         while (next_len > cache_ptr->length)
1558                                 if (++cache_ptr == end_matches)
1559                                         goto done_matches;
1560
1561                         /* Consider explicit offset matches  */
1562                         do {
1563                                 u32 offset = cache_ptr->offset;
1564                                 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1565                                 unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
1566                                                                                 is_16_bit);
1567                                 do {
1568                                         u32 cost = cur_node->cost +
1569                                                    c->costs.match_cost[offset_slot][
1570                                                                 next_len - LZX_MIN_MATCH_LEN];
1571                                 #if LZX_CONSIDER_ALIGNED_COSTS
1572                                         if (lzx_extra_offset_bits[offset_slot] >=
1573                                             LZX_NUM_ALIGNED_OFFSET_BITS)
1574                                                 cost += c->costs.aligned[offset_data &
1575                                                                          LZX_ALIGNED_OFFSET_BITMASK];
1576                                 #endif
1577                                         if (cost < (cur_node + next_len)->cost) {
1578                                                 (cur_node + next_len)->cost = cost;
1579                                                 (cur_node + next_len)->item =
1580                                                         (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1581                                         }
1582                                 } while (++next_len <= cache_ptr->length);
1583                         } while (++cache_ptr != end_matches);
1584                 }
1585
1586         done_matches:
1587
1588                 /* Consider coding a literal.
1589                  *
1590                  * To avoid an extra branch, actually checking the preferability
1591                  * of coding the literal is integrated into the queue update
1592                  * code below.  */
1593                 literal = *in_next++;
1594                 cost = cur_node->cost +
1595                        c->costs.main[lzx_main_symbol_for_literal(literal)];
1596
1597                 /* Advance to the next position.  */
1598                 cur_node++;
1599
1600                 /* The lowest-cost path to the current position is now known.
1601                  * Finalize the recent offsets queue that results from taking
1602                  * this lowest-cost path.  */
1603
1604                 if (cost <= cur_node->cost) {
1605                         /* Literal: queue remains unchanged.  */
1606                         cur_node->cost = cost;
1607                         cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
1608                         QUEUE(in_next) = QUEUE(in_next - 1);
1609                 } else {
1610                         /* Match: queue update is needed.  */
1611                         unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1612                         u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1613                         if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1614                                 /* Explicit offset match: insert offset at front  */
1615                                 QUEUE(in_next) =
1616                                         lzx_lru_queue_push(QUEUE(in_next - len),
1617                                                            offset_data - LZX_OFFSET_ADJUSTMENT);
1618                         } else {
1619                                 /* Repeat offset match: swap offset to front  */
1620                                 QUEUE(in_next) =
1621                                         lzx_lru_queue_swap(QUEUE(in_next - len),
1622                                                            offset_data);
1623                         }
1624                 }
1625         } while (cur_node != end_node);
1626
1627         /* Return the match offset queue at the end of the minimum cost path. */
1628         return QUEUE(block_end);
1629 }
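
/*
 * A minimal sketch of the relaxation step performed above (names simplified,
 * not actual code): for each item of length 'len' and cost 'item_cost'
 * considered at node i,
 *
 *         if (node[i].cost + item_cost <= node[i + len].cost) {
 *                 node[i + len].cost = node[i].cost + item_cost;
 *                 node[i + len].item = pack(offset_data, len);
 *         }
 *
 * Repeat-offset matches and literals use '<=' while explicit-offset matches
 * use a strict '<', so an explicit-offset match never displaces an equally
 * cheap alternative, whereas repeat-offset matches and literals do.
 */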
1630
1631 /* Given the costs for the main and length codewords, compute 'match_costs'.  */
1632 static void
1633 lzx_compute_match_costs(struct lzx_compressor *c)
1634 {
1635         unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
1636         struct lzx_costs *costs = &c->costs;
1637
1638         for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1639
1640                 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1641                 unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
1642                 unsigned i;
1643
1644         #if LZX_CONSIDER_ALIGNED_COSTS
1645                 if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
1646                         extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1647         #endif
1648
1649                 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1650                         costs->match_cost[offset_slot][i] =
1651                                 costs->main[main_symbol++] + extra_cost;
1652
1653                 extra_cost += costs->main[main_symbol];
1654
1655                 for (; i < LZX_NUM_LENS; i++)
1656                         costs->match_cost[offset_slot][i] =
1657                                 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
1658         }
1659 }
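
/*
 * Putting the above together, the cost charged for a match is approximately:
 *
 *     match_cost[slot][len - LZX_MIN_MATCH_LEN] =
 *             cost of the main symbol (length header + offset slot)
 *           + cost of the length symbol, when the length header is
 *             LZX_NUM_PRIMARY_LENS (i.e. the length is "long")
 *           + number of extra offset bits * LZX_BIT_COST
 *             (minus the aligned bits, which are costed separately when
 *              LZX_CONSIDER_ALIGNED_COSTS is enabled).
 */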
1660
1661 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
1662  * algorithm.  */
1663 static void
1664 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1665 {
1666         u32 i;
1667         bool have_byte[256];
1668         unsigned num_used_bytes;
1669
1670         /* The costs below are hard coded to use a scaling factor of 16.  */
1671         STATIC_ASSERT(LZX_BIT_COST == 16);
1672
1673         /*
1674          * Heuristics:
1675          *
1676          * - Use smaller initial costs for literal symbols when the input buffer
1677          *   contains fewer distinct bytes.
1678          *
1679          * - Assume that match symbols are more costly than literal symbols.
1680          *
1681          * - Assume that length symbols for shorter lengths are less costly than
1682          *   length symbols for longer lengths.
1683          */
1684
1685         for (i = 0; i < 256; i++)
1686                 have_byte[i] = false;
1687
1688         for (i = 0; i < block_size; i++)
1689                 have_byte[block[i]] = true;
1690
1691         num_used_bytes = 0;
1692         for (i = 0; i < 256; i++)
1693                 num_used_bytes += have_byte[i];
1694
1695         for (i = 0; i < 256; i++)
1696                 c->costs.main[i] = 140 - (256 - num_used_bytes) / 4;
1697
1698         for (; i < c->num_main_syms; i++)
1699                 c->costs.main[i] = 170;
1700
1701         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1702                 c->costs.len[i] = 103 + (i / 4);
1703
1704 #if LZX_CONSIDER_ALIGNED_COSTS
1705         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1706                 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1707 #endif
1708
1709         lzx_compute_match_costs(c);
1710 }
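
/*
 * Worked example of the scale used above (LZX_BIT_COST == 16): when all 256
 * byte values occur in the block, literals start at 140/16 = 8.75 bits; with
 * only a few distinct bytes the literal cost drops toward roughly 5 bits.
 * Match symbols start at 170/16 (about 10.6 bits) and length symbols at
 * 103/16 (about 6.4 bits), rising slightly for longer lengths.
 */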
1711
1712 /* Update the current cost model to reflect the computed Huffman codes.  */
1713 static void
1714 lzx_update_costs(struct lzx_compressor *c)
1715 {
1716         unsigned i;
1717         const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1718
1719         for (i = 0; i < c->num_main_syms; i++)
1720                 c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
1721
1722         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1723                 c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
1724
1725 #if LZX_CONSIDER_ALIGNED_COSTS
1726         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1727                 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
1728 #endif
1729
1730         lzx_compute_match_costs(c);
1731 }
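
/*
 * Symbols that received no codeword in the previous pass (length 0) are
 * charged 15 bits here (7 for aligned symbols) rather than 0, presumably so
 * that symbols unused in one pass are not treated as free in the next.
 */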
1732
1733 static inline struct lzx_lru_queue
1734 lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
1735                              struct lzx_output_bitstream * const restrict os,
1736                              const u8 * const restrict block_begin,
1737                              const u32 block_size,
1738                              const struct lzx_lru_queue initial_queue,
1739                              bool is_16_bit)
1740 {
1741         unsigned num_passes_remaining = c->num_optim_passes;
1742         struct lzx_lru_queue new_queue;
1743         u32 seq_idx;
1744
1745         /* The first optimization pass uses a default cost model.  Each
1746          * additional optimization pass uses a cost model derived from the
1747          * Huffman code computed in the previous pass.  */
1748
1749         lzx_set_default_costs(c, block_begin, block_size);
1750         lzx_reset_symbol_frequencies(c);
1751         do {
1752                 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1753                                                    initial_queue, is_16_bit);
1754                 if (num_passes_remaining > 1) {
1755                         lzx_tally_item_list(c, block_size, is_16_bit);
1756                         lzx_make_huffman_codes(c);
1757                         lzx_update_costs(c);
1758                         lzx_reset_symbol_frequencies(c);
1759                 }
1760         } while (--num_passes_remaining);
1761
1762         seq_idx = lzx_record_item_list(c, block_size, is_16_bit);
1763         lzx_finish_block(c, os, block_begin, block_size, seq_idx);
1764         return new_queue;
1765 }
1766
1767 /*
1768  * This is the "near-optimal" LZX compressor.
1769  *
1770  * For each block, it performs a relatively thorough graph search to find an
1771  * inexpensive (in terms of compressed size) way to output that block.
1772  *
1773  * Note: there are actually many things this algorithm leaves on the table in
1774  * terms of compression ratio.  So although it may be "near-optimal", it is
1775  * certainly not "optimal".  The goal is not to produce the optimal compression
1776  * ratio, which for LZX is probably impossible within any practical amount of
1777  * time, but rather to produce a compression ratio significantly better than a
1778  * simpler "greedy" or "lazy" parse while still being relatively fast.
1779  */
1780 static inline void
1781 lzx_compress_near_optimal(struct lzx_compressor *c,
1782                           struct lzx_output_bitstream *os,
1783                           bool is_16_bit)
1784 {
1785         const u8 * const in_begin = c->in_buffer;
1786         const u8 *       in_next = in_begin;
1787         const u8 * const in_end  = in_begin + c->in_nbytes;
1788         unsigned max_len = LZX_MAX_MATCH_LEN;
1789         unsigned nice_len = min(c->nice_match_length, max_len);
1790         u32 next_hash;
1791         struct lzx_lru_queue queue;
1792
1793         CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
1794         memset(c->hash2_tab, 0, sizeof(c->hash2_tab));
1795         next_hash = bt_matchfinder_hash_3_bytes(in_next);
1796         lzx_lru_queue_init(&queue);
1797
1798         do {
1799                 /* Starting a new block  */
1800                 const u8 * const in_block_begin = in_next;
1801                 const u8 * const in_block_end =
1802                         in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1803
1804                 /* Run the block through the matchfinder and cache the matches. */
1805                 struct lz_match *cache_ptr = c->match_cache;
1806                 do {
1807                         struct lz_match *lz_matchptr;
1808                         u32 hash2;
1809                         pos_t cur_match;
1810                         unsigned best_len;
1811
1812                         /* If approaching the end of the input buffer, adjust
1813                          * 'max_len' and 'nice_len' accordingly.  */
1814                         if (unlikely(max_len > in_end - in_next)) {
1815                                 max_len = in_end - in_next;
1816                                 nice_len = min(max_len, nice_len);
1817
1818                                 /* This extra check is needed to ensure that we
1819                                  * never output a length 2 match of the very
1820                                  * last two bytes with the very first two bytes,
1821                                  * since such a match has an offset too large to
1822                                  * be represented.  */
1823                                 if (unlikely(max_len < 3)) {
1824                                         in_next++;
1825                                         cache_ptr->length = 0;
1826                                         cache_ptr++;
1827                                         continue;
1828                                 }
1829                         }
1830
1831                         lz_matchptr = cache_ptr + 1;
1832
1833                         /* Check for a length 2 match.  */
1834                         hash2 = lz_hash_2_bytes(in_next, LZX_HASH2_ORDER);
1835                         cur_match = c->hash2_tab[hash2];
1836                         c->hash2_tab[hash2] = in_next - in_begin;
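                        /* When LZX_HASH2_ORDER == 16, the 2-byte hash is
                         * assumed to be collision-free, so the explicit byte
                         * comparison below can be skipped.  */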
1837                         if (cur_match != 0 &&
1838                             (LZX_HASH2_ORDER == 16 ||
1839                              load_u16_unaligned(&in_begin[cur_match]) ==
1840                              load_u16_unaligned(in_next)))
1841                         {
1842                                 lz_matchptr->length = 2;
1843                                 lz_matchptr->offset = in_next - &in_begin[cur_match];
1844                                 lz_matchptr++;
1845                         }
1846
1847                         /* Check for matches of length >= 3.  */
1848                         lz_matchptr = CALL_BT_MF(is_16_bit, c, bt_matchfinder_get_matches,
1849                                                  in_begin,
1850                                                  in_next,
1851                                                  3,
1852                                                  max_len,
1853                                                  nice_len,
1854                                                  c->max_search_depth,
1855                                                  &next_hash,
1856                                                  &best_len,
1857                                                  lz_matchptr);
1858                         in_next++;
1859                         cache_ptr->length = lz_matchptr - (cache_ptr + 1);
1860                         cache_ptr = lz_matchptr;
1861
1862                         /*
1863                          * If there was a very long match found, then don't
1864                          * cache any matches for the bytes covered by that
1865                          * match.  This avoids degenerate behavior when
1866                          * compressing highly redundant data, where the number
1867                          * of matches can be very large.
1868                          *
1869                          * This heuristic doesn't actually hurt the compression
1870                          * ratio very much.  If there's a long match, then the
1871                          * data must be highly compressible, so it doesn't
1872                          * matter as much what we do.
1873                          */
1874                         if (best_len >= nice_len) {
1875                                 --best_len;
1876                                 do {
1877                                         if (unlikely(max_len > in_end - in_next)) {
1878                                                 max_len = in_end - in_next;
1879                                                 nice_len = min(max_len, nice_len);
1880                                                 if (unlikely(max_len < 3)) {
1881                                                         in_next++;
1882                                                         cache_ptr->length = 0;
1883                                                         cache_ptr++;
1884                                                         continue;
1885                                                 }
1886                                         }
1887                                         c->hash2_tab[lz_hash_2_bytes(in_next, LZX_HASH2_ORDER)] =
1888                                                 in_next - in_begin;
1889                                         CALL_BT_MF(is_16_bit, c, bt_matchfinder_skip_position,
1890                                                    in_begin,
1891                                                    in_next,
1892                                                    in_end,
1893                                                    nice_len,
1894                                                    c->max_search_depth,
1895                                                    &next_hash);
1896                                         in_next++;
1897                                         cache_ptr->length = 0;
1898                                         cache_ptr++;
1899                                 } while (--best_len);
1900                         }
1901                 } while (in_next < in_block_end &&
1902                          likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
1903
1904                 /* We've finished running the block through the matchfinder.
1905                  * Now choose a match/literal sequence and write the block.  */
1906
1907                 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
1908                                                      in_next - in_block_begin,
1909                                                      queue, is_16_bit);
1910         } while (in_next != in_end);
1911 }
1912
1913 static void
1914 lzx_compress_near_optimal_16(struct lzx_compressor *c,
1915                              struct lzx_output_bitstream *os)
1916 {
1917         lzx_compress_near_optimal(c, os, true);
1918 }
1919
1920 static void
1921 lzx_compress_near_optimal_32(struct lzx_compressor *c,
1922                              struct lzx_output_bitstream *os)
1923 {
1924         lzx_compress_near_optimal(c, os, false);
1925 }
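
/*
 * Note: CALL_BT_MF() (and CALL_HC_MF() below) presumably dispatch to the
 * 16-bit or 32-bit matchfinder instantiation based on 'is_16_bit'.  Because
 * the _16 and _32 wrappers pass a compile-time constant and the worker
 * functions are inline, the compiler can fold the dispatch away, yielding two
 * specialized copies of the compressor with no runtime branching on the
 * position type.
 */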
1926
1927 /*
1928  * Given a pointer to the current byte sequence and the current list of recent
1929  * match offsets, find the longest repeat offset match.
1930  *
1931  * If no match of at least 2 bytes is found, then return 0.
1932  *
1933  * If a match of at least 2 bytes is found, then return its length and set
1934  * *rep_max_idx_ret to the index of its offset in @recent_offsets.
1935  */
1936 static unsigned
1937 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
1938                                      const u32 bytes_remaining,
1939                                      const u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
1940                                      unsigned *rep_max_idx_ret)
1941 {
1942         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
1943         LZX_ASSERT(bytes_remaining >= 2);
1944
1945         const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
1946         const u16 next_2_bytes = load_u16_unaligned(in_next);
1947         const u8 *matchptr;
1948         unsigned rep_max_len;
1949         unsigned rep_max_idx;
1950         unsigned rep_len;
1951
1952         matchptr = in_next - recent_offsets[0];
1953         if (load_u16_unaligned(matchptr) == next_2_bytes)
1954                 rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
1955         else
1956                 rep_max_len = 0;
1957         rep_max_idx = 0;
1958
1959         matchptr = in_next - recent_offsets[1];
1960         if (load_u16_unaligned(matchptr) == next_2_bytes) {
1961                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1962                 if (rep_len > rep_max_len) {
1963                         rep_max_len = rep_len;
1964                         rep_max_idx = 1;
1965                 }
1966         }
1967
1968         matchptr = in_next - recent_offsets[2];
1969         if (load_u16_unaligned(matchptr) == next_2_bytes) {
1970                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1971                 if (rep_len > rep_max_len) {
1972                         rep_max_len = rep_len;
1973                         rep_max_idx = 2;
1974                 }
1975         }
1976
1977         *rep_max_idx_ret = rep_max_idx;
1978         return rep_max_len;
1979 }
1980
1981 /* Fast heuristic scoring for lazy parsing: how "good" is this match?  */
1982 static inline unsigned
1983 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
1984 {
1985         unsigned score = len;
1986
1987         if (adjusted_offset < 4096)
1988                 score++;
1989
1990         if (adjusted_offset < 256)
1991                 score++;
1992
1993         return score;
1994 }
1995
1996 static inline unsigned
1997 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
1998 {
1999         return rep_len + 3;
2000 }
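
/*
 * How the scores above compare (illustrative): an explicit-offset match scores
 * its length plus up to 2 bonus points for a small adjusted offset (< 4096:
 * +1; < 256: +2 total), while a repeat-offset match scores its length plus 3
 * regardless of which queue slot it uses.  So at equal length a repeat-offset
 * match always wins, presumably because it needs no explicit offset bits.
 */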
2001
2002 /* This is the "lazy" LZX compressor.  */
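/*
 * Outline of the strategy used below: at each position, find the longest match
 * starting there; if it is long enough, take it immediately.  Otherwise, also
 * look for a match at the *next* position (hence "lazy"), and if that match
 * scores higher, emit the current byte as a literal and defer to the next
 * match instead.  Repeat-offset matches are checked at both positions and
 * preferred when their score is at least as good.
 */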
2003 static inline void
2004 lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
2005                   bool is_16_bit)
2006 {
2007         const u8 * const in_begin = c->in_buffer;
2008         const u8 *       in_next = in_begin;
2009         const u8 * const in_end  = in_begin + c->in_nbytes;
2010         unsigned max_len = LZX_MAX_MATCH_LEN;
2011         unsigned nice_len = min(c->nice_match_length, max_len);
2012         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
2013         u32 recent_offsets[3] = {1, 1, 1};
2014         u32 next_hashes[2] = {};
2015
2016         CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
2017
2018         do {
2019                 /* Starting a new block  */
2020
2021                 const u8 * const in_block_begin = in_next;
2022                 const u8 * const in_block_end =
2023                         in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
2024                 struct lzx_sequence *next_seq = c->chosen_sequences;
2025                 unsigned cur_len;
2026                 u32 cur_offset;
2027                 u32 cur_offset_data;
2028                 unsigned cur_score;
2029                 unsigned next_len;
2030                 u32 next_offset;
2031                 u32 next_offset_data;
2032                 unsigned next_score;
2033                 unsigned rep_max_len;
2034                 unsigned rep_max_idx;
2035                 unsigned rep_score;
2036                 unsigned skip_len;
2037                 u32 litrunlen = 0;
2038
2039                 lzx_reset_symbol_frequencies(c);
2040
2041                 do {
2042                         if (unlikely(max_len > in_end - in_next)) {
2043                                 max_len = in_end - in_next;
2044                                 nice_len = min(max_len, nice_len);
2045                         }
2046
2047                         /* Find the longest match at the current position.  */
2048
2049                         cur_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match,
2050                                              in_begin,
2051                                              in_next - in_begin,
2052                                              2,
2053                                              max_len,
2054                                              nice_len,
2055                                              c->max_search_depth,
2056                                              next_hashes,
2057                                              &cur_offset);
2058                         if (cur_len < 3 ||
2059                             (cur_len == 3 &&
2060                              cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
2061                              cur_offset != recent_offsets[0] &&
2062                              cur_offset != recent_offsets[1] &&
2063                              cur_offset != recent_offsets[2]))
2064                         {
2065                                 /* There was no match found, or the only match found
2066                                  * was a distant length 3 match.  Output a literal.  */
2067                                 lzx_record_literal(c, *in_next++, &litrunlen);
2068                                 continue;
2069                         }
2070
2071                         if (cur_offset == recent_offsets[0]) {
2072                                 in_next++;
2073                                 cur_offset_data = 0;
2074                                 skip_len = cur_len - 1;
2075                                 goto choose_cur_match;
2076                         }
2077
2078                         cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
2079                         cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
2080
2081                         /* Consider a repeat offset match  */
2082                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2083                                                                            in_end - in_next,
2084                                                                            recent_offsets,
2085                                                                            &rep_max_idx);
2086                         in_next++;
2087
2088                         if (rep_max_len >= 3 &&
2089                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2090                                                                        rep_max_idx)) >= cur_score)
2091                         {
2092                                 cur_len = rep_max_len;
2093                                 cur_offset_data = rep_max_idx;
2094                                 skip_len = rep_max_len - 1;
2095                                 goto choose_cur_match;
2096                         }
2097
2098                 have_cur_match:
2099
2100                         /* We have a match at the current position.  */
2101
2102                         /* If we have a very long match, choose it immediately.  */
2103                         if (cur_len >= nice_len) {
2104                                 skip_len = cur_len - 1;
2105                                 goto choose_cur_match;
2106                         }
2107
2108                         /* See if there's a better match at the next position.  */
2109
2110                         if (unlikely(max_len > in_end - in_next)) {
2111                                 max_len = in_end - in_next;
2112                                 nice_len = min(max_len, nice_len);
2113                         }
2114
2115                         next_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match,
2116                                               in_begin,
2117                                               in_next - in_begin,
2118                                               cur_len - 2,
2119                                               max_len,
2120                                               nice_len,
2121                                               c->max_search_depth / 2,
2122                                               next_hashes,
2123                                               &next_offset);
2124
2125                         if (next_len <= cur_len - 2) {
2126                                 in_next++;
2127                                 skip_len = cur_len - 2;
2128                                 goto choose_cur_match;
2129                         }
2130
2131                         next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
2132                         next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
2133
2134                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2135                                                                            in_end - in_next,
2136                                                                            recent_offsets,
2137                                                                            &rep_max_idx);
2138                         in_next++;
2139
2140                         if (rep_max_len >= 3 &&
2141                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2142                                                                        rep_max_idx)) >= next_score)
2143                         {
2144
2145                                 if (rep_score > cur_score) {
2146                                         /* The next match is better, and it's a
2147                                          * repeat offset match.  */
2148                                         lzx_record_literal(c, *(in_next - 2),
2149                                                            &litrunlen);
2150                                         cur_len = rep_max_len;
2151                                         cur_offset_data = rep_max_idx;
2152                                         skip_len = cur_len - 1;
2153                                         goto choose_cur_match;
2154                                 }
2155                         } else {
2156                                 if (next_score > cur_score) {
2157                                         /* The next match is better, and it's an
2158                                          * explicit offset match.  */
2159                                         lzx_record_literal(c, *(in_next - 2),
2160                                                            &litrunlen);
2161                                         cur_len = next_len;
2162                                         cur_offset_data = next_offset_data;
2163                                         cur_score = next_score;
2164                                         goto have_cur_match;
2165                                 }
2166                         }
2167
2168                         /* The original match was better.  */
2169                         skip_len = cur_len - 2;
2170
2171                 choose_cur_match:
2172                         lzx_record_match(c, cur_len, cur_offset_data,
2173                                          recent_offsets, is_16_bit,
2174                                          &litrunlen, &next_seq);
2175                         in_next = CALL_HC_MF(is_16_bit, c, hc_matchfinder_skip_positions,
2176                                              in_begin,
2177                                              in_next - in_begin,
2178                                              in_end - in_begin,
2179                                              skip_len,
2180                                              next_hashes);
2181                 } while (in_next < in_block_end);
2182
2183                 lzx_finish_sequence(next_seq, litrunlen);
2184
2185                 lzx_finish_block(c, os, in_block_begin, in_next - in_block_begin, 0);
2186
2187         } while (in_next != in_end);
2188 }
2189
2190 static void
2191 lzx_compress_lazy_16(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2192 {
2193         lzx_compress_lazy(c, os, true);
2194 }
2195
2196 static void
2197 lzx_compress_lazy_32(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2198 {
2199         lzx_compress_lazy(c, os, false);
2200 }
2201
2202 /* Generate the acceleration tables for offset slots.  */
2203 static void
2204 lzx_init_offset_slot_tabs(struct lzx_compressor *c)
2205 {
2206         u32 adjusted_offset = 0;
2207         unsigned slot = 0;
2208
2209         /* slots [0, 29]  */
2210         for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
2211              adjusted_offset++)
2212         {
2213                 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2214                         slot++;
2215                 c->offset_slot_tab_1[adjusted_offset] = slot;
2216         }
2217
2218         /* slots [30, 49]  */
2219         for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
2220              adjusted_offset += (u32)1 << 14)
2221         {
2222                 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2223                         slot++;
2224                 c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
2225         }
2226 }
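
/*
 * These tables presumably let lzx_comp_get_offset_slot() map an adjusted
 * offset to its slot with a single array lookup: small adjusted offsets index
 * offset_slot_tab_1 directly, while larger ones index offset_slot_tab_2 by
 * (adjusted_offset >> 14), which relies on every slot boundary at or above
 * slot 30 being a multiple of 2^14.
 */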
2227
2228 static size_t
2229 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
2230 {
2231         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2232                 if (lzx_is_16_bit(max_bufsize))
2233                         return offsetof(struct lzx_compressor, hc_mf_16) +
2234                                hc_matchfinder_size_16(max_bufsize);
2235                 else
2236                         return offsetof(struct lzx_compressor, hc_mf_32) +
2237                                hc_matchfinder_size_32(max_bufsize);
2238         } else {
2239                 if (lzx_is_16_bit(max_bufsize))
2240                         return offsetof(struct lzx_compressor, bt_mf_16) +
2241                                bt_matchfinder_size_16(max_bufsize);
2242                 else
2243                         return offsetof(struct lzx_compressor, bt_mf_32) +
2244                                bt_matchfinder_size_32(max_bufsize);
2245         }
2246 }
2247
2248 static u64
2249 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
2250                       bool destructive)
2251 {
2252         u64 size = 0;
2253
2254         if (max_bufsize > LZX_MAX_WINDOW_SIZE)
2255                 return 0;
2256
2257         size += lzx_get_compressor_size(max_bufsize, compression_level);
2258         if (!destructive)
2259                 size += max_bufsize; /* in_buffer */
2260         return size;
2261 }
2262
2263 static int
2264 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
2265                       bool destructive, void **c_ret)
2266 {
2267         unsigned window_order;
2268         struct lzx_compressor *c;
2269
2270         window_order = lzx_get_window_order(max_bufsize);
2271         if (window_order == 0)
2272                 return WIMLIB_ERR_INVALID_PARAM;
2273
2274         c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
2275         if (!c)
2276                 goto oom0;
2277
2278         c->destructive = destructive;
2279
2280         c->num_main_syms = lzx_get_num_main_syms(window_order);
2281         c->window_order = window_order;
2282
2283         if (!c->destructive) {
2284                 c->in_buffer = MALLOC(max_bufsize);
2285                 if (!c->in_buffer)
2286                         goto oom1;
2287         }
2288
2289         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2290
2291                 /* Fast compression: Use lazy parsing.  */
2292
2293                 if (lzx_is_16_bit(max_bufsize))
2294                         c->impl = lzx_compress_lazy_16;
2295                 else
2296                         c->impl = lzx_compress_lazy_32;
2297                 c->max_search_depth = (36 * compression_level) / 20;
2298                 c->nice_match_length = (72 * compression_level) / 20;
2299
2300                 /* lzx_compress_lazy() needs max_search_depth >= 2 because it
2301                  * halves the max_search_depth when attempting a lazy match, and
2302                  * max_search_depth cannot be 0.  */
2303                 if (c->max_search_depth < 2)
2304                         c->max_search_depth = 2;
2305         } else {
2306
2307                 /* Normal / high compression: Use near-optimal parsing.  */
2308
2309                 if (lzx_is_16_bit(max_bufsize))
2310                         c->impl = lzx_compress_near_optimal_16;
2311                 else
2312                         c->impl = lzx_compress_near_optimal_32;
2313
2314                 /* Scale nice_match_length and max_search_depth with the
2315                  * compression level.  */
2316                 c->max_search_depth = (24 * compression_level) / 50;
2317                 c->nice_match_length = (32 * compression_level) / 50;
2318
2319                 /* Set a number of optimization passes appropriate for the
2320                  * compression level.  */
2321
2322                 c->num_optim_passes = 1;
2323
2324                 if (compression_level >= 45)
2325                         c->num_optim_passes++;
2326
2327                 /* Use more optimization passes for higher compression levels.
2328                  * But the more passes there are, the less they help --- so
2329                  * don't add them linearly.  */
2330                 if (compression_level >= 70) {
2331                         c->num_optim_passes++;
2332                         if (compression_level >= 100)
2333                                 c->num_optim_passes++;
2334                         if (compression_level >= 150)
2335                                 c->num_optim_passes++;
2336                         if (compression_level >= 200)
2337                                 c->num_optim_passes++;
2338                         if (compression_level >= 300)
2339                                 c->num_optim_passes++;
2340                 }
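
                /* The resulting pass counts: levels below 45 use 1 pass;
                 * 45-69: 2; 70-99: 3; 100-149: 4; 150-199: 5; 200-299: 6;
                 * 300 and above: 7.  */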
2341         }
2342
2343         /* max_search_depth == 0 is invalid.  */
2344         if (c->max_search_depth < 1)
2345                 c->max_search_depth = 1;
2346
2347         if (c->nice_match_length > LZX_MAX_MATCH_LEN)
2348                 c->nice_match_length = LZX_MAX_MATCH_LEN;
2349
2350         lzx_init_offset_slot_tabs(c);
2351         *c_ret = c;
2352         return 0;
2353
2354 oom1:
2355         FREE(c);
2356 oom0:
2357         return WIMLIB_ERR_NOMEM;
2358 }
2359
2360 static size_t
2361 lzx_compress(const void *restrict in, size_t in_nbytes,
2362              void *restrict out, size_t out_nbytes_avail, void *restrict _c)
2363 {
2364         struct lzx_compressor *c = _c;
2365         struct lzx_output_bitstream os;
2366         size_t result;
2367
2368         /* Don't bother trying to compress very small inputs.  */
2369         if (in_nbytes < 100)
2370                 return 0;
2371
2372         /* Copy the input data into the internal buffer and preprocess it.  */
2373         if (c->destructive)
2374                 c->in_buffer = (void *)in;
2375         else
2376                 memcpy(c->in_buffer, in, in_nbytes);
2377         c->in_nbytes = in_nbytes;
2378         lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
2379
2380         /* Initially, the previous Huffman codeword lengths are all zeroes.  */
2381         c->codes_index = 0;
2382         memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2383
2384         /* Initialize the output bitstream.  */
2385         lzx_init_output(&os, out, out_nbytes_avail);
2386
2387         /* Call the compression level-specific compress() function.  */
2388         (*c->impl)(c, &os);
2389
2390         /* Flush the output bitstream and return the compressed size or 0.  */
2391         result = lzx_flush_output(&os);
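        /* In destructive mode the preprocessing above modified the caller's
         * buffer in place, so if the data could not be compressed it must be
         * restored before returning.  */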
2392         if (!result && c->destructive)
2393                 lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes);
2394         return result;
2395 }
2396
2397 static void
2398 lzx_free_compressor(void *_c)
2399 {
2400         struct lzx_compressor *c = _c;
2401
2402         if (!c->destructive)
2403                 FREE(c->in_buffer);
2404         FREE(c);
2405 }
2406
2407 const struct compressor_ops lzx_compressor_ops = {
2408         .get_needed_memory  = lzx_get_needed_memory,
2409         .create_compressor  = lzx_create_compressor,
2410         .compress           = lzx_compress,
2411         .free_compressor    = lzx_free_compressor,
2412 };