1 /*
2  * lzx_compress.c
3  *
4  * A compressor for the LZX compression format, as used in WIM files.
5  */
6
7 /*
8  * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
9  *
10  * This file is free software; you can redistribute it and/or modify it under
11  * the terms of the GNU Lesser General Public License as published by the Free
12  * Software Foundation; either version 3 of the License, or (at your option) any
13  * later version.
14  *
15  * This file is distributed in the hope that it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
18  * details.
19  *
20  * You should have received a copy of the GNU Lesser General Public License
21  * along with this file; if not, see http://www.gnu.org/licenses/.
22  */
23
24
25 /*
26  * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27  * compression format, as used in the WIM (Windows IMaging) file format.
28  *
29  * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30  * "Near-optimal" is significantly slower than "lazy", but results in a better
31  * compression ratio.  The "near-optimal" algorithm is used at the default
32  * compression level.
33  *
34  * This file may need some slight modifications to be used outside of the WIM
35  * format.  In particular, in other situations the LZX block header might be
36  * slightly different, and sliding window support might be required.
37  *
38  * Note: LZX is a compression format derived from DEFLATE, the format used by
39  * zlib and gzip.  Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40  * Certain details are quite similar, such as the method for storing Huffman
41  * codes.  However, the main differences are:
42  *
43  * - LZX preprocesses the data to attempt to make x86 machine code slightly more
44  *   compressible before attempting to compress it further.
45  *
46  * - LZX uses a "main" alphabet which combines literals and matches, with the
47  *   match symbols containing a "length header" (giving all or part of the match
48  *   length) and an "offset slot" (giving, roughly speaking, the order of
49  *   magnitude of the match offset).
50  *
51  * - LZX does not have static Huffman blocks (that is, the kind with preset
52  *   Huffman codes); however it does have two types of dynamic Huffman blocks
53  *   ("verbatim" and "aligned").
54  *
55  * - LZX has a minimum match length of 2 rather than 3.  Length 2 matches can be
56  *   useful, but generally only if the parser is smart about choosing them.
57  *
58  * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59  *   of match offsets.  This is very useful for certain types of files, such as
60  *   binary files that have repeating records.
61  */
62
63 #ifdef HAVE_CONFIG_H
64 #  include "config.h"
65 #endif
66
67 /*
68  * Start a new LZX block (with new Huffman codes) after this many bytes.
69  *
70  * Note: actual block sizes may slightly exceed this value.
71  *
72  * TODO: recursive splitting and cost evaluation might be good for an extremely
73  * high compression mode, but otherwise it is almost always far too slow for how
74  * much it helps.  Perhaps some sort of heuristic would be useful?
75  */
76 #define LZX_DIV_BLOCK_SIZE      32768
77
78 /*
79  * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the
80  * match cache for each byte position.  This value should be high enough so that
81  * nearly all the time, all matches found in a given block can fit in the match
82  * cache.  However, fallback behavior (immediately terminating the block) on
83  * cache overflow is still required.
84  */
85 #define LZX_CACHE_PER_POS       7
86
87 /*
88  * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
89  * excluding the extra "overflow" entries.  The per-position multiplier is '1 +
90  * LZX_CACHE_PER_POS' instead of 'LZX_CACHE_PER_POS' because there is an
91  * overhead of one lz_match per position, used to hold the match count at that
92  * position.
93  */
94 #define LZX_CACHE_LENGTH        (LZX_DIV_BLOCK_SIZE * (1 + LZX_CACHE_PER_POS))
95
96 /*
97  * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
98  * ever be saved in the match cache for a single position.  Since each match we
99  * save for a single position has a distinct length, we can use the number of
100  * possible match lengths in LZX as this bound.  This bound is guaranteed to be
101  * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
102  * it will never actually be reached.
103  */
104 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
105
106 /*
107  * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
108  * This makes it possible to consider fractional bit costs.
109  *
110  * Note: this is only useful as a statistical trick for when the true costs are
111  * unknown.  In reality, each token in LZX requires a whole number of bits to
112  * output.
113  */
114 #define LZX_BIT_COST            16
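
/*
 * Illustrative example: with LZX_BIT_COST = 16, a symbol whose stored cost is
 * 40 is treated as costing 40 / 16 = 2.5 bits, and a path's total cost is just
 * the sum of these scaled values.  Since only relative costs matter when
 * comparing paths, the scaling factor itself never needs to be divided out.
 */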
115
116 /*
117  * Should the compressor take into account the costs of aligned offset symbols?
118  */
119 #define LZX_CONSIDER_ALIGNED_COSTS      1
120
121 /*
122  * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
123  * faster algorithm.
124  */
125 #define LZX_MAX_FAST_LEVEL      34
126
127 /*
128  * BT_MATCHFINDER_HASH2_ORDER is the log base 2 of the number of entries in the
129  * hash table for finding length 2 matches.  This could be as high as 16, but
130  * using a smaller hash table speeds up compression due to reduced cache
131  * pressure.
132  */
133 #define BT_MATCHFINDER_HASH2_ORDER      12
134
135 /*
136  * These are the compressor-side limits on the codeword lengths for each Huffman
137  * code.  To make outputting bits slightly faster, some of these limits are
138  * lower than the limits defined by the LZX format.  This does not significantly
139  * affect the compression ratio, at least for the block sizes we use.
140  */
141 #define MAIN_CODEWORD_LIMIT     12      /* 64-bit: can buffer 4 main symbols  */
142 #define LENGTH_CODEWORD_LIMIT   12
143 #define ALIGNED_CODEWORD_LIMIT  7
144 #define PRE_CODEWORD_LIMIT      7
145
146 #include "wimlib/compress_common.h"
147 #include "wimlib/compressor_ops.h"
148 #include "wimlib/error.h"
149 #include "wimlib/lz_extend.h"
150 #include "wimlib/lzx_common.h"
151 #include "wimlib/unaligned.h"
152 #include "wimlib/util.h"
153
154 /* Matchfinders with 16-bit positions  */
155 #define mf_pos_t        u16
156 #define MF_SUFFIX       _16
157 #include "wimlib/bt_matchfinder.h"
158 #include "wimlib/hc_matchfinder.h"
159
160 /* Matchfinders with 32-bit positions  */
161 #undef mf_pos_t
162 #undef MF_SUFFIX
163 #define mf_pos_t        u32
164 #define MF_SUFFIX       _32
165 #include "wimlib/bt_matchfinder.h"
166 #include "wimlib/hc_matchfinder.h"
167
168 struct lzx_output_bitstream;
169
170 /* Codewords for the LZX Huffman codes.  */
171 struct lzx_codewords {
172         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
173         u32 len[LZX_LENCODE_NUM_SYMBOLS];
174         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
175 };
176
177 /* Codeword lengths (in bits) for the LZX Huffman codes.
178  * A zero length means the corresponding codeword has zero frequency.  */
179 struct lzx_lens {
180         u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
181         u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
182         u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
183 };
184
185 /* Cost model for near-optimal parsing  */
186 struct lzx_costs {
187
188         /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
189          * length 'len' match that has an offset belonging to 'offset_slot'.  */
190         u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
191
192         /* Cost for each symbol in the main code  */
193         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
194
195         /* Cost for each symbol in the length code  */
196         u32 len[LZX_LENCODE_NUM_SYMBOLS];
197
198 #if LZX_CONSIDER_ALIGNED_COSTS
199         /* Cost for each symbol in the aligned code  */
200         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
201 #endif
202 };
203
204 /* Codewords and lengths for the LZX Huffman codes.  */
205 struct lzx_codes {
206         struct lzx_codewords codewords;
207         struct lzx_lens lens;
208 };
209
210 /* Symbol frequency counters for the LZX Huffman codes.  */
211 struct lzx_freqs {
212         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
213         u32 len[LZX_LENCODE_NUM_SYMBOLS];
214         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
215 };
216
217 /*
218  * Represents a run of literals followed by a match or end-of-block.  This
219  * struct is needed to temporarily store items chosen by the parser, since items
220  * cannot be written until all items for the block have been chosen and the
221  * block's Huffman codes have been computed.
222  */
223 struct lzx_sequence {
224
225         /* The number of literals in the run.  This may be 0.  The literals are
226          * not stored explicitly in this structure; instead, they are read
227          * directly from the uncompressed data.  */
228         u16 litrunlen;
229
230         /* If the next field doesn't indicate end-of-block, then this is the
231          * match length minus LZX_MIN_MATCH_LEN.  */
232         u16 adjusted_length;
233
234         /* If bit 31 is clear, then this field contains the match header in bits
235          * 0-8 and the match offset plus LZX_OFFSET_ADJUSTMENT in bits 9-30.
236          * Otherwise, this sequence's literal run was the last literal run in
237          * the block, so there is no match that follows it.  */
238         u32 adjusted_offset_and_match_hdr;
239 };
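
/*
 * Illustrative example (hypothetical values): for a run of 3 literals followed
 * by a length-5 match whose adjusted offset falls in offset slot 10,
 * lzx_record_match() below fills in the sequence roughly as:
 *
 *      seq.litrunlen = 3;
 *      seq.adjusted_length = 5 - LZX_MIN_MATCH_LEN;
 *      seq.adjusted_offset_and_match_hdr =
 *              (adjusted_offset << 9) |
 *              (10 * LZX_NUM_LEN_HEADERS + (5 - LZX_MIN_MATCH_LEN));
 */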
240
241 /*
242  * This structure represents a byte position in the input buffer and a node in
243  * the graph of possible match/literal choices.
244  *
245  * Logically, each incoming edge to this node is labeled with a literal or a
246  * match that can be taken to reach this position from an earlier position; and
247  * each outgoing edge from this node is labeled with a literal or a match that
248  * can be taken to advance from this position to a later position.
249  */
250 struct lzx_optimum_node {
251
252         /* The cost, in bits, of the lowest-cost path that has been found to
253          * reach this position.  This can change as progressively lower cost
254          * paths are found to reach this position.  */
255         u32 cost;
256
257         /*
258          * The match or literal that was taken to reach this position.  This can
259          * change as progressively lower cost paths are found to reach this
260          * position.
261          *
262          * This variable is divided into two bitfields.
263          *
264          * Literals:
265          *      Low bits are 0, high bits are the literal.
266          *
267          * Explicit offset matches:
268          *      Low bits are the match length, high bits are the offset plus 2.
269          *
270          * Repeat offset matches:
271          *      Low bits are the match length, high bits are the queue index.
272          */
273         u32 item;
274 #define OPTIMUM_OFFSET_SHIFT 9
275 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
276 } _aligned_attribute(8);
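
/*
 * Illustrative encodings of 'item' (hypothetical values), with
 * OPTIMUM_OFFSET_SHIFT == 9:
 *
 *      literal 0x41:                  item = 0x41 << OPTIMUM_OFFSET_SHIFT
 *                                     (low 9 bits are 0, marking a literal)
 *      length-10 match at offset 100: item = ((100 + 2) << OPTIMUM_OFFSET_SHIFT) | 10
 *      length-10 repeat match on R1:  item = (1 << OPTIMUM_OFFSET_SHIFT) | 10
 */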
277
278 /*
279  * Least-recently-used queue for match offsets.
280  *
281  * This is represented as a 64-bit integer for efficiency.  There are three
282  * offsets of 21 bits each.  The remaining top bit is unused.
283  */
284 struct lzx_lru_queue {
285         u64 R;
286 };
287
288 #define LZX_QUEUE64_OFFSET_SHIFT 21
289 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
290
291 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
292 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
293 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
294
295 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
296 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
297 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
298
299 static inline void
300 lzx_lru_queue_init(struct lzx_lru_queue *queue)
301 {
302         queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
303                    ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
304                    ((u64)1 << LZX_QUEUE64_R2_SHIFT);
305 }
306
307 static inline u64
308 lzx_lru_queue_R0(struct lzx_lru_queue queue)
309 {
310         return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
311 }
312
313 static inline u64
314 lzx_lru_queue_R1(struct lzx_lru_queue queue)
315 {
316         return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
317 }
318
319 static inline u64
320 lzx_lru_queue_R2(struct lzx_lru_queue queue)
321 {
322         return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
323 }
324
325 /* Push a match offset onto the front (most recently used) end of the queue.  */
326 static inline struct lzx_lru_queue
327 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
328 {
329         return (struct lzx_lru_queue) {
330                 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
331         };
332 }
333
334 /* Swap a match offset to the front of the queue.  */
335 static inline struct lzx_lru_queue
336 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
337 {
338         if (idx == 0)
339                 return queue;
340
341         if (idx == 1)
342                 return (struct lzx_lru_queue) {
343                         .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
344                              (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
345                              (queue.R & LZX_QUEUE64_R2_MASK),
346                 };
347
348         return (struct lzx_lru_queue) {
349                 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
350                      (queue.R & LZX_QUEUE64_R1_MASK) |
351                      (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
352         };
353 }
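
/*
 * Example of the queue operations above (illustrative): if the queue holds
 * R0=a, R1=b, R2=c, then
 *
 *      lzx_lru_queue_push(queue, d)  =>  R0=d, R1=a, R2=b   (c is pushed out)
 *      lzx_lru_queue_swap(queue, 1)  =>  R0=b, R1=a, R2=c
 *      lzx_lru_queue_swap(queue, 2)  =>  R0=c, R1=b, R2=a
 */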
354
355 /* The main LZX compressor structure  */
356 struct lzx_compressor {
357
358         /* The "nice" match length: if a match of this length is found, then
359          * choose it immediately without further consideration.  */
360         unsigned nice_match_length;
361
362         /* The maximum search depth: consider at most this many potential
363          * matches at each position.  */
364         unsigned max_search_depth;
365
366         /* The log base 2 of the LZX window size for LZ match offset encoding
367          * purposes.  This will be >= LZX_MIN_WINDOW_ORDER and <=
368          * LZX_MAX_WINDOW_ORDER.  */
369         unsigned window_order;
370
371         /* The number of symbols in the main alphabet.  This depends on
372          * @window_order, since @window_order determines the maximum possible
373          * offset.  */
374         unsigned num_main_syms;
375
376         /* Number of optimization passes per block  */
377         unsigned num_optim_passes;
378
379         /* The preprocessed buffer of data being compressed  */
380         u8 *in_buffer;
381
382         /* The number of bytes of data to be compressed, which is the number of
383          * bytes of data in @in_buffer that are actually valid.  */
384         size_t in_nbytes;
385
386         /* Pointer to the compress() implementation chosen at allocation time */
387         void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
388
389         /* If true, the compressor need not preserve the input buffer if it
390          * compresses the data successfully.  */
391         bool destructive;
392
393         /* The Huffman symbol frequency counters for the current block.  */
394         struct lzx_freqs freqs;
395
396         /* The Huffman codes for the current and previous blocks.  The one with
397          * index 'codes_index' is for the current block, and the other one is
398          * for the previous block.  */
399         struct lzx_codes codes[2];
400         unsigned codes_index;
401
402         /* The matches and literals that the parser has chosen for the current
403          * block.  The required length of this array is limited by the maximum
404          * number of matches that can ever be chosen for a single block, plus
405          * one for the special entry at the end.  */
406         struct lzx_sequence chosen_sequences[
407                        DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];
408
409         /* Tables for mapping adjusted offsets to offset slots  */
410
411         /* offset slots [0, 29]  */
412         u8 offset_slot_tab_1[32768];
413
414         /* offset slots [30, 49]  */
415         u8 offset_slot_tab_2[128];
416
417         union {
418                 /* Data for greedy or lazy parsing  */
419                 struct {
420                         /* Hash chains matchfinder (MUST BE LAST!!!)  */
421                         union {
422                                 struct hc_matchfinder_16 hc_mf_16;
423                                 struct hc_matchfinder_32 hc_mf_32;
424                         };
425                 };
426
427                 /* Data for near-optimal parsing  */
428                 struct {
429                         /*
430                          * The graph nodes for the current block.
431                          *
432                          * We need at least 'LZX_DIV_BLOCK_SIZE +
433                          * LZX_MAX_MATCH_LEN - 1' nodes because that is the
434                          * maximum block size that may be used.  Add 1 because
435                          * we need a node to represent end-of-block.
436                          *
437                          * It is possible that nodes past end-of-block are
438                          * accessed during match consideration, but this can
439                          * only occur if the block was truncated at
440                          * LZX_DIV_BLOCK_SIZE.  So the same bound still applies.
441                          * Note that since nodes past the end of the block will
442                          * never actually have an effect on the items that are
443                          * chosen for the block, it makes no difference what
444                          * their costs are initialized to (if anything).
445                          */
446                         struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE +
447                                                               LZX_MAX_MATCH_LEN - 1 + 1];
448
449                         /* The cost model for the current block  */
450                         struct lzx_costs costs;
451
452                         /*
453                          * Cached matches for the current block.  This array
454                          * contains the matches that were found at each position
455                          * in the block.  Specifically, for each position, there
456                          * is a special 'struct lz_match' whose 'length' field
457                          * contains the number of matches that were found at
458                          * that position; this is followed by the matches
459                          * themselves, if any, sorted by strictly increasing
460                          * length.
461                          *
462                          * Note: in rare cases, there will be a very high number
463                          * of matches in the block and this array will overflow.
464                          * If this happens, we force the end of the current
465                          * block.  LZX_CACHE_LENGTH is the length at which we
466                          * actually check for overflow.  The extra slots beyond
467                          * this are enough to absorb the worst case overflow,
468                          * which occurs if starting at
469                          * &match_cache[LZX_CACHE_LENGTH - 1], we write the
470                          * match count header, then write
471                          * LZX_MAX_MATCHES_PER_POS matches, then skip searching
472                          * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
473                          * write the match count header for each.
474                          */
475                         struct lz_match match_cache[LZX_CACHE_LENGTH +
476                                                     LZX_MAX_MATCHES_PER_POS +
477                                                     LZX_MAX_MATCH_LEN - 1];
478
479                         /* Binary trees matchfinder (MUST BE LAST!!!)  */
480                         union {
481                                 struct bt_matchfinder_16 bt_mf_16;
482                                 struct bt_matchfinder_32 bt_mf_32;
483                         };
484                 };
485         };
486 };
487
488 /*
489  * Will a matchfinder using 16-bit positions be sufficient for compressing
490  * buffers of up to the specified size?  The limit could be 65536 bytes, but we
491  * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
492  * This requires that the limit be no more than the length of offset_slot_tab_1
493  * (currently 32768).
494  */
495 static inline bool
496 lzx_is_16_bit(size_t max_bufsize)
497 {
498         STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
499         return max_bufsize <= 32768;
500 }
501
502 /*
503  * The following macros call either the 16-bit or the 32-bit version of a
504  * matchfinder function based on the value of 'is_16_bit', which will be known
505  * at compilation time.
506  */
507
508 #define CALL_HC_MF(is_16_bit, c, funcname, ...)                               \
509         ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
510                        CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__));
511
512 #define CALL_BT_MF(is_16_bit, c, funcname, ...)                               \
513         ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \
514                        CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__));
515
516 /*
517  * Structure to keep track of the current state of sending bits to the
518  * compressed output buffer.
519  *
520  * The LZX bitstream is encoded as a sequence of 16-bit coding units.
521  */
522 struct lzx_output_bitstream {
523
524         /* Bits that haven't yet been written to the output buffer.  */
525         machine_word_t bitbuf;
526
527         /* Number of bits currently held in @bitbuf.  */
528         u32 bitcount;
529
530         /* Pointer to the start of the output buffer.  */
531         u8 *start;
532
533         /* Pointer to the position in the output buffer at which the next coding
534          * unit should be written.  */
535         u8 *next;
536
537         /* Pointer just past the end of the output buffer, rounded down to a
538          * 2-byte boundary.  */
539         u8 *end;
540 };
541
542 /* Can the specified number of bits always be added to 'bitbuf' after any
543  * pending 16-bit coding units have been flushed?  */
544 #define CAN_BUFFER(n)   ((n) <= (8 * sizeof(machine_word_t)) - 15)
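
/*
 * Example (illustrative): machine_word_t is 8 bytes on 64-bit builds, so
 * CAN_BUFFER(n) holds for n <= 64 - 15 = 49.  Four main symbols of at most
 * MAIN_CODEWORD_LIMIT = 12 bits each (48 bits total) can therefore be
 * accumulated between flushes.  On 32-bit builds the limit is 32 - 15 = 17
 * bits, so for example only one main codeword can be buffered per flush.
 */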
545
546 /*
547  * Initialize the output bitstream.
548  *
549  * @os
550  *      The output bitstream structure to initialize.
551  * @buffer
552  *      The buffer being written to.
553  * @size
554  *      Size of @buffer, in bytes.
555  */
556 static void
557 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
558 {
559         os->bitbuf = 0;
560         os->bitcount = 0;
561         os->start = buffer;
562         os->next = os->start;
563         os->end = os->start + (size & ~1);
564 }
565
566 /* Add some bits to the bitbuffer variable of the output bitstream.  The caller
567  * must make sure there is enough room.  */
568 static inline void
569 lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
570 {
571         os->bitbuf = (os->bitbuf << num_bits) | bits;
572         os->bitcount += num_bits;
573 }
574
575 /* Flush bits from the bitbuffer variable to the output buffer.  'max_num_bits'
576  * specifies the maximum number of bits that may have been added since the last
577  * flush.  */
578 static inline void
579 lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
580 {
581         /* Masking the number of bits to shift is only needed to avoid undefined
582          * behavior; we don't actually care about the results of bad shifts.  On
583          * x86, the explicit masking generates no extra code.  */
584         const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1;
585
586         if (os->end - os->next < 6)
587                 return;
588         put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 16) &
589                                             shift_mask), os->next + 0);
590         if (max_num_bits > 16)
591                 put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 32) &
592                                                 shift_mask), os->next + 2);
593         if (max_num_bits > 32)
594                 put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 48) &
595                                                 shift_mask), os->next + 4);
596         os->next += (os->bitcount >> 4) << 1;
597         os->bitcount &= 15;
598 }
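
/*
 * Worked example (illustrative, derived from lzx_flush_bits() above): suppose
 * os->bitcount is 37 when the flush runs with max_num_bits > 32.  The oldest
 * 16 pending bits (bits 36..21 of os->bitbuf) are written as one little-endian
 * coding unit and the next 16 (bits 20..5) as another.  os->next then advances
 * by (37 >> 4) << 1 = 4 bytes and os->bitcount becomes 37 & 15 = 5, so the 5
 * newest bits stay buffered.  A third unit may be written speculatively, but
 * since os->next does not advance past the bits actually flushed, it is simply
 * overwritten later.
 */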
599
600 /* Add at most 16 bits to the bitbuffer and flush it.  */
601 static inline void
602 lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
603 {
604         lzx_add_bits(os, bits, num_bits);
605         lzx_flush_bits(os, 16);
606 }
607
608 /*
609  * Flush the last coding unit to the output buffer if needed.  Return the total
610  * number of bytes written to the output buffer, or 0 if an overflow occurred.
611  */
612 static u32
613 lzx_flush_output(struct lzx_output_bitstream *os)
614 {
615         if (os->end - os->next < 6)
616                 return 0;
617
618         if (os->bitcount != 0) {
619                 put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next);
620                 os->next += 2;
621         }
622
623         return os->next - os->start;
624 }
625
626 /* Build the main, length, and aligned offset Huffman codes used in LZX.
627  *
628  * This takes as input the frequency tables for each code and produces as output
629  * a set of tables that map symbols to codewords and codeword lengths.  */
630 static void
631 lzx_make_huffman_codes(struct lzx_compressor *c)
632 {
633         const struct lzx_freqs *freqs = &c->freqs;
634         struct lzx_codes *codes = &c->codes[c->codes_index];
635
636         STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
637                       MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
638         STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
639                       LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
640         STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
641                       ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
642
643         make_canonical_huffman_code(c->num_main_syms,
644                                     MAIN_CODEWORD_LIMIT,
645                                     freqs->main,
646                                     codes->lens.main,
647                                     codes->codewords.main);
648
649         make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
650                                     LENGTH_CODEWORD_LIMIT,
651                                     freqs->len,
652                                     codes->lens.len,
653                                     codes->codewords.len);
654
655         make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
656                                     ALIGNED_CODEWORD_LIMIT,
657                                     freqs->aligned,
658                                     codes->lens.aligned,
659                                     codes->codewords.aligned);
660 }
661
662 /* Reset the symbol frequencies for the LZX Huffman codes.  */
663 static void
664 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
665 {
666         memset(&c->freqs, 0, sizeof(c->freqs));
667 }
668
669 static unsigned
670 lzx_compute_precode_items(const u8 lens[restrict],
671                           const u8 prev_lens[restrict],
672                           u32 precode_freqs[restrict],
673                           unsigned precode_items[restrict])
674 {
675         unsigned *itemptr;
676         unsigned run_start;
677         unsigned run_end;
678         unsigned extra_bits;
679         int delta;
680         u8 len;
681
682         itemptr = precode_items;
683         run_start = 0;
684
685         while (!((len = lens[run_start]) & 0x80)) {
686
687                 /* len = the length being repeated  */
688
689                 /* Find the next run of codeword lengths.  */
690
691                 run_end = run_start + 1;
692
693                 /* Fast case for a single length.  */
694                 if (likely(len != lens[run_end])) {
695                         delta = prev_lens[run_start] - len;
696                         if (delta < 0)
697                                 delta += 17;
698                         precode_freqs[delta]++;
699                         *itemptr++ = delta;
700                         run_start++;
701                         continue;
702                 }
703
704                 /* Extend the run.  */
705                 do {
706                         run_end++;
707                 } while (len == lens[run_end]);
708
709                 if (len == 0) {
710                         /* Run of zeroes.  */
711
712                         /* Symbol 18: RLE 20 to 51 zeroes at a time.  */
713                         while ((run_end - run_start) >= 20) {
714                                 extra_bits = min((run_end - run_start) - 20, 0x1f);
715                                 precode_freqs[18]++;
716                                 *itemptr++ = 18 | (extra_bits << 5);
717                                 run_start += 20 + extra_bits;
718                         }
719
720                         /* Symbol 17: RLE 4 to 19 zeroes at a time.  */
721                         if ((run_end - run_start) >= 4) {
722                                 extra_bits = min((run_end - run_start) - 4, 0xf);
723                                 precode_freqs[17]++;
724                                 *itemptr++ = 17 | (extra_bits << 5);
725                                 run_start += 4 + extra_bits;
726                         }
727                 } else {
728
729                         /* A run of nonzero lengths. */
730
731                         /* Symbol 19: RLE 4 to 5 of any length at a time.  */
732                         while ((run_end - run_start) >= 4) {
733                                 extra_bits = (run_end - run_start) > 4;
734                                 delta = prev_lens[run_start] - len;
735                                 if (delta < 0)
736                                         delta += 17;
737                                 precode_freqs[19]++;
738                                 precode_freqs[delta]++;
739                                 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
740                                 run_start += 4 + extra_bits;
741                         }
742                 }
743
744                 /* Output any remaining lengths without RLE.  */
745                 while (run_start != run_end) {
746                         delta = prev_lens[run_start] - len;
747                         if (delta < 0)
748                                 delta += 17;
749                         precode_freqs[delta]++;
750                         *itemptr++ = delta;
751                         run_start++;
752                 }
753         }
754
755         return itemptr - precode_items;
756 }
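
/*
 * Example of the items produced above (illustrative): a run of 25 zero
 * codeword lengths becomes the single item 18 | (5 << 5), i.e. precode symbol
 * 18 with extra bits 5, meaning "20 + 5 zeroes".  A run of only 6 zeroes would
 * instead use symbol 17 with extra bits 2, meaning "4 + 2 zeroes".
 */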
757
758 /*
759  * Output a Huffman code in the compressed form used in LZX.
760  *
761  * The Huffman code is represented in the output as a logical series of codeword
762  * lengths from which the Huffman code, which must be in canonical form, can be
763  * reconstructed.
764  *
765  * The codeword lengths are themselves compressed using a separate Huffman code,
766  * the "precode", which contains a symbol for each possible codeword length in
767  * the larger code as well as several special symbols to represent repeated
768  * codeword lengths (a form of run-length encoding).  The precode is itself
769  * constructed in canonical form, and its codeword lengths are represented
770  * literally in 20 4-bit fields that immediately precede the compressed codeword
771  * lengths of the larger code.
772  *
773  * Furthermore, the codeword lengths of the larger code are actually represented
774  * as deltas from the codeword lengths of the corresponding code in the previous
775  * block.
776  *
777  * @os:
778  *      Bitstream to which to write the compressed Huffman code.
779  * @lens:
780  *      The codeword lengths, indexed by symbol, in the Huffman code.
781  * @prev_lens:
782  *      The codeword lengths, indexed by symbol, in the corresponding Huffman
783  *      code in the previous block, or all zeroes if this is the first block.
784  * @num_lens:
785  *      The number of symbols in the Huffman code.
786  */
787 static void
788 lzx_write_compressed_code(struct lzx_output_bitstream *os,
789                           const u8 lens[restrict],
790                           const u8 prev_lens[restrict],
791                           unsigned num_lens)
792 {
793         u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
794         u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
795         u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
796         unsigned precode_items[num_lens];
797         unsigned num_precode_items;
798         unsigned precode_item;
799         unsigned precode_sym;
800         unsigned i;
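
        /* Temporarily overwrite the byte just past the end of 'lens' with the
         * sentinel value 0x80.  Codeword lengths never have the high bit set,
         * so lzx_compute_precode_items() can use this to detect the end of the
         * array without a separate bounds check.  The original byte is
         * restored at the end of this function.  */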
801         u8 saved = lens[num_lens];
802         *(u8 *)(lens + num_lens) = 0x80;
803
804         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
805                 precode_freqs[i] = 0;
806
807         /* Compute the "items" (RLE / literal tokens and extra bits) with which
808          * the codeword lengths in the larger code will be output.  */
809         num_precode_items = lzx_compute_precode_items(lens,
810                                                       prev_lens,
811                                                       precode_freqs,
812                                                       precode_items);
813
814         /* Build the precode.  */
815         STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 &&
816                       PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN);
817         make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
818                                     PRE_CODEWORD_LIMIT,
819                                     precode_freqs, precode_lens,
820                                     precode_codewords);
821
822         /* Output the lengths of the codewords in the precode.  */
823         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
824                 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
825
826         /* Output the encoded lengths of the codewords in the larger code.  */
827         for (i = 0; i < num_precode_items; i++) {
828                 precode_item = precode_items[i];
829                 precode_sym = precode_item & 0x1F;
830                 lzx_add_bits(os, precode_codewords[precode_sym],
831                              precode_lens[precode_sym]);
832                 if (precode_sym >= 17) {
833                         if (precode_sym == 17) {
834                                 lzx_add_bits(os, precode_item >> 5, 4);
835                         } else if (precode_sym == 18) {
836                                 lzx_add_bits(os, precode_item >> 5, 5);
837                         } else {
838                                 lzx_add_bits(os, (precode_item >> 5) & 1, 1);
839                                 precode_sym = precode_item >> 6;
840                                 lzx_add_bits(os, precode_codewords[precode_sym],
841                                              precode_lens[precode_sym]);
842                         }
843                 }
844                 STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1));
845                 lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1);
846         }
847
848         *(u8 *)(lens + num_lens) = saved;
849 }
850
851 /*
852  * Write all matches and literal bytes (which were precomputed) in an LZX
853  * compressed block to the output bitstream in the final compressed
854  * representation.
855  *
856  * @os
857  *      The output bitstream.
858  * @block_type
859  *      The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
860  *      LZX_BLOCKTYPE_VERBATIM).
861  * @block_data
862  *      The uncompressed data of the block.
863  * @sequences
864  *      The matches and literals to output, given as a series of sequences.
865  * @codes
866  *      The main, length, and aligned offset Huffman codes for the current
867  *      LZX compressed block.
868  */
869 static void
870 lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
871                     const u8 *block_data, const struct lzx_sequence sequences[],
872                     const struct lzx_codes *codes)
873 {
874         const struct lzx_sequence *seq = sequences;
875         u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED);
876
877         for (;;) {
878                 /* Output the next sequence.  */
879
880                 unsigned litrunlen = seq->litrunlen;
881                 unsigned match_hdr;
882                 unsigned main_symbol;
883                 unsigned adjusted_length;
884                 u32 adjusted_offset;
885                 unsigned offset_slot;
886                 unsigned num_extra_bits;
887                 u32 extra_bits;
888
889                 /* Output the literal run of the sequence.  */
890
891                 if (litrunlen) {  /* Is the literal run nonempty?  */
892
893                         /* Verify optimization is enabled on 64-bit  */
894                         STATIC_ASSERT(sizeof(machine_word_t) < 8 ||
895                                       CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT));
896
897                         if (CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT)) {
898
899                                 /* 64-bit: write 4 literals at a time.  */
900                                 while (litrunlen >= 4) {
901                                         unsigned lit0 = block_data[0];
902                                         unsigned lit1 = block_data[1];
903                                         unsigned lit2 = block_data[2];
904                                         unsigned lit3 = block_data[3];
905                                         lzx_add_bits(os, codes->codewords.main[lit0],
906                                                      codes->lens.main[lit0]);
907                                         lzx_add_bits(os, codes->codewords.main[lit1],
908                                                      codes->lens.main[lit1]);
909                                         lzx_add_bits(os, codes->codewords.main[lit2],
910                                                      codes->lens.main[lit2]);
911                                         lzx_add_bits(os, codes->codewords.main[lit3],
912                                                      codes->lens.main[lit3]);
913                                         lzx_flush_bits(os, 4 * MAIN_CODEWORD_LIMIT);
914                                         block_data += 4;
915                                         litrunlen -= 4;
916                                 }
917                                 if (litrunlen--) {
918                                         unsigned lit = *block_data++;
919                                         lzx_add_bits(os, codes->codewords.main[lit],
920                                                      codes->lens.main[lit]);
921                                         if (litrunlen--) {
922                                                 unsigned lit = *block_data++;
923                                                 lzx_add_bits(os, codes->codewords.main[lit],
924                                                              codes->lens.main[lit]);
925                                                 if (litrunlen--) {
926                                                         unsigned lit = *block_data++;
927                                                         lzx_add_bits(os, codes->codewords.main[lit],
928                                                                      codes->lens.main[lit]);
929                                                         lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
930                                                 } else {
931                                                         lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
932                                                 }
933                                         } else {
934                                                 lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT);
935                                         }
936                                 }
937                         } else {
938                                 /* 32-bit: write 1 literal at a time.  */
939                                 do {
940                                         unsigned lit = *block_data++;
941                                         lzx_add_bits(os, codes->codewords.main[lit],
942                                                      codes->lens.main[lit]);
943                                         lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
944                                 } while (--litrunlen);
945                         }
946                 }
947
948                 /* Was this the last literal run?  */
949                 if (seq->adjusted_offset_and_match_hdr & 0x80000000)
950                         return;
951
952                 /* Nope; output the match.  */
953
954                 match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
955                 main_symbol = LZX_NUM_CHARS + match_hdr;
956                 adjusted_length = seq->adjusted_length;
957
958                 block_data += adjusted_length + LZX_MIN_MATCH_LEN;
959
960                 offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
961                 adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
962
963                 num_extra_bits = lzx_extra_offset_bits[offset_slot];
964                 extra_bits = adjusted_offset - lzx_offset_slot_base[offset_slot];
965
966         #define MAX_MATCH_BITS  (MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
967                                  14 + ALIGNED_CODEWORD_LIMIT)
968
969                 /* Verify optimization is enabled on 64-bit  */
970                 STATIC_ASSERT(sizeof(machine_word_t) < 8 || CAN_BUFFER(MAX_MATCH_BITS));
971
972                 /* Output the main symbol for the match.  */
973
974                 lzx_add_bits(os, codes->codewords.main[main_symbol],
975                              codes->lens.main[main_symbol]);
976                 if (!CAN_BUFFER(MAX_MATCH_BITS))
977                         lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
978
979                 /* If needed, output the length symbol for the match.  */
980
981                 if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
982                         lzx_add_bits(os, codes->codewords.len[adjusted_length -
983                                                               LZX_NUM_PRIMARY_LENS],
984                                      codes->lens.len[adjusted_length -
985                                                      LZX_NUM_PRIMARY_LENS]);
986                         if (!CAN_BUFFER(MAX_MATCH_BITS))
987                                 lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
988                 }
989
990                 /* Output the extra offset bits for the match.  In aligned
991                  * offset blocks, the lowest 3 bits of the adjusted offset are
992                  * Huffman-encoded using the aligned offset code, provided that
993                  * at least 3 extra offset bits are required.  All other
994                  * extra offset bits are output verbatim.  */
995
996                 if ((adjusted_offset & ones_if_aligned) >= 16) {
997
998                         lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
999                                      num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
1000                         if (!CAN_BUFFER(MAX_MATCH_BITS))
1001                                 lzx_flush_bits(os, 14);
1002
1003                         lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
1004                                                                   LZX_ALIGNED_OFFSET_BITMASK],
1005                                      codes->lens.aligned[adjusted_offset &
1006                                                          LZX_ALIGNED_OFFSET_BITMASK]);
1007                         if (!CAN_BUFFER(MAX_MATCH_BITS))
1008                                 lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
1009                 } else {
1010                         STATIC_ASSERT(CAN_BUFFER(17));
1011
1012                         lzx_add_bits(os, extra_bits, num_extra_bits);
1013                         if (!CAN_BUFFER(MAX_MATCH_BITS))
1014                                 lzx_flush_bits(os, 17);
1015                 }
1016
1017                 if (CAN_BUFFER(MAX_MATCH_BITS))
1018                         lzx_flush_bits(os, MAX_MATCH_BITS);
1019
1020                 /* Advance to the next sequence.  */
1021                 seq++;
1022         }
1023 }
1024
1025 static void
1026 lzx_write_compressed_block(const u8 *block_begin,
1027                            int block_type,
1028                            u32 block_size,
1029                            unsigned window_order,
1030                            unsigned num_main_syms,
1031                            const struct lzx_sequence sequences[],
1032                            const struct lzx_codes * codes,
1033                            const struct lzx_lens * prev_lens,
1034                            struct lzx_output_bitstream * os)
1035 {
1036         /* The first three bits indicate the type of block and are one of the
1037          * LZX_BLOCKTYPE_* constants.  */
1038         lzx_write_bits(os, block_type, 3);
1039
1040         /* Output the block size.
1041          *
1042          * The original LZX format seemed to always encode the block size in 3
1043          * bytes.  However, the implementation in WIMGAPI, as used in WIM files,
1044          * uses the first bit to indicate whether the block is the default size
1045          * (32768) or a different size given explicitly by the next 16 bits.
1046          *
1047          * By default, this compressor uses a window size of 32768 and therefore
1048          * follows the WIMGAPI behavior.  However, this compressor also supports
1049          * window sizes greater than 32768 bytes, which do not appear to be
1050          * supported by WIMGAPI.  In such cases, we retain the default size bit
1051          * to mean a size of 32768 bytes but output non-default block size in 24
1052          * bits rather than 16.  The compatibility of this behavior is unknown
1053          * because WIMs created with chunk size greater than 32768 can seemingly
1054          * only be opened by wimlib anyway.  */
1055         if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
1056                 lzx_write_bits(os, 1, 1);
1057         } else {
1058                 lzx_write_bits(os, 0, 1);
1059
1060                 if (window_order >= 16)
1061                         lzx_write_bits(os, block_size >> 16, 8);
1062
1063                 lzx_write_bits(os, block_size & 0xFFFF, 16);
1064         }
1065
1066         /* If it's an aligned offset block, output the aligned offset code.  */
1067         if (block_type == LZX_BLOCKTYPE_ALIGNED) {
1068                 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1069                         lzx_write_bits(os, codes->lens.aligned[i],
1070                                        LZX_ALIGNEDCODE_ELEMENT_SIZE);
1071                 }
1072         }
1073
1074         /* Output the main code (two parts).  */
1075         lzx_write_compressed_code(os, codes->lens.main,
1076                                   prev_lens->main,
1077                                   LZX_NUM_CHARS);
1078         lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
1079                                   prev_lens->main + LZX_NUM_CHARS,
1080                                   num_main_syms - LZX_NUM_CHARS);
1081
1082         /* Output the length code.  */
1083         lzx_write_compressed_code(os, codes->lens.len,
1084                                   prev_lens->len,
1085                                   LZX_LENCODE_NUM_SYMBOLS);
1086
1087         /* Output the compressed matches and literals.  */
1088         lzx_write_sequences(os, block_type, block_begin, sequences, codes);
1089 }
1090
1091 /* Given the frequencies of symbols in an LZX-compressed block and the
1092  * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
1093  * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
1094  * will take fewer bits to output.  */
1095 static int
1096 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1097                                const struct lzx_codes * codes)
1098 {
1099         u32 aligned_cost = 0;
1100         u32 verbatim_cost = 0;
1101
1102         /* A verbatim block requires 3 bits in each place that an aligned symbol
1103          * would be used in an aligned offset block.  */
1104         for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1105                 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
1106                 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1107         }
1108
1109         /* Account for output of the aligned offset code.  */
1110         aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1111
1112         if (aligned_cost < verbatim_cost)
1113                 return LZX_BLOCKTYPE_ALIGNED;
1114         else
1115                 return LZX_BLOCKTYPE_VERBATIM;
1116 }
1117
1118 /*
1119  * Return the offset slot for the specified adjusted match offset, using the
1120  * compressor's acceleration tables to speed up the mapping.
1121  */
1122 static inline unsigned
1123 lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
1124                          bool is_16_bit)
1125 {
1126         if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
1127                 return c->offset_slot_tab_1[adjusted_offset];
1128         return c->offset_slot_tab_2[adjusted_offset >> 14];
1129 }
1130
1131 /*
1132  * Finish an LZX block:
1133  *
1134  * - build the Huffman codes
1135  * - decide whether to output the block as VERBATIM or ALIGNED
1136  * - output the block
1137  * - swap the indices of the current and previous Huffman codes
1138  */
1139 static void
1140 lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
1141                  const u8 *block_begin, u32 block_size, u32 seq_idx)
1142 {
1143         int block_type;
1144
1145         lzx_make_huffman_codes(c);
1146
1147         block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
1148                                                     &c->codes[c->codes_index]);
1149         lzx_write_compressed_block(block_begin,
1150                                    block_type,
1151                                    block_size,
1152                                    c->window_order,
1153                                    c->num_main_syms,
1154                                    &c->chosen_sequences[seq_idx],
1155                                    &c->codes[c->codes_index],
1156                                    &c->codes[c->codes_index ^ 1].lens,
1157                                    os);
1158         c->codes_index ^= 1;
1159 }
1160
1161 /* Tally the Huffman symbol for a literal and increment the literal run length.
1162  */
1163 static inline void
1164 lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
1165 {
1166         c->freqs.main[literal]++;
1167         ++*litrunlen_p;
1168 }
1169
1170 /* Tally the Huffman symbol for a match, save the match data and the length of
1171  * the preceding literal run in the next lzx_sequence, and update the recent
1172  * offsets queue.  */
1173 static inline void
1174 lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
1175                  u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
1176                  u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
1177 {
1178         u32 litrunlen = *litrunlen_p;
1179         struct lzx_sequence *next_seq = *next_seq_p;
1180         unsigned offset_slot;
1181         unsigned v;
1182
1183         v = length - LZX_MIN_MATCH_LEN;
1184
1185         /* Save the literal run length and adjusted length.  */
1186         next_seq->litrunlen = litrunlen;
1187         next_seq->adjusted_length = v;
1188
1189         /* Compute the length header and tally the length symbol if needed  */
1190         if (v >= LZX_NUM_PRIMARY_LENS) {
1191                 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1192                 v = LZX_NUM_PRIMARY_LENS;
1193         }
1194
1195         /* Compute the offset slot  */
1196         offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1197
1198         /* Compute the match header.  */
1199         v += offset_slot * LZX_NUM_LEN_HEADERS;
1200
1201         /* Save the adjusted offset and match header.  */
1202         next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v;
1203
1204         /* Tally the main symbol.  */
1205         c->freqs.main[LZX_NUM_CHARS + v]++;
1206
1207         /* Update the recent offsets queue.  */
1208         if (offset_data < LZX_NUM_RECENT_OFFSETS) {
1209                 /* Repeat offset match  */
1210                 swap(recent_offsets[0], recent_offsets[offset_data]);
1211         } else {
1212                 /* Explicit offset match  */
1213
1214                 /* Tally the aligned offset symbol if needed  */
1215                 if (offset_data >= 16)
1216                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1217
1218                 recent_offsets[2] = recent_offsets[1];
1219                 recent_offsets[1] = recent_offsets[0];
1220                 recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT;
1221         }
1222
1223         /* Reset the literal run length and advance to the next sequence.  */
1224         *next_seq_p = next_seq + 1;
1225         *litrunlen_p = 0;
1226 }
1227
1228 /* Finish the last lzx_sequence.  The last lzx_sequence is just a literal run;
1229  * there is no match.  This literal run may be empty.  */
1230 static inline void
1231 lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
1232 {
1233         last_seq->litrunlen = litrunlen;
1234
1235         /* Special value to mark last sequence  */
1236         last_seq->adjusted_offset_and_match_hdr = 0x80000000;
1237 }
1238
1239 /*
1240  * Given the minimum-cost path computed through the item graph for the current
1241  * block, walk the path and count how many of each symbol in each Huffman-coded
1242  * alphabet would be required to output the items (matches and literals) along
1243  * the path.
1244  *
1245  * Note that the path will be walked backwards (from the end of the block to the
1246  * beginning of the block), but this doesn't matter because this function only
1247  * computes frequencies.
1248  */
1249 static inline void
1250 lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1251 {
1252         u32 node_idx = block_size;
1253         for (;;) {
1254                 u32 len;
1255                 u32 offset_data;
1256                 unsigned v;
1257                 unsigned offset_slot;
1258
1259                 /* Tally literals until either a match or the beginning of the
1260                  * block is reached.  */
1261                 for (;;) {
1262                         u32 item = c->optimum_nodes[node_idx].item;
1263
1264                         len = item & OPTIMUM_LEN_MASK;
1265                         offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1266
1267                         if (len != 0)  /* Not a literal?  */
1268                                 break;
1269
1270                         /* Tally the main symbol for the literal.  */
1271                         c->freqs.main[offset_data]++;
1272
1273                         if (--node_idx == 0) /* Beginning of block was reached?  */
1274                                 return;
1275                 }
1276
1277                 node_idx -= len;
1278
1279                 /* Tally a match.  */
1280
1281                 /* Tally the aligned offset symbol if needed.  */
1282                 if (offset_data >= 16)
1283                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1284
1285                 /* Tally the length symbol if needed.  */
1286                 v = len - LZX_MIN_MATCH_LEN;
1287                 if (v >= LZX_NUM_PRIMARY_LENS) {
1288                         c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1289                         v = LZX_NUM_PRIMARY_LENS;
1290                 }
1291
1292                 /* Tally the main symbol.  */
1293                 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1294                 v += offset_slot * LZX_NUM_LEN_HEADERS;
1295                 c->freqs.main[LZX_NUM_CHARS + v]++;
1296
1297                 if (node_idx == 0) /* Beginning of block was reached?  */
1298                         return;
1299         }
1300 }
1301
1302 /*
1303  * Like lzx_tally_item_list(), but this function also generates the list of
1304  * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
1305  * ready to be output to the bitstream after the Huffman codes are computed.
1306  * The lzx_sequences will be written to decreasing memory addresses as the path
1307  * is walked backwards, which means they will end up in the expected
1308  * first-to-last order.  The return value is the index in c->chosen_sequences at
1309  * which the lzx_sequences begin.
1310  */
1311 static inline u32
1312 lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1313 {
1314         u32 node_idx = block_size;
1315         u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
1316         u32 lit_start_node;
1317
1318         /* Special value to mark last sequence  */
1319         c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000;
1320
1321         lit_start_node = node_idx;
1322         for (;;) {
1323                 u32 len;
1324                 u32 offset_data;
1325                 unsigned v;
1326                 unsigned offset_slot;
1327
1328                 /* Record literals until either a match or the beginning of the
1329                  * block is reached.  */
1330                 for (;;) {
1331                         u32 item = c->optimum_nodes[node_idx].item;
1332
1333                         len = item & OPTIMUM_LEN_MASK;
1334                         offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1335
1336                         if (len != 0) /* Not a literal?  */
1337                                 break;
1338
1339                         /* Tally the main symbol for the literal.  */
1340                         c->freqs.main[offset_data]++;
1341
1342                         if (--node_idx == 0) /* Beginning of block was reached?  */
1343                                 goto out;
1344                 }
1345
1346                 /* Save the literal run length for the next sequence (the
1347                  * "previous sequence" when walking backwards).  */
1348                 c->chosen_sequences[seq_idx--].litrunlen = lit_start_node - node_idx;
1349                 node_idx -= len;
1350                 lit_start_node = node_idx;
1351
1352                 /* Record a match.  */
1353
1354                 /* Tally the aligned offset symbol if needed.  */
1355                 if (offset_data >= 16)
1356                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1357
1358                 /* Save the adjusted length.  */
1359                 v = len - LZX_MIN_MATCH_LEN;
1360                 c->chosen_sequences[seq_idx].adjusted_length = v;
1361
1362                 /* Tally the length symbol if needed.  */
1363                 if (v >= LZX_NUM_PRIMARY_LENS) {
1364                         c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1365                         v = LZX_NUM_PRIMARY_LENS;
1366                 }
1367
1368                 /* Tally the main symbol.  */
1369                 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1370                 v += offset_slot * LZX_NUM_LEN_HEADERS;
1371                 c->freqs.main[LZX_NUM_CHARS + v]++;
1372
1373                 /* Save the adjusted offset and match header.  */
1374                 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr =
1375                                 (offset_data << 9) | v;
1376
1377                 if (node_idx == 0) /* Beginning of block was reached?  */
1378                         goto out;
1379         }
1380
1381 out:
1382         /* Save the literal run length for the first sequence.  */
1383         c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
1384
1385         /* Return the index in c->chosen_sequences at which the lzx_sequences
1386          * begin.  */
1387         return seq_idx;
1388 }
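
/*
 * Illustrative sketch (not part of the compressor): the backward-fill idea
 * used by lzx_record_item_list() in miniature.  Items produced in reverse
 * order are written at decreasing indices near the end of a scratch array,
 * and the returned start index lets the caller read them back first-to-last
 * without a separate reversal pass.  All names below are hypothetical.
 */
#if 0
static unsigned
backward_fill_sketch(int *scratch, unsigned scratch_len,
                     const int *items_reversed, unsigned num_items)
{
        unsigned idx = scratch_len;

        /* Each newly produced item goes one slot earlier than the last.  */
        for (unsigned i = 0; i < num_items; i++)
                scratch[--idx] = items_reversed[i];

        /* scratch[idx .. scratch_len - 1] now holds the items in forward
         * order; return the starting index, as lzx_record_item_list() does
         * for c->chosen_sequences.  */
        return idx;
}
#endif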
1389
1390 /*
1391  * Find an inexpensive path through the graph of possible match/literal choices
1392  * for the current block.  The nodes of the graph are
1393  * c->optimum_nodes[0...block_size].  They correspond directly to the bytes in
1394  * the current block, plus one extra node for end-of-block.  The edges of the
1395  * graph are matches and literals.  The goal is to find the minimum cost path
1396  * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
1397  *
1398  * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1399  * proceeding forwards one node at a time.  At each node, a selection of matches
1400  * (len >= 2), as well as the literal byte (len = 1), is considered.  An item of
1401  * length 'len' provides a new path to reach the node 'len' bytes later.  If
1402  * such a path is the lowest cost found so far to reach that later node, then
1403  * that later node is updated with the new path.
1404  *
1405  * Note that although this algorithm is based on minimum cost path search, due
1406  * to various simplifying assumptions the result is not guaranteed to be the
1407  * true minimum cost, or "optimal", path over the graph of all valid LZX
1408  * representations of this block.
1409  *
1410  * Also, note that because of the presence of the recent offsets queue (which is
1411  * a type of adaptive state), the algorithm cannot work backwards and compute
1412  * "cost to end" instead of "cost to beginning".  Furthermore, the way the
1413  * algorithm handles this adaptive state in the "minimum cost" parse is actually
1414  * only an approximation.  It's possible for the globally optimal, minimum cost
1415  * path to contain a prefix, ending at some position, that is *not* the
1416  * minimum cost path to that position.  This can happen when such a prefix
1417  * leaves behind a different adaptive state that leads to lower costs
1418  * later.  The algorithm does not solve this problem; it only considers the
1419  * lowest cost to reach each individual position.
1420  */
1421 static inline struct lzx_lru_queue
1422 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1423                        const u8 * const restrict block_begin,
1424                        const u32 block_size,
1425                        const struct lzx_lru_queue initial_queue,
1426                        bool is_16_bit)
1427 {
1428         struct lzx_optimum_node *cur_node = c->optimum_nodes;
1429         struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
1430         struct lz_match *cache_ptr = c->match_cache;
1431         const u8 *in_next = block_begin;
1432         const u8 * const block_end = block_begin + block_size;
1433
1434         /* Instead of storing the match offset LRU queues in the
1435          * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1436          * storing them in a smaller array.  This works because the algorithm
1437          * only requires a limited history of the adaptive state.  Once a given
1438          * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1439          * it is no longer needed.  */
1440         struct lzx_lru_queue queues[512];
1441
1442         STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
1443 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
1444
1445         /* Initially, the cost to reach each node is "infinity".  */
1446         memset(c->optimum_nodes, 0xFF,
1447                (block_size + 1) * sizeof(c->optimum_nodes[0]));
1448
1449         QUEUE(block_begin) = initial_queue;
1450
1451         /* The following loop runs 'block_size' iterations, one per node.  */
1452         do {
1453                 unsigned num_matches;
1454                 unsigned literal;
1455                 u32 cost;
1456
1457                 /*
1458                  * A selection of matches for the block was already saved in
1459                  * memory so that we don't have to run the uncompressed data
1460                  * through the matchfinder on every optimization pass.  However,
1461                  * we still search for repeat offset matches during each
1462                  * optimization pass because we cannot predict the state of the
1463                  * recent offsets queue.  But as a heuristic, we don't bother
1464                  * searching for repeat offset matches if the general-purpose
1465                  * matchfinder failed to find any matches.
1466                  *
1467                  * Note that a match of length n at some offset implies there is
1468                  * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1469                  * that same offset.  In other words, we don't necessarily need
1470                  * to use the full length of a match.  The key heuristic that
1471                  * saves a significant amount of time is that for each
1472                  * distinct length, we only consider the smallest offset for
1473                  * which that length is available.  This heuristic also applies
1474                  * to repeat offsets, which we order specially: R0 < R1 < R2 <
1475                  * any explicit offset.  Of course, this heuristic can
1476                  * produce suboptimal results because offset slots in LZX are
1477                  * subject to entropy encoding, but in practice this is a useful
1478                  * heuristic.
1479                  */
1480
1481                 num_matches = cache_ptr->length;
1482                 cache_ptr++;
1483
1484                 if (num_matches) {
1485                         struct lz_match *end_matches = cache_ptr + num_matches;
1486                         unsigned next_len = LZX_MIN_MATCH_LEN;
1487                         unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1488                         const u8 *matchptr;
1489
1490                         /* Consider R0 match  */
1491                         matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1492                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1493                                 goto R0_done;
1494                         STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
1495                         do {
1496                                 u32 cost = cur_node->cost +
1497                                            c->costs.match_cost[0][
1498                                                         next_len - LZX_MIN_MATCH_LEN];
1499                                 if (cost <= (cur_node + next_len)->cost) {
1500                                         (cur_node + next_len)->cost = cost;
1501                                         (cur_node + next_len)->item =
1502                                                 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1503                                 }
1504                                 if (unlikely(++next_len > max_len)) {
1505                                         cache_ptr = end_matches;
1506                                         goto done_matches;
1507                                 }
1508                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1509
1510                 R0_done:
1511
1512                         /* Consider R1 match  */
1513                         matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1514                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1515                                 goto R1_done;
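                        /* Per the heuristic described above, only lengths not
                         * already covered at R0 (i.e. lengths >= next_len) are
                         * worth considering here, so first verify that the R1
                         * match reaches next_len bytes.  Likewise for R2
                         * below.  */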
1516                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1517                                 goto R1_done;
1518                         for (unsigned len = 2; len < next_len - 1; len++)
1519                                 if (matchptr[len] != in_next[len])
1520                                         goto R1_done;
1521                         do {
1522                                 u32 cost = cur_node->cost +
1523                                            c->costs.match_cost[1][
1524                                                         next_len - LZX_MIN_MATCH_LEN];
1525                                 if (cost <= (cur_node + next_len)->cost) {
1526                                         (cur_node + next_len)->cost = cost;
1527                                         (cur_node + next_len)->item =
1528                                                 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1529                                 }
1530                                 if (unlikely(++next_len > max_len)) {
1531                                         cache_ptr = end_matches;
1532                                         goto done_matches;
1533                                 }
1534                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1535
1536                 R1_done:
1537
1538                         /* Consider R2 match  */
1539                         matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1540                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1541                                 goto R2_done;
1542                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1543                                 goto R2_done;
1544                         for (unsigned len = 2; len < next_len - 1; len++)
1545                                 if (matchptr[len] != in_next[len])
1546                                         goto R2_done;
1547                         do {
1548                                 u32 cost = cur_node->cost +
1549                                            c->costs.match_cost[2][
1550                                                         next_len - LZX_MIN_MATCH_LEN];
1551                                 if (cost <= (cur_node + next_len)->cost) {
1552                                         (cur_node + next_len)->cost = cost;
1553                                         (cur_node + next_len)->item =
1554                                                 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1555                                 }
1556                                 if (unlikely(++next_len > max_len)) {
1557                                         cache_ptr = end_matches;
1558                                         goto done_matches;
1559                                 }
1560                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1561
1562                 R2_done:
1563
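                        /* The cached matches are ordered by increasing length,
                         * so skip any whose lengths were already fully covered
                         * by the repeat offset matches considered above.  */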
1564                         while (next_len > cache_ptr->length)
1565                                 if (++cache_ptr == end_matches)
1566                                         goto done_matches;
1567
1568                         /* Consider explicit offset matches  */
1569                         do {
1570                                 u32 offset = cache_ptr->offset;
1571                                 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1572                                 unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
1573                                                                                 is_16_bit);
1574                                 u32 base_cost = cur_node->cost;
1575
1576                         #if LZX_CONSIDER_ALIGNED_COSTS
1577                                 if (offset_data >= 16)
1578                                         base_cost += c->costs.aligned[offset_data &
1579                                                                       LZX_ALIGNED_OFFSET_BITMASK];
1580                         #endif
1581
1582                                 do {
1583                                         u32 cost = base_cost +
1584                                                    c->costs.match_cost[offset_slot][
1585                                                                 next_len - LZX_MIN_MATCH_LEN];
1586                                         if (cost < (cur_node + next_len)->cost) {
1587                                                 (cur_node + next_len)->cost = cost;
1588                                                 (cur_node + next_len)->item =
1589                                                         (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1590                                         }
1591                                 } while (++next_len <= cache_ptr->length);
1592                         } while (++cache_ptr != end_matches);
1593                 }
1594
1595         done_matches:
1596
1597                 /* Consider coding a literal.
1598                  *
1599                  * To avoid an extra branch, the check of whether coding
1600                  * the literal is actually preferable is folded into the
1601                  * queue update code below.  */
1602                 literal = *in_next++;
1603                 cost = cur_node->cost + c->costs.main[literal];
1604
1605                 /* Advance to the next position.  */
1606                 cur_node++;
1607
1608                 /* The lowest-cost path to the current position is now known.
1609                  * Finalize the recent offsets queue that results from taking
1610                  * this lowest-cost path.  */
1611
1612                 if (cost <= cur_node->cost) {
1613                         /* Literal: queue remains unchanged.  */
1614                         cur_node->cost = cost;
1615                         cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
1616                         QUEUE(in_next) = QUEUE(in_next - 1);
1617                 } else {
1618                         /* Match: queue update is needed.  */
1619                         unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1620                         u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1621                         if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1622                                 /* Explicit offset match: insert offset at front  */
1623                                 QUEUE(in_next) =
1624                                         lzx_lru_queue_push(QUEUE(in_next - len),
1625                                                            offset_data - LZX_OFFSET_ADJUSTMENT);
1626                         } else {
1627                                 /* Repeat offset match: swap offset to front  */
1628                                 QUEUE(in_next) =
1629                                         lzx_lru_queue_swap(QUEUE(in_next - len),
1630                                                            offset_data);
1631                         }
1632                 }
1633         } while (cur_node != end_node);
1634
1635         /* Return the match offset queue at the end of the minimum cost path. */
1636         return QUEUE(block_end);
1637 }
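
/*
 * Illustrative sketch (not part of the compressor): the rolling-state idea
 * behind the QUEUE() macro in lzx_find_min_cost_path() above.  Because no
 * item spans more than LZX_MAX_MATCH_LEN bytes, only the most recent
 * LZX_MAX_MATCH_LEN + 1 per-position states can still be read, so they fit in
 * a small power-of-two ring indexed by position modulo the ring size.  The
 * names below (SKETCH_MAX_SPAN, SKETCH_RING_SIZE, sketch_state) are
 * hypothetical stand-ins.
 */
#if 0
#define SKETCH_MAX_SPAN         257     /* stand-in for LZX_MAX_MATCH_LEN */
#define SKETCH_RING_SIZE        512     /* power of two, >= SKETCH_MAX_SPAN + 1 */

struct sketch_state { u32 recent[3]; }; /* stand-in for struct lzx_lru_queue */

static struct sketch_state sketch_ring[SKETCH_RING_SIZE];

/* State for absolute position 'pos'; the modulo reduces to a cheap mask.  */
static inline struct sketch_state *
sketch_state_for_pos(size_t pos)
{
        return &sketch_ring[pos % SKETCH_RING_SIZE];
}
#endif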
1638
1639 /* Given the costs for the main and length codewords, compute 'match_costs'.  */
1640 static void
1641 lzx_compute_match_costs(struct lzx_compressor *c)
1642 {
1643         unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) /
1644                                         LZX_NUM_LEN_HEADERS;
1645         struct lzx_costs *costs = &c->costs;
1646
1647         for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1648
1649                 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1650                 unsigned main_symbol = LZX_NUM_CHARS + (offset_slot *
1651                                                         LZX_NUM_LEN_HEADERS);
1652                 unsigned i;
1653
1654         #if LZX_CONSIDER_ALIGNED_COSTS
1655                 if (offset_slot >= 8)
1656                         extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1657         #endif
1658
1659                 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1660                         costs->match_cost[offset_slot][i] =
1661                                 costs->main[main_symbol++] + extra_cost;
1662
1663                 extra_cost += costs->main[main_symbol];
1664
1665                 for (; i < LZX_NUM_LENS; i++)
1666                         costs->match_cost[offset_slot][i] =
1667                                 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
1668         }
1669 }
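
/*
 * Illustrative sketch (not part of the compressor): recomputing one entry of
 * the match_cost table filled in above, to make the decomposition explicit.
 * A match costs its main symbol (offset slot plus length header), plus the
 * offset's extra bits, plus a length symbol only when the length header is
 * LZX_NUM_PRIMARY_LENS.  The function name is hypothetical, 'adjusted_len'
 * means length - LZX_MIN_MATCH_LEN, and the LZX_CONSIDER_ALIGNED_COSTS
 * adjustment for offset slots >= 8 is ignored here.
 */
#if 0
static u32
one_match_cost_sketch(const struct lzx_costs *costs, unsigned offset_slot,
                      unsigned adjusted_len, u32 extra_offset_bits_cost)
{
        unsigned main_symbol = LZX_NUM_CHARS +
                               (offset_slot * LZX_NUM_LEN_HEADERS);

        if (adjusted_len < LZX_NUM_PRIMARY_LENS)
                return costs->main[main_symbol + adjusted_len] +
                       extra_offset_bits_cost;

        /* Long lengths pay for the "long length" header plus a length symbol. */
        return costs->main[main_symbol + LZX_NUM_PRIMARY_LENS] +
               costs->len[adjusted_len - LZX_NUM_PRIMARY_LENS] +
               extra_offset_bits_cost;
}
#endif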
1670
1671 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
1672  * algorithm.  */
1673 static void
1674 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1675 {
1676         u32 i;
1677         bool have_byte[256];
1678         unsigned num_used_bytes;
1679
1680         /* The costs below are hard coded to use a scaling factor of 16.  */
1681         STATIC_ASSERT(LZX_BIT_COST == 16);
1682
1683         /*
1684          * Heuristics:
1685          *
1686          * - Use smaller initial costs for literal symbols when the input buffer
1687          *   contains fewer distinct bytes.
1688          *
1689          * - Assume that match symbols are more costly than literal symbols.
1690          *
1691          * - Assume that length symbols for shorter lengths are less costly than
1692          *   length symbols for longer lengths.
1693          */
1694
1695         for (i = 0; i < 256; i++)
1696                 have_byte[i] = false;
1697
1698         for (i = 0; i < block_size; i++)
1699                 have_byte[block[i]] = true;
1700
1701         num_used_bytes = 0;
1702         for (i = 0; i < 256; i++)
1703                 num_used_bytes += have_byte[i];
1704
1705         for (i = 0; i < 256; i++)
1706                 c->costs.main[i] = 140 - (256 - num_used_bytes) / 4;
1707
1708         for (; i < c->num_main_syms; i++)
1709                 c->costs.main[i] = 170;
1710
1711         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1712                 c->costs.len[i] = 103 + (i / 4);
1713
1714 #if LZX_CONSIDER_ALIGNED_COSTS
1715         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1716                 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1717 #endif
1718
1719         lzx_compute_match_costs(c);
1720 }
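
/*
 * Worked example of the defaults above (LZX_BIT_COST == 16, so a cost of 16
 * is one estimated bit): a block that uses 64 distinct byte values gives each
 * literal symbol a cost of 140 - (256 - 64) / 4 = 92, or about 5.75 bits,
 * while a block that uses all 256 values gives 140, or 8.75 bits.  Match
 * symbols start at 170 (about 10.6 bits) and length symbols at 103 + i/4
 * (about 6.4 bits and up).
 */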
1721
1722 /* Update the current cost model to reflect the computed Huffman codes.  */
1723 static void
1724 lzx_update_costs(struct lzx_compressor *c)
1725 {
1726         unsigned i;
1727         const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1728
1729         for (i = 0; i < c->num_main_syms; i++) {
1730                 c->costs.main[i] = (lens->main[i] ? lens->main[i] :
1731                                     MAIN_CODEWORD_LIMIT) * LZX_BIT_COST;
1732         }
1733
1734         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
1735                 c->costs.len[i] = (lens->len[i] ? lens->len[i] :
1736                                    LENGTH_CODEWORD_LIMIT) * LZX_BIT_COST;
1737         }
1738
1739 #if LZX_CONSIDER_ALIGNED_COSTS
1740         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1741                 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] :
1742                                        ALIGNED_CODEWORD_LIMIT) * LZX_BIT_COST;
1743         }
1744 #endif
1745
1746         lzx_compute_match_costs(c);
1747 }
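
/*
 * Example of the mapping above: a main symbol that was assigned a 5-bit
 * codeword costs 5 * LZX_BIT_COST = 80, while a symbol that received no
 * codeword at all (length 0) is charged MAIN_CODEWORD_LIMIT bits so that
 * unused symbols are not treated as free.
 */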
1748
1749 static inline struct lzx_lru_queue
1750 lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
1751                              struct lzx_output_bitstream * const restrict os,
1752                              const u8 * const restrict block_begin,
1753                              const u32 block_size,
1754                              const struct lzx_lru_queue initial_queue,
1755                              bool is_16_bit)
1756 {
1757         unsigned num_passes_remaining = c->num_optim_passes;
1758         struct lzx_lru_queue new_queue;
1759         u32 seq_idx;
1760
1761         /* The first optimization pass uses a default cost model.  Each
1762          * additional optimization pass uses a cost model derived from the
1763          * Huffman code computed in the previous pass.  */
1764
1765         lzx_set_default_costs(c, block_begin, block_size);
1766         lzx_reset_symbol_frequencies(c);
1767         do {
1768                 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1769                                                    initial_queue, is_16_bit);
1770                 if (num_passes_remaining > 1) {
1771                         lzx_tally_item_list(c, block_size, is_16_bit);
1772                         lzx_make_huffman_codes(c);
1773                         lzx_update_costs(c);
1774                         lzx_reset_symbol_frequencies(c);
1775                 }
1776         } while (--num_passes_remaining);
1777
1778         seq_idx = lzx_record_item_list(c, block_size, is_16_bit);
1779         lzx_finish_block(c, os, block_begin, block_size, seq_idx);
1780         return new_queue;
1781 }
1782
1783 /*
1784  * This is the "near-optimal" LZX compressor.
1785  *
1786  * For each block, it performs a relatively thorough graph search to find an
1787  * inexpensive (in terms of compressed size) way to output that block.
1788  *
1789  * Note: there are actually many things this algorithm leaves on the table in
1790  * terms of compression ratio.  So although it may be "near-optimal", it is
1791  * certainly not "optimal".  The goal is not to produce the optimal compression
1792  * ratio, which for LZX is probably impossible within any practical amount of
1793  * time, but rather to produce a compression ratio significantly better than a
1794  * simpler "greedy" or "lazy" parse while still being relatively fast.
1795  */
1796 static inline void
1797 lzx_compress_near_optimal(struct lzx_compressor *c,
1798                           struct lzx_output_bitstream *os,
1799                           bool is_16_bit)
1800 {
1801         const u8 * const in_begin = c->in_buffer;
1802         const u8 *       in_next = in_begin;
1803         const u8 * const in_end  = in_begin + c->in_nbytes;
1804         u32 max_len = LZX_MAX_MATCH_LEN;
1805         u32 nice_len = min(c->nice_match_length, max_len);
1806         u32 next_hashes[2] = {};
1807         struct lzx_lru_queue queue;
1808
1809         CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
1810         lzx_lru_queue_init(&queue);
1811
1812         do {
1813                 /* Starting a new block  */
1814                 const u8 * const in_block_begin = in_next;
1815                 const u8 * const in_block_end =
1816                         in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1817
1818                 /* Run the block through the matchfinder and cache the matches. */
1819                 struct lz_match *cache_ptr = c->match_cache;
1820                 do {
1821                         struct lz_match *lz_matchptr;
1822                         u32 best_len;
1823
1824                         /* If approaching the end of the input buffer, adjust
1825                          * 'max_len' and 'nice_len' accordingly.  */
1826                         if (unlikely(max_len > in_end - in_next)) {
1827                                 max_len = in_end - in_next;
1828                                 nice_len = min(max_len, nice_len);
1829                                 if (unlikely(max_len <
1830                                              BT_MATCHFINDER_REQUIRED_NBYTES))
1831                                 {
1832                                         in_next++;
1833                                         cache_ptr->length = 0;
1834                                         cache_ptr++;
1835                                         continue;
1836                                 }
1837                         }
1838
1839                         /* Check for matches.  */
1840                         lz_matchptr = CALL_BT_MF(is_16_bit, c,
1841                                                  bt_matchfinder_get_matches,
1842                                                  in_begin,
1843                                                  in_next - in_begin,
1844                                                  max_len,
1845                                                  nice_len,
1846                                                  c->max_search_depth,
1847                                                  next_hashes,
1848                                                  &best_len,
1849                                                  cache_ptr + 1);
1850                         in_next++;
1851                         cache_ptr->length = lz_matchptr - (cache_ptr + 1);
1852                         cache_ptr = lz_matchptr;
1853
1854                         /*
1855                          * If there was a very long match found, then don't
1856                          * cache any matches for the bytes covered by that
1857                          * match.  This avoids degenerate behavior when
1858                          * compressing highly redundant data, where the number
1859                          * of matches can be very large.
1860                          *
1861                          * This heuristic doesn't actually hurt the compression
1862                          * ratio very much.  If there's a long match, then the
1863                          * data must be highly compressible, so it doesn't
1864                          * matter as much what we do.
1865                          */
1866                         if (best_len >= nice_len) {
1867                                 --best_len;
1868                                 do {
1869                                         if (unlikely(max_len > in_end - in_next)) {
1870                                                 max_len = in_end - in_next;
1871                                                 nice_len = min(max_len, nice_len);
1872                                                 if (unlikely(max_len <
1873                                                              BT_MATCHFINDER_REQUIRED_NBYTES))
1874                                                 {
1875                                                         in_next++;
1876                                                         cache_ptr->length = 0;
1877                                                         cache_ptr++;
1878                                                         continue;
1879                                                 }
1880                                         }
1881                                         CALL_BT_MF(is_16_bit, c,
1882                                                    bt_matchfinder_skip_position,
1883                                                    in_begin,
1884                                                    in_next - in_begin,
1885                                                    max_len,
1886                                                    nice_len,
1887                                                    c->max_search_depth,
1888                                                    next_hashes);
1889                                         in_next++;
1890                                         cache_ptr->length = 0;
1891                                         cache_ptr++;
1892                                 } while (--best_len);
1893                         }
1894                 } while (in_next < in_block_end &&
1895                          likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
1896
1897                 /* We've finished running the block through the matchfinder.
1898                  * Now choose a match/literal sequence and write the block.  */
1899
1900                 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
1901                                                      in_next - in_block_begin,
1902                                                      queue, is_16_bit);
1903         } while (in_next != in_end);
1904 }
1905
1906 static void
1907 lzx_compress_near_optimal_16(struct lzx_compressor *c,
1908                              struct lzx_output_bitstream *os)
1909 {
1910         lzx_compress_near_optimal(c, os, true);
1911 }
1912
1913 static void
1914 lzx_compress_near_optimal_32(struct lzx_compressor *c,
1915                              struct lzx_output_bitstream *os)
1916 {
1917         lzx_compress_near_optimal(c, os, false);
1918 }
1919
1920 /*
1921  * Given a pointer to the current byte sequence and the current list of recent
1922  * match offsets, find the longest repeat offset match.
1923  *
1924  * If no match of at least 2 bytes is found, then return 0.
1925  *
1926  * If a match of at least 2 bytes is found, then return its length and set
1927  * *rep_max_idx_ret to the index of its offset in @recent_offsets.
1928  */
1929 static unsigned
1930 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
1931                                      const u32 bytes_remaining,
1932                                      const u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
1933                                      unsigned *rep_max_idx_ret)
1934 {
1935         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
1936
1937         const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
1938         const u16 next_2_bytes = load_u16_unaligned(in_next);
1939         const u8 *matchptr;
1940         unsigned rep_max_len;
1941         unsigned rep_max_idx;
1942         unsigned rep_len;
1943
1944         matchptr = in_next - recent_offsets[0];
1945         if (load_u16_unaligned(matchptr) == next_2_bytes)
1946                 rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
1947         else
1948                 rep_max_len = 0;
1949         rep_max_idx = 0;
1950
1951         matchptr = in_next - recent_offsets[1];
1952         if (load_u16_unaligned(matchptr) == next_2_bytes) {
1953                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1954                 if (rep_len > rep_max_len) {
1955                         rep_max_len = rep_len;
1956                         rep_max_idx = 1;
1957                 }
1958         }
1959
1960         matchptr = in_next - recent_offsets[2];
1961         if (load_u16_unaligned(matchptr) == next_2_bytes) {
1962                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1963                 if (rep_len > rep_max_len) {
1964                         rep_max_len = rep_len;
1965                         rep_max_idx = 2;
1966                 }
1967         }
1968
1969         *rep_max_idx_ret = rep_max_idx;
1970         return rep_max_len;
1971 }
1972
1973 /* Fast heuristic scoring for lazy parsing: how "good" is this match?  */
1974 static inline unsigned
1975 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
1976 {
1977         unsigned score = len;
1978
1979         if (adjusted_offset < 4096)
1980                 score++;
1981
1982         if (adjusted_offset < 256)
1983                 score++;
1984
1985         return score;
1986 }
1987
1988 static inline unsigned
1989 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
1990 {
1991         return rep_len + 3;
1992 }
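
/*
 * Worked example of how these scores interact in the lazy parser below: a
 * length-5 match at an adjusted offset under 256 scores 5 + 2 = 7, while a
 * length-5 repeat offset match scores 5 + 3 = 8, so the repeat offset match
 * is preferred even though the lengths are equal.
 */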
1993
1994 /* This is the "lazy" LZX compressor.  */
1995 static inline void
1996 lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
1997                   bool is_16_bit)
1998 {
1999         const u8 * const in_begin = c->in_buffer;
2000         const u8 *       in_next = in_begin;
2001         const u8 * const in_end  = in_begin + c->in_nbytes;
2002         unsigned max_len = LZX_MAX_MATCH_LEN;
2003         unsigned nice_len = min(c->nice_match_length, max_len);
2004         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
2005         u32 recent_offsets[3] = {1, 1, 1};
2006         u32 next_hashes[2] = {};
2007
2008         CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
2009
2010         do {
2011                 /* Starting a new block  */
2012
2013                 const u8 * const in_block_begin = in_next;
2014                 const u8 * const in_block_end =
2015                         in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
2016                 struct lzx_sequence *next_seq = c->chosen_sequences;
2017                 unsigned cur_len;
2018                 u32 cur_offset;
2019                 u32 cur_offset_data;
2020                 unsigned cur_score;
2021                 unsigned next_len;
2022                 u32 next_offset;
2023                 u32 next_offset_data;
2024                 unsigned next_score;
2025                 unsigned rep_max_len;
2026                 unsigned rep_max_idx;
2027                 unsigned rep_score;
2028                 unsigned skip_len;
2029                 u32 litrunlen = 0;
2030
2031                 lzx_reset_symbol_frequencies(c);
2032
2033                 do {
2034                         if (unlikely(max_len > in_end - in_next)) {
2035                                 max_len = in_end - in_next;
2036                                 nice_len = min(max_len, nice_len);
2037                         }
2038
2039                         /* Find the longest match at the current position.  */
2040
2041                         cur_len = CALL_HC_MF(is_16_bit, c,
2042                                              hc_matchfinder_longest_match,
2043                                              in_begin,
2044                                              in_next - in_begin,
2045                                              2,
2046                                              max_len,
2047                                              nice_len,
2048                                              c->max_search_depth,
2049                                              next_hashes,
2050                                              &cur_offset);
2051                         if (cur_len < 3 ||
2052                             (cur_len == 3 &&
2053                              cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
2054                              cur_offset != recent_offsets[0] &&
2055                              cur_offset != recent_offsets[1] &&
2056                              cur_offset != recent_offsets[2]))
2057                         {
2058                                 /* There was no match found, or the only match found
2059                                  * was a distant length 3 match.  Output a literal.  */
2060                                 lzx_record_literal(c, *in_next++, &litrunlen);
2061                                 continue;
2062                         }
2063
2064                         if (cur_offset == recent_offsets[0]) {
2065                                 in_next++;
2066                                 cur_offset_data = 0;
2067                                 skip_len = cur_len - 1;
2068                                 goto choose_cur_match;
2069                         }
2070
2071                         cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
2072                         cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
2073
2074                         /* Consider a repeat offset match  */
2075                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2076                                                                            in_end - in_next,
2077                                                                            recent_offsets,
2078                                                                            &rep_max_idx);
2079                         in_next++;
2080
2081                         if (rep_max_len >= 3 &&
2082                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2083                                                                        rep_max_idx)) >= cur_score)
2084                         {
2085                                 cur_len = rep_max_len;
2086                                 cur_offset_data = rep_max_idx;
2087                                 skip_len = rep_max_len - 1;
2088                                 goto choose_cur_match;
2089                         }
2090
2091                 have_cur_match:
2092
2093                         /* We have a match at the current position.  */
2094
2095                         /* If we have a very long match, choose it immediately.  */
2096                         if (cur_len >= nice_len) {
2097                                 skip_len = cur_len - 1;
2098                                 goto choose_cur_match;
2099                         }
2100
2101                         /* See if there's a better match at the next position.  */
2102
2103                         if (unlikely(max_len > in_end - in_next)) {
2104                                 max_len = in_end - in_next;
2105                                 nice_len = min(max_len, nice_len);
2106                         }
2107
2108                         next_len = CALL_HC_MF(is_16_bit, c,
2109                                               hc_matchfinder_longest_match,
2110                                               in_begin,
2111                                               in_next - in_begin,
2112                                               cur_len - 2,
2113                                               max_len,
2114                                               nice_len,
2115                                               c->max_search_depth / 2,
2116                                               next_hashes,
2117                                               &next_offset);
2118
2119                         if (next_len <= cur_len - 2) {
2120                                 in_next++;
2121                                 skip_len = cur_len - 2;
2122                                 goto choose_cur_match;
2123                         }
2124
2125                         next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
2126                         next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
2127
2128                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2129                                                                            in_end - in_next,
2130                                                                            recent_offsets,
2131                                                                            &rep_max_idx);
2132                         in_next++;
2133
2134                         if (rep_max_len >= 3 &&
2135                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2136                                                                        rep_max_idx)) >= next_score)
2137                         {
2138
2139                                 if (rep_score > cur_score) {
2140                                         /* The next match is better, and it's a
2141                                          * repeat offset match.  */
2142                                         lzx_record_literal(c, *(in_next - 2),
2143                                                            &litrunlen);
2144                                         cur_len = rep_max_len;
2145                                         cur_offset_data = rep_max_idx;
2146                                         skip_len = cur_len - 1;
2147                                         goto choose_cur_match;
2148                                 }
2149                         } else {
2150                                 if (next_score > cur_score) {
2151                                         /* The next match is better, and it's an
2152                                          * explicit offset match.  */
2153                                         lzx_record_literal(c, *(in_next - 2),
2154                                                            &litrunlen);
2155                                         cur_len = next_len;
2156                                         cur_offset_data = next_offset_data;
2157                                         cur_score = next_score;
2158                                         goto have_cur_match;
2159                                 }
2160                         }
2161
2162                         /* The original match was better.  */
2163                         skip_len = cur_len - 2;
2164
2165                 choose_cur_match:
2166                         lzx_record_match(c, cur_len, cur_offset_data,
2167                                          recent_offsets, is_16_bit,
2168                                          &litrunlen, &next_seq);
2169                         in_next = CALL_HC_MF(is_16_bit, c,
2170                                              hc_matchfinder_skip_positions,
2171                                              in_begin,
2172                                              in_next - in_begin,
2173                                              in_end - in_begin,
2174                                              skip_len,
2175                                              next_hashes);
2176                 } while (in_next < in_block_end);
2177
2178                 lzx_finish_sequence(next_seq, litrunlen);
2179
2180                 lzx_finish_block(c, os, in_block_begin, in_next - in_block_begin, 0);
2181
2182         } while (in_next != in_end);
2183 }
2184
2185 static void
2186 lzx_compress_lazy_16(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2187 {
2188         lzx_compress_lazy(c, os, true);
2189 }
2190
2191 static void
2192 lzx_compress_lazy_32(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2193 {
2194         lzx_compress_lazy(c, os, false);
2195 }
2196
2197 /* Generate the acceleration tables for offset slots.  */
2198 static void
2199 lzx_init_offset_slot_tabs(struct lzx_compressor *c)
2200 {
2201         u32 adjusted_offset = 0;
2202         unsigned slot = 0;
2203
2204         /* slots [0, 29]  */
2205         for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
2206              adjusted_offset++)
2207         {
2208                 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2209                         slot++;
2210                 c->offset_slot_tab_1[adjusted_offset] = slot;
2211         }
2212
2213         /* slots [30, 49]  */
2214         for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
2215              adjusted_offset += (u32)1 << 14)
2216         {
2217                 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2218                         slot++;
2219                 c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
2220         }
2221 }
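
/*
 * Illustrative sketch (not part of the compressor): how the two tables built
 * above would be consulted.  Small adjusted offsets index offset_slot_tab_1
 * directly; larger ones index offset_slot_tab_2 by their high bits.  This is
 * a hypothetical stand-in for the real lookup helper,
 * lzx_comp_get_offset_slot(), which is defined earlier in this file and also
 * takes an is_16_bit argument.
 */
#if 0
static inline unsigned
offset_slot_lookup_sketch(const struct lzx_compressor *c, u32 adjusted_offset)
{
        if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
                return c->offset_slot_tab_1[adjusted_offset];
        return c->offset_slot_tab_2[adjusted_offset >> 14];
}
#endif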
2222
2223 static size_t
2224 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
2225 {
2226         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2227                 if (lzx_is_16_bit(max_bufsize))
2228                         return offsetof(struct lzx_compressor, hc_mf_16) +
2229                                hc_matchfinder_size_16(max_bufsize);
2230                 else
2231                         return offsetof(struct lzx_compressor, hc_mf_32) +
2232                                hc_matchfinder_size_32(max_bufsize);
2233         } else {
2234                 if (lzx_is_16_bit(max_bufsize))
2235                         return offsetof(struct lzx_compressor, bt_mf_16) +
2236                                bt_matchfinder_size_16(max_bufsize);
2237                 else
2238                         return offsetof(struct lzx_compressor, bt_mf_32) +
2239                                bt_matchfinder_size_32(max_bufsize);
2240         }
2241 }
2242
2243 static u64
2244 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
2245                       bool destructive)
2246 {
2247         u64 size = 0;
2248
2249         if (max_bufsize > LZX_MAX_WINDOW_SIZE)
2250                 return 0;
2251
2252         size += lzx_get_compressor_size(max_bufsize, compression_level);
2253         if (!destructive)
2254                 size += max_bufsize; /* in_buffer */
2255         return size;
2256 }
2257
2258 static int
2259 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
2260                       bool destructive, void **c_ret)
2261 {
2262         unsigned window_order;
2263         struct lzx_compressor *c;
2264
2265         window_order = lzx_get_window_order(max_bufsize);
2266         if (window_order == 0)
2267                 return WIMLIB_ERR_INVALID_PARAM;
2268
2269         c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
2270         if (!c)
2271                 goto oom0;
2272
2273         c->destructive = destructive;
2274
2275         c->num_main_syms = lzx_get_num_main_syms(window_order);
2276         c->window_order = window_order;
2277
2278         if (!c->destructive) {
2279                 c->in_buffer = MALLOC(max_bufsize);
2280                 if (!c->in_buffer)
2281                         goto oom1;
2282         }
2283
2284         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2285
2286                 /* Fast compression: Use lazy parsing.  */
2287
2288                 if (lzx_is_16_bit(max_bufsize))
2289                         c->impl = lzx_compress_lazy_16;
2290                 else
2291                         c->impl = lzx_compress_lazy_32;
2292                 c->max_search_depth = (60 * compression_level) / 20;
2293                 c->nice_match_length = (80 * compression_level) / 20;
2294
2295                 /* lzx_compress_lazy() needs max_search_depth >= 2 because it
2296                  * halves the max_search_depth when attempting a lazy match, and
2297                  * max_search_depth cannot be 0.  */
2298                 if (c->max_search_depth < 2)
2299                         c->max_search_depth = 2;
2300         } else {
2301
2302                 /* Normal / high compression: Use near-optimal parsing.  */
2303
2304                 if (lzx_is_16_bit(max_bufsize))
2305                         c->impl = lzx_compress_near_optimal_16;
2306                 else
2307                         c->impl = lzx_compress_near_optimal_32;
2308
2309                 /* Scale nice_match_length and max_search_depth with the
2310                  * compression level.  */
2311                 c->max_search_depth = (24 * compression_level) / 50;
2312                 c->nice_match_length = (48 * compression_level) / 50;
2313
2314                 /* Set a number of optimization passes appropriate for the
2315                  * compression level.  */
2316
2317                 c->num_optim_passes = 1;
2318
2319                 if (compression_level >= 45)
2320                         c->num_optim_passes++;
2321
2322                 /* Use more optimization passes for higher compression levels.
2323                  * But the more passes there are, the less they help --- so
2324                  * don't add them linearly.  */
2325                 if (compression_level >= 70) {
2326                         c->num_optim_passes++;
2327                         if (compression_level >= 100)
2328                                 c->num_optim_passes++;
2329                         if (compression_level >= 150)
2330                                 c->num_optim_passes++;
2331                         if (compression_level >= 200)
2332                                 c->num_optim_passes++;
2333                         if (compression_level >= 300)
2334                                 c->num_optim_passes++;
2335                 }
2336         }
2337
2338         /* max_search_depth == 0 is invalid.  */
2339         if (c->max_search_depth < 1)
2340                 c->max_search_depth = 1;
2341
2342         if (c->nice_match_length > LZX_MAX_MATCH_LEN)
2343                 c->nice_match_length = LZX_MAX_MATCH_LEN;
2344
2345         lzx_init_offset_slot_tabs(c);
2346         *c_ret = c;
2347         return 0;
2348
2349 oom1:
2350         FREE(c);
2351 oom0:
2352         return WIMLIB_ERR_NOMEM;
2353 }
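
/*
 * Worked example of the scaling above, taking compression level 50 and
 * assuming it exceeds LZX_MAX_FAST_LEVEL so the near-optimal branch is taken:
 * max_search_depth = (24 * 50) / 50 = 24, nice_match_length = (48 * 50) / 50
 * = 48, and num_optim_passes = 2, since 50 >= 45 but 50 < 70.
 */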
2354
2355 static size_t
2356 lzx_compress(const void *restrict in, size_t in_nbytes,
2357              void *restrict out, size_t out_nbytes_avail, void *restrict _c)
2358 {
2359         struct lzx_compressor *c = _c;
2360         struct lzx_output_bitstream os;
2361         size_t result;
2362
2363         /* Don't bother trying to compress very small inputs.  */
2364         if (in_nbytes < 100)
2365                 return 0;
2366
2367         /* Copy the input data into the internal buffer (or, in destructive
              * mode, reuse the caller's buffer in place) and preprocess it.  */
2368         if (c->destructive)
2369                 c->in_buffer = (void *)in;
2370         else
2371                 memcpy(c->in_buffer, in, in_nbytes);
2372         c->in_nbytes = in_nbytes;
2373         lzx_preprocess(c->in_buffer, in_nbytes);
2374
2375         /* Initially, the previous Huffman codeword lengths are all zeroes.  */
2376         c->codes_index = 0;
2377         memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2378
2379         /* Initialize the output bitstream.  */
2380         lzx_init_output(&os, out, out_nbytes_avail);
2381
2382         /* Call the compression level-specific compress() function.  */
2383         (*c->impl)(c, &os);
2384
2385         /* Flush the output bitstream and return the compressed size or 0.  */
2386         result = lzx_flush_output(&os);
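        /* If no usable compressed output was produced, then in destructive
         * mode the caller's buffer was preprocessed in place and must be
         * restored before returning.  */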
2387         if (!result && c->destructive)
2388                 lzx_postprocess(c->in_buffer, c->in_nbytes);
2389         return result;
2390 }
2391
2392 static void
2393 lzx_free_compressor(void *_c)
2394 {
2395         struct lzx_compressor *c = _c;
2396
2397         if (!c->destructive)
2398                 FREE(c->in_buffer);
2399         FREE(c);
2400 }
2401
2402 const struct compressor_ops lzx_compressor_ops = {
2403         .get_needed_memory  = lzx_get_needed_memory,
2404         .create_compressor  = lzx_create_compressor,
2405         .compress           = lzx_compress,
2406         .free_compressor    = lzx_free_compressor,
2407 };
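
/*
 * Illustrative usage sketch (not how wimlib's callers actually reach this
 * code, which is through the compressor_ops table above): driving the three
 * static entry points in this file directly.  The function name and the
 * choice of compression level 50 are hypothetical.
 */
#if 0
static int
lzx_compress_buffer_sketch(const void *in, size_t in_nbytes,
                           void *out, size_t out_nbytes_avail,
                           size_t *out_nbytes_ret)
{
        void *c;
        int ret;

        ret = lzx_create_compressor(in_nbytes, 50, false /* non-destructive */,
                                    &c);
        if (ret)
                return ret;

        /* A return value of 0 means the data did not compress into the space
         * available; the caller would store it uncompressed instead.  */
        *out_nbytes_ret = lzx_compress(in, in_nbytes, out, out_nbytes_avail, c);

        lzx_free_compressor(c);
        return 0;
}
#endif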