1 /*
2  * lzx_compress.c
3  *
4  * A compressor for the LZX compression format, as used in WIM files.
5  */
6
7 /*
8  * Copyright (C) 2012-2016 Eric Biggers
9  *
10  * This file is free software; you can redistribute it and/or modify it under
11  * the terms of the GNU Lesser General Public License as published by the Free
12  * Software Foundation; either version 3 of the License, or (at your option) any
13  * later version.
14  *
15  * This file is distributed in the hope that it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
18  * details.
19  *
20  * You should have received a copy of the GNU Lesser General Public License
21  * along with this file; if not, see http://www.gnu.org/licenses/.
22  */
23
24
25 /*
26  * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27  * compression format, as used in the WIM (Windows IMaging) file format.
28  *
29  * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30  * "Near-optimal" is significantly slower than "lazy", but results in a better
31  * compression ratio.  The "near-optimal" algorithm is used at the default
32  * compression level.
33  *
34  * This file may need some slight modifications to be used outside of the WIM
35  * format.  In particular, in other situations the LZX block header might be
36  * slightly different, and sliding window support might be required.
37  *
38  * Note: LZX is a compression format derived from DEFLATE, the format used by
39  * zlib and gzip.  Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40  * Certain details are quite similar, such as the method for storing Huffman
41  * codes.  However, the main differences are:
42  *
43  * - LZX preprocesses the data to attempt to make x86 machine code slightly more
44  *   compressible before attempting to compress it further.
45  *
46  * - LZX uses a "main" alphabet which combines literals and matches, with the
47  *   match symbols containing a "length header" (giving all or part of the match
48  *   length) and an "offset slot" (giving, roughly speaking, the order of
49  *   magnitude of the match offset).
50  *
51  * - LZX does not have static Huffman blocks (that is, the kind with preset
52  *   Huffman codes); however it does have two types of dynamic Huffman blocks
53  *   ("verbatim" and "aligned").
54  *
55  * - LZX has a minimum match length of 2 rather than 3.  Length 2 matches can be
56  *   useful, but generally only if the parser is smart about choosing them.
57  *
58  * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59  *   of match offsets.  This is very useful for certain types of files, such as
60  *   binary files that have repeating records.
61  */
62
63 #ifdef HAVE_CONFIG_H
64 #  include "config.h"
65 #endif
66
67 /*
68  * The compressor always chooses a block of at least MIN_BLOCK_SIZE bytes,
69  * except if the last block has to be shorter.
70  */
71 #define MIN_BLOCK_SIZE          6500
72
73 /*
74  * The compressor attempts to end blocks after SOFT_MAX_BLOCK_SIZE bytes, but
75  * the final size might be larger due to matches extending beyond the end of the
76  * block.  Specifically:
77  *
78  *  - The greedy parser may choose an arbitrarily long match starting at the
79  *    SOFT_MAX_BLOCK_SIZE'th byte.
80  *
81  *  - The lazy parser may choose a sequence of literals starting at the
82  *    SOFT_MAX_BLOCK_SIZE'th byte when it sees a sequence of increasingly good
83  *    matches.  The final match may be of arbitrary length.  The length of the
84  *    literal sequence is approximately limited by the "nice match length"
85  *    parameter.
86  */
87 #define SOFT_MAX_BLOCK_SIZE     100000
88
89 /*
90  * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
91  * excluding the extra "overflow" entries.  This value should be high enough so
92  * that, nearly all the time, all matches found in a given block can fit in the match
93  * cache.  However, fallback behavior (immediately terminating the block) on
94  * cache overflow is still required.
95  */
96 #define LZX_CACHE_LENGTH        (SOFT_MAX_BLOCK_SIZE * 5)
97
98 /*
99  * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
100  * ever be saved in the match cache for a single position.  Since each match we
101  * save for a single position has a distinct length, we can use the number of
102  * possible match lengths in LZX as this bound.  This bound is guaranteed to be
103  * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
104  * it will never actually be reached.
105  */
106 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
107
108 /*
109  * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
110  * This makes it possible to consider fractional bit costs.
111  *
112  * Note: this is only useful as a statistical trick for when the true costs are
113  * unknown.  In reality, each token in LZX requires a whole number of bits to
114  * output.
115  */
116 #define LZX_BIT_COST            16
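
/*
 * For illustration only (this is just arithmetic, not code the compressor
 * runs): with LZX_BIT_COST == 16, a scaled cost of 24 represents 24/16 = 1.5
 * bits and a scaled cost of 40 represents 2.5 bits, so a path containing both
 * symbols has an estimated cost of 64, i.e. 4 whole bits.
 */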
117
118 /*
119  * Should the compressor take into account the costs of aligned offset symbols?
120  */
121 #define LZX_CONSIDER_ALIGNED_COSTS      1
122
123 /*
124  * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
125  * faster algorithm.
126  */
127 #define LZX_MAX_FAST_LEVEL      34
128
129 /*
130  * BT_MATCHFINDER_HASH2_ORDER is the log base 2 of the number of entries in the
131  * hash table for finding length 2 matches.  This could be as high as 16, but
132  * using a smaller hash table speeds up compression due to reduced cache
133  * pressure.
134  */
135 #define BT_MATCHFINDER_HASH2_ORDER      12
136
137 /*
138  * These are the compressor-side limits on the codeword lengths for each Huffman
139  * code.  To make outputting bits slightly faster, some of these limits are
140  * lower than the limits defined by the LZX format.  This does not significantly
141  * affect the compression ratio, at least for the block sizes we use.
142  */
143 #define MAIN_CODEWORD_LIMIT     12      /* 64-bit: can buffer 4 main symbols  */
144 #define LENGTH_CODEWORD_LIMIT   12
145 #define ALIGNED_CODEWORD_LIMIT  7
146 #define PRE_CODEWORD_LIMIT      7
147
148 #include "wimlib/compress_common.h"
149 #include "wimlib/compressor_ops.h"
150 #include "wimlib/error.h"
151 #include "wimlib/lz_extend.h"
152 #include "wimlib/lzx_common.h"
153 #include "wimlib/unaligned.h"
154 #include "wimlib/util.h"
155
156 /* Matchfinders with 16-bit positions  */
157 #define mf_pos_t        u16
158 #define MF_SUFFIX       _16
159 #include "wimlib/bt_matchfinder.h"
160 #include "wimlib/hc_matchfinder.h"
161
162 /* Matchfinders with 32-bit positions  */
163 #undef mf_pos_t
164 #undef MF_SUFFIX
165 #define mf_pos_t        u32
166 #define MF_SUFFIX       _32
167 #include "wimlib/bt_matchfinder.h"
168 #include "wimlib/hc_matchfinder.h"
169
170 struct lzx_output_bitstream;
171
172 /* Codewords for the LZX Huffman codes.  */
173 struct lzx_codewords {
174         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
175         u32 len[LZX_LENCODE_NUM_SYMBOLS];
176         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
177 };
178
179 /* Codeword lengths (in bits) for the LZX Huffman codes.
180  * A zero length means the corresponding codeword has zero frequency.  */
181 struct lzx_lens {
182         u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
183         u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
184         u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
185 };
186
187 /* Cost model for near-optimal parsing  */
188 struct lzx_costs {
189
190         /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
191          * length 'len' match that has an offset belonging to 'offset_slot'.  */
192         u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
193
194         /* Cost for each symbol in the main code  */
195         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
196
197         /* Cost for each symbol in the length code  */
198         u32 len[LZX_LENCODE_NUM_SYMBOLS];
199
200 #if LZX_CONSIDER_ALIGNED_COSTS
201         /* Cost for each symbol in the aligned code  */
202         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
203 #endif
204 };
205
206 /* Codewords and lengths for the LZX Huffman codes.  */
207 struct lzx_codes {
208         struct lzx_codewords codewords;
209         struct lzx_lens lens;
210 };
211
212 /* Symbol frequency counters for the LZX Huffman codes.  */
213 struct lzx_freqs {
214         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
215         u32 len[LZX_LENCODE_NUM_SYMBOLS];
216         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
217 };
218
219 /* Block split statistics.  See "Block splitting algorithm" below. */
220 #define NUM_LITERAL_OBSERVATION_TYPES 8
221 #define NUM_MATCH_OBSERVATION_TYPES 2
222 #define NUM_OBSERVATION_TYPES (NUM_LITERAL_OBSERVATION_TYPES + NUM_MATCH_OBSERVATION_TYPES)
223 struct block_split_stats {
224         u32 new_observations[NUM_OBSERVATION_TYPES];
225         u32 observations[NUM_OBSERVATION_TYPES];
226         u32 num_new_observations;
227         u32 num_observations;
228 };
229
230 /*
231  * Represents a run of literals followed by a match or end-of-block.  This
232  * struct is needed to temporarily store items chosen by the parser, since items
233  * cannot be written until all items for the block have been chosen and the
234  * block's Huffman codes have been computed.
235  */
236 struct lzx_sequence {
237
238         /* The number of literals in the run.  This may be 0.  The literals are
239          * not stored explicitly in this structure; instead, they are read
240          * directly from the uncompressed data.  */
241         u16 litrunlen;
242
243         /* If the next field doesn't indicate end-of-block, then this is the
244          * match length minus LZX_MIN_MATCH_LEN.  */
245         u16 adjusted_length;
246
247         /* If bit 31 is clear, then this field contains the match header in bits
248          * 0-8, and either the match offset plus LZX_OFFSET_ADJUSTMENT or a
249          * recent offset code in bits 9-30.  Otherwise (if bit 31 is set), this
250          * sequence's literal run was the last literal run in the block, so
251          * there is no match that follows it.  */
252         u32 adjusted_offset_and_match_hdr;
253 };
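
/*
 * Packing sketch, for illustration only (the real packing is done by
 * lzx_record_match() and lzx_finish_sequence() below):
 *
 *      seq->adjusted_offset_and_match_hdr = (offset_data << 9) | match_hdr;
 *      last_seq->adjusted_offset_and_match_hdr = 0x80000000;  (end of block)
 */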
254
255 /*
256  * This structure represents a byte position in the input buffer and a node in
257  * the graph of possible match/literal choices.
258  *
259  * Logically, each incoming edge to this node is labeled with a literal or a
260  * match that can be taken to reach this position from an earlier position; and
261  * each outgoing edge from this node is labeled with a literal or a match that
262  * can be taken to advance from this position to a later position.
263  */
264 struct lzx_optimum_node {
265
266         /* The cost, in bits, of the lowest-cost path that has been found to
267          * reach this position.  This can change as progressively lower cost
268          * paths are found to reach this position.  */
269         u32 cost;
270
271         /*
272          * The match or literal that was taken to reach this position.  This can
273          * change as progressively lower cost paths are found to reach this
274          * position.
275          *
276          * This variable is divided into two bitfields.
277          *
278          * Literals:
279          *      Low bits are 0, high bits are the literal.
280          *
281          * Explicit offset matches:
282          *      Low bits are the match length, high bits are the offset plus 2.
283          *
284          * Repeat offset matches:
285          *      Low bits are the match length, high bits are the queue index.
286          */
287         u32 item;
288 #define OPTIMUM_OFFSET_SHIFT 9
289 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
290 } _aligned_attribute(8);
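
/*
 * Encoding sketch for 'item', following the comment above (illustration only;
 * the parser fills this field in elsewhere in this file):
 *
 *      literal:                item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
 *      explicit offset match:  item = ((offset + 2) << OPTIMUM_OFFSET_SHIFT) | len;
 *      repeat offset match:    item = (queue_idx << OPTIMUM_OFFSET_SHIFT) | len;
 *
 * so 'item & OPTIMUM_LEN_MASK' is the match length (0 for a literal) and
 * 'item >> OPTIMUM_OFFSET_SHIFT' is the literal, adjusted offset, or queue
 * index.
 */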
291
292 /*
293  * Least-recently-used queue for match offsets.
294  *
295  * This is represented as a 64-bit integer for efficiency.  There are three
296  * offsets of 21 bits each.  Bit 64 is garbage.
297  */
298 struct lzx_lru_queue {
299         u64 R;
300 };
301
302 #define LZX_QUEUE64_OFFSET_SHIFT 21
303 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
304
305 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
306 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
307 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
308
309 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
310 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
311 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
312
313 static inline void
314 lzx_lru_queue_init(struct lzx_lru_queue *queue)
315 {
316         queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
317                    ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
318                    ((u64)1 << LZX_QUEUE64_R2_SHIFT);
319 }
320
321 static inline u64
322 lzx_lru_queue_R0(struct lzx_lru_queue queue)
323 {
324         return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
325 }
326
327 static inline u64
328 lzx_lru_queue_R1(struct lzx_lru_queue queue)
329 {
330         return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
331 }
332
333 static inline u64
334 lzx_lru_queue_R2(struct lzx_lru_queue queue)
335 {
336         return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
337 }
338
339 /* Push a match offset onto the front (most recently used) end of the queue.  */
340 static inline struct lzx_lru_queue
341 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
342 {
343         return (struct lzx_lru_queue) {
344                 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
345         };
346 }
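
/*
 * For illustration: since the push shifts the entire 64-bit queue left by 21
 * bits, the new R0 is 'offset', the old R0 becomes R1, the old R1 becomes R2,
 * and the old R2 is discarded (its remaining bit lands in the unused top bit
 * mentioned above).
 */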
347
348 /* Swap a match offset to the front of the queue.  */
349 static inline struct lzx_lru_queue
350 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
351 {
352         if (idx == 0)
353                 return queue;
354
355         if (idx == 1)
356                 return (struct lzx_lru_queue) {
357                         .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
358                              (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
359                              (queue.R & LZX_QUEUE64_R2_MASK),
360                 };
361
362         return (struct lzx_lru_queue) {
363                 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
364                      (queue.R & LZX_QUEUE64_R1_MASK) |
365                      (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
366         };
367 }
368
369 /* The main LZX compressor structure  */
370 struct lzx_compressor {
371
372         /* The "nice" match length: if a match of this length is found, then
373          * choose it immediately without further consideration.  */
374         unsigned nice_match_length;
375
376         /* The maximum search depth: consider at most this many potential
377          * matches at each position.  */
378         unsigned max_search_depth;
379
380         /* The log base 2 of the LZX window size for LZ match offset encoding
381          * purposes.  This will be >= LZX_MIN_WINDOW_ORDER and <=
382          * LZX_MAX_WINDOW_ORDER.  */
383         unsigned window_order;
384
385         /* The number of symbols in the main alphabet.  This depends on
386          * @window_order, since @window_order determines the maximum possible
387          * offset.  */
388         unsigned num_main_syms;
389
390         /* Number of optimization passes per block  */
391         unsigned num_optim_passes;
392
393         /* The preprocessed buffer of data being compressed  */
394         u8 *in_buffer;
395
396         /* The number of bytes of data to be compressed, which is the number of
397          * bytes of data in @in_buffer that are actually valid.  */
398         size_t in_nbytes;
399
400         /* Pointer to the compress() implementation chosen at allocation time */
401         void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
402
403         /* If true, the compressor need not preserve the input buffer if it
404          * compresses the data successfully.  */
405         bool destructive;
406
407         /* The Huffman symbol frequency counters for the current block.  */
408         struct lzx_freqs freqs;
409
410         /* Block split statistics.  */
411         struct block_split_stats split_stats;
412
413         /* The Huffman codes for the current and previous blocks.  The one with
414          * index 'codes_index' is for the current block, and the other one is
415          * for the previous block.  */
416         struct lzx_codes codes[2];
417         unsigned codes_index;
418
419         /* The matches and literals that the parser has chosen for the current
420          * block.  The required length of this array is limited by the maximum
421          * number of matches that can ever be chosen for a single block, plus
422          * one for the special entry at the end.  */
423         struct lzx_sequence chosen_sequences[
424                        DIV_ROUND_UP(SOFT_MAX_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];
425
426         /* Tables for mapping adjusted offsets to offset slots  */
427
428         /* offset slots [0, 29]  */
429         u8 offset_slot_tab_1[32768];
430
431         /* offset slots [30, 49]  */
432         u8 offset_slot_tab_2[128];
433
434         union {
435                 /* Data for greedy or lazy parsing  */
436                 struct {
437                         /* Hash chains matchfinder (MUST BE LAST!!!)  */
438                         union {
439                                 struct hc_matchfinder_16 hc_mf_16;
440                                 struct hc_matchfinder_32 hc_mf_32;
441                         };
442                 };
443
444                 /* Data for near-optimal parsing  */
445                 struct {
446                         /*
447                          * Array of nodes, one per position, for running the
448                          * minimum-cost path algorithm.
449                          *
450                          * This array must be large enough to accommodate the
451                          * worst-case number of nodes, which occurs if we find a
452                          * match of length LZX_MAX_MATCH_LEN at position
453                          * SOFT_MAX_BLOCK_SIZE - 1, producing a block of length
454                          * SOFT_MAX_BLOCK_SIZE - 1 + LZX_MAX_MATCH_LEN.  Add one
455                          * for the end-of-block node.
456                          */
457                         struct lzx_optimum_node optimum_nodes[SOFT_MAX_BLOCK_SIZE - 1 +
458                                                               LZX_MAX_MATCH_LEN + 1];
459
460                         /* The cost model for the current block  */
461                         struct lzx_costs costs;
462
463                         /*
464                          * Cached matches for the current block.  This array
465                          * contains the matches that were found at each position
466                          * in the block.  Specifically, for each position, there
467                          * is a special 'struct lz_match' whose 'length' field
468                          * contains the number of matches that were found at
469                          * that position; this is followed by the matches
470                          * themselves, if any, sorted by strictly increasing
471                          * length.
472                          *
473                          * Note: in rare cases, there will be a very high number
474                          * of matches in the block and this array will overflow.
475                          * If this happens, we force the end of the current
476                          * block.  LZX_CACHE_LENGTH is the length at which we
477                          * actually check for overflow.  The extra slots beyond
478                          * this are enough to absorb the worst case overflow,
479                          * which occurs if starting at
480                          * &match_cache[LZX_CACHE_LENGTH - 1], we write the
481                          * match count header, then write
482                          * LZX_MAX_MATCHES_PER_POS matches, then skip searching
483                          * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
484                          * write the match count header for each.
485                          */
486                         struct lz_match match_cache[LZX_CACHE_LENGTH +
487                                                     LZX_MAX_MATCHES_PER_POS +
488                                                     LZX_MAX_MATCH_LEN - 1];
489
490                         /* Binary trees matchfinder (MUST BE LAST!!!)  */
491                         union {
492                                 struct bt_matchfinder_16 bt_mf_16;
493                                 struct bt_matchfinder_32 bt_mf_32;
494                         };
495                 };
496         };
497 };
498
499 /*
500  * Will a matchfinder using 16-bit positions be sufficient for compressing
501  * buffers of up to the specified size?  The limit could be 65536 bytes, but we
502  * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
503  * This requires that the limit be no more than the length of offset_slot_tab_1
504  * (currently 32768).
505  */
506 static inline bool
507 lzx_is_16_bit(size_t max_bufsize)
508 {
509         STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
510         return max_bufsize <= 32768;
511 }
512
513 /*
514  * The following macros call either the 16-bit or the 32-bit version of a
515  * matchfinder function based on the value of 'is_16_bit', which will be known
516  * at compilation time.
517  */
518
519 #define CALL_HC_MF(is_16_bit, c, funcname, ...)                               \
520         ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
521                        CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__));
522
523 #define CALL_BT_MF(is_16_bit, c, funcname, ...)                               \
524         ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \
525                        CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__));
526
527 /*
528  * Structure to keep track of the current state of sending bits to the
529  * compressed output buffer.
530  *
531  * The LZX bitstream is encoded as a sequence of 16-bit coding units.
532  */
533 struct lzx_output_bitstream {
534
535         /* Bits that haven't yet been written to the output buffer.  */
536         machine_word_t bitbuf;
537
538         /* Number of bits currently held in @bitbuf.  */
539         u32 bitcount;
540
541         /* Pointer to the start of the output buffer.  */
542         u8 *start;
543
544         /* Pointer to the position in the output buffer at which the next coding
545          * unit should be written.  */
546         u8 *next;
547
548         /* Pointer just past the end of the output buffer, rounded down to a
549          * 2-byte boundary.  */
550         u8 *end;
551 };
552
553 /* Can the specified number of bits always be added to 'bitbuf' after any
554  * pending 16-bit coding units have been flushed?  */
555 #define CAN_BUFFER(n)   ((n) <= (8 * sizeof(machine_word_t)) - 15)
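
/*
 * Worked example: on a 64-bit build, CAN_BUFFER(n) holds for n <= 64 - 15 = 49,
 * so 4 * MAIN_CODEWORD_LIMIT = 48 bits (four main symbols) can be buffered
 * between flushes, matching the comment on MAIN_CODEWORD_LIMIT above.  On a
 * 32-bit build the limit is 32 - 15 = 17 bits, so literals are flushed one at
 * a time.
 */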
556
557 /*
558  * Initialize the output bitstream.
559  *
560  * @os
561  *      The output bitstream structure to initialize.
562  * @buffer
563  *      The buffer being written to.
564  * @size
565  *      Size of @buffer, in bytes.
566  */
567 static void
568 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
569 {
570         os->bitbuf = 0;
571         os->bitcount = 0;
572         os->start = buffer;
573         os->next = os->start;
574         os->end = os->start + (size & ~1);
575 }
576
577 /* Add some bits to the bitbuffer variable of the output bitstream.  The caller
578  * must make sure there is enough room.  */
579 static inline void
580 lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
581 {
582         os->bitbuf = (os->bitbuf << num_bits) | bits;
583         os->bitcount += num_bits;
584 }
585
586 /* Flush bits from the bitbuffer variable to the output buffer.  'max_num_bits'
587  * specifies the maximum number of bits that may have been added since the last
588  * flush.  */
589 static inline void
590 lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
591 {
592         /* Masking the number of bits to shift is only needed to avoid undefined
593          * behavior; we don't actually care about the results of bad shifts.  On
594          * x86, the explicit masking generates no extra code.  */
595         const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1;
596
597         if (os->end - os->next < 6)
598                 return;
599         put_unaligned_le16(os->bitbuf >> ((os->bitcount - 16) &
600                                             shift_mask), os->next + 0);
601         if (max_num_bits > 16)
602                 put_unaligned_le16(os->bitbuf >> ((os->bitcount - 32) &
603                                                 shift_mask), os->next + 2);
604         if (max_num_bits > 32)
605                 put_unaligned_le16(os->bitbuf >> ((os->bitcount - 48) &
606                                                 shift_mask), os->next + 4);
607         os->next += (os->bitcount >> 4) << 1;
608         os->bitcount &= 15;
609 }
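
/*
 * For example: if 35 bits are pending, the two complete 16-bit coding units
 * are written, os->next advances by (35 >> 4) << 1 = 4 bytes, and 35 & 15 = 3
 * bits remain buffered for the next flush.
 */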
610
611 /* Add at most 16 bits to the bitbuffer and flush it.  */
612 static inline void
613 lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
614 {
615         lzx_add_bits(os, bits, num_bits);
616         lzx_flush_bits(os, 16);
617 }
618
619 /*
620  * Flush the last coding unit to the output buffer if needed.  Return the total
621  * number of bytes written to the output buffer, or 0 if an overflow occurred.
622  */
623 static u32
624 lzx_flush_output(struct lzx_output_bitstream *os)
625 {
626         if (os->end - os->next < 6)
627                 return 0;
628
629         if (os->bitcount != 0) {
630                 put_unaligned_le16(os->bitbuf << (16 - os->bitcount), os->next);
631                 os->next += 2;
632         }
633
634         return os->next - os->start;
635 }
636
637 /* Build the main, length, and aligned offset Huffman codes used in LZX.
638  *
639  * This takes as input the frequency tables for each code and produces as output
640  * a set of tables that map symbols to codewords and codeword lengths.  */
641 static void
642 lzx_make_huffman_codes(struct lzx_compressor *c)
643 {
644         const struct lzx_freqs *freqs = &c->freqs;
645         struct lzx_codes *codes = &c->codes[c->codes_index];
646
647         STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
648                       MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
649         STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
650                       LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
651         STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
652                       ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
653
654         make_canonical_huffman_code(c->num_main_syms,
655                                     MAIN_CODEWORD_LIMIT,
656                                     freqs->main,
657                                     codes->lens.main,
658                                     codes->codewords.main);
659
660         make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
661                                     LENGTH_CODEWORD_LIMIT,
662                                     freqs->len,
663                                     codes->lens.len,
664                                     codes->codewords.len);
665
666         make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
667                                     ALIGNED_CODEWORD_LIMIT,
668                                     freqs->aligned,
669                                     codes->lens.aligned,
670                                     codes->codewords.aligned);
671 }
672
673 /* Reset the symbol frequencies for the LZX Huffman codes.  */
674 static void
675 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
676 {
677         memset(&c->freqs, 0, sizeof(c->freqs));
678 }
679
680 static unsigned
681 lzx_compute_precode_items(const u8 lens[restrict],
682                           const u8 prev_lens[restrict],
683                           u32 precode_freqs[restrict],
684                           unsigned precode_items[restrict])
685 {
686         unsigned *itemptr;
687         unsigned run_start;
688         unsigned run_end;
689         unsigned extra_bits;
690         int delta;
691         u8 len;
692
693         itemptr = precode_items;
694         run_start = 0;
695
696         while (!((len = lens[run_start]) & 0x80)) {
697
698                 /* len = the length being repeated  */
699
700                 /* Find the next run of codeword lengths.  */
701
702                 run_end = run_start + 1;
703
704                 /* Fast case for a single length.  */
705                 if (likely(len != lens[run_end])) {
706                         delta = prev_lens[run_start] - len;
707                         if (delta < 0)
708                                 delta += 17;
709                         precode_freqs[delta]++;
710                         *itemptr++ = delta;
711                         run_start++;
712                         continue;
713                 }
714
715                 /* Extend the run.  */
716                 do {
717                         run_end++;
718                 } while (len == lens[run_end]);
719
720                 if (len == 0) {
721                         /* Run of zeroes.  */
722
723                         /* Symbol 18: RLE 20 to 51 zeroes at a time.  */
724                         while ((run_end - run_start) >= 20) {
725                                 extra_bits = min((run_end - run_start) - 20, 0x1f);
726                                 precode_freqs[18]++;
727                                 *itemptr++ = 18 | (extra_bits << 5);
728                                 run_start += 20 + extra_bits;
729                         }
730
731                         /* Symbol 17: RLE 4 to 19 zeroes at a time.  */
732                         if ((run_end - run_start) >= 4) {
733                                 extra_bits = min((run_end - run_start) - 4, 0xf);
734                                 precode_freqs[17]++;
735                                 *itemptr++ = 17 | (extra_bits << 5);
736                                 run_start += 4 + extra_bits;
737                         }
738                 } else {
739
740                         /* A run of nonzero lengths. */
741
742                         /* Symbol 19: RLE 4 to 5 of any length at a time.  */
743                         while ((run_end - run_start) >= 4) {
744                                 extra_bits = (run_end - run_start) > 4;
745                                 delta = prev_lens[run_start] - len;
746                                 if (delta < 0)
747                                         delta += 17;
748                                 precode_freqs[19]++;
749                                 precode_freqs[delta]++;
750                                 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
751                                 run_start += 4 + extra_bits;
752                         }
753                 }
754
755                 /* Output any remaining lengths without RLE.  */
756                 while (run_start != run_end) {
757                         delta = prev_lens[run_start] - len;
758                         if (delta < 0)
759                                 delta += 17;
760                         precode_freqs[delta]++;
761                         *itemptr++ = delta;
762                         run_start++;
763                 }
764         }
765
766         return itemptr - precode_items;
767 }
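
/*
 * Example of the item encoding above (illustration only): a run of 30 zero
 * lengths yields the single item '18 | ((30 - 20) << 5)', i.e. precode symbol
 * 18 with extra-bits value 10, while a run of 5 identical nonzero lengths
 * yields '19 | (1 << 5) | (delta << 6)'.
 */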
768
769 /*
770  * Output a Huffman code in the compressed form used in LZX.
771  *
772  * The Huffman code is represented in the output as a logical series of codeword
773  * lengths from which the Huffman code, which must be in canonical form, can be
774  * reconstructed.
775  *
776  * The codeword lengths are themselves compressed using a separate Huffman code,
777  * the "precode", which contains a symbol for each possible codeword length in
778  * the larger code as well as several special symbols to represent repeated
779  * codeword lengths (a form of run-length encoding).  The precode is itself
780  * constructed in canonical form, and its codeword lengths are represented
781  * literally in 20 4-bit fields that immediately precede the compressed codeword
782  * lengths of the larger code.
783  *
784  * Furthermore, the codeword lengths of the larger code are actually represented
785  * as deltas from the codeword lengths of the corresponding code in the previous
786  * block.
787  *
788  * @os:
789  *      Bitstream to which to write the compressed Huffman code.
790  * @lens:
791  *      The codeword lengths, indexed by symbol, in the Huffman code.
792  * @prev_lens:
793  *      The codeword lengths, indexed by symbol, in the corresponding Huffman
794  *      code in the previous block, or all zeroes if this is the first block.
795  * @num_lens:
796  *      The number of symbols in the Huffman code.
797  */
798 static void
799 lzx_write_compressed_code(struct lzx_output_bitstream *os,
800                           const u8 lens[restrict],
801                           const u8 prev_lens[restrict],
802                           unsigned num_lens)
803 {
804         u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
805         u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
806         u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
807         unsigned precode_items[num_lens];
808         unsigned num_precode_items;
809         unsigned precode_item;
810         unsigned precode_sym;
811         unsigned i;
812         u8 saved = lens[num_lens];
813         *(u8 *)(lens + num_lens) = 0x80;
814
815         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
816                 precode_freqs[i] = 0;
817
818         /* Compute the "items" (RLE / literal tokens and extra bits) with which
819          * the codeword lengths in the larger code will be output.  */
820         num_precode_items = lzx_compute_precode_items(lens,
821                                                       prev_lens,
822                                                       precode_freqs,
823                                                       precode_items);
824
825         /* Build the precode.  */
826         STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 &&
827                       PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN);
828         make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
829                                     PRE_CODEWORD_LIMIT,
830                                     precode_freqs, precode_lens,
831                                     precode_codewords);
832
833         /* Output the lengths of the codewords in the precode.  */
834         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
835                 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
836
837         /* Output the encoded lengths of the codewords in the larger code.  */
838         for (i = 0; i < num_precode_items; i++) {
839                 precode_item = precode_items[i];
840                 precode_sym = precode_item & 0x1F;
841                 lzx_add_bits(os, precode_codewords[precode_sym],
842                              precode_lens[precode_sym]);
843                 if (precode_sym >= 17) {
844                         if (precode_sym == 17) {
845                                 lzx_add_bits(os, precode_item >> 5, 4);
846                         } else if (precode_sym == 18) {
847                                 lzx_add_bits(os, precode_item >> 5, 5);
848                         } else {
849                                 lzx_add_bits(os, (precode_item >> 5) & 1, 1);
850                                 precode_sym = precode_item >> 6;
851                                 lzx_add_bits(os, precode_codewords[precode_sym],
852                                              precode_lens[precode_sym]);
853                         }
854                 }
855                 STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1));
856                 lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1);
857         }
858
859         *(u8 *)(lens + num_lens) = saved;
860 }
861
862 /*
863  * Write all matches and literal bytes (which were precomputed) in an LZX
864  * compressed block to the output bitstream in the final compressed
865  * representation.
866  *
867  * @os
868  *      The output bitstream.
869  * @block_type
870  *      The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
871  *      LZX_BLOCKTYPE_VERBATIM).
872  * @block_data
873  *      The uncompressed data of the block.
874  * @sequences
875  *      The matches and literals to output, given as a series of sequences.
876  * @codes
877  *      The main, length, and aligned offset Huffman codes for the current
878  *      LZX compressed block.
879  */
880 static void
881 lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
882                     const u8 *block_data, const struct lzx_sequence sequences[],
883                     const struct lzx_codes *codes)
884 {
885         const struct lzx_sequence *seq = sequences;
886         u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED);
887
888         for (;;) {
889                 /* Output the next sequence.  */
890
891                 unsigned litrunlen = seq->litrunlen;
892                 unsigned match_hdr;
893                 unsigned main_symbol;
894                 unsigned adjusted_length;
895                 u32 adjusted_offset;
896                 unsigned offset_slot;
897                 unsigned num_extra_bits;
898                 u32 extra_bits;
899
900                 /* Output the literal run of the sequence.  */
901
902                 if (litrunlen) {  /* Is the literal run nonempty?  */
903
904                         /* Verify optimization is enabled on 64-bit  */
905                         STATIC_ASSERT(sizeof(machine_word_t) < 8 ||
906                                       CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT));
907
908                         if (CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT)) {
909
910                                 /* 64-bit: write 4 literals at a time.  */
911                                 while (litrunlen >= 4) {
912                                         unsigned lit0 = block_data[0];
913                                         unsigned lit1 = block_data[1];
914                                         unsigned lit2 = block_data[2];
915                                         unsigned lit3 = block_data[3];
916                                         lzx_add_bits(os, codes->codewords.main[lit0],
917                                                      codes->lens.main[lit0]);
918                                         lzx_add_bits(os, codes->codewords.main[lit1],
919                                                      codes->lens.main[lit1]);
920                                         lzx_add_bits(os, codes->codewords.main[lit2],
921                                                      codes->lens.main[lit2]);
922                                         lzx_add_bits(os, codes->codewords.main[lit3],
923                                                      codes->lens.main[lit3]);
924                                         lzx_flush_bits(os, 4 * MAIN_CODEWORD_LIMIT);
925                                         block_data += 4;
926                                         litrunlen -= 4;
927                                 }
928                                 if (litrunlen--) {
929                                         unsigned lit = *block_data++;
930                                         lzx_add_bits(os, codes->codewords.main[lit],
931                                                      codes->lens.main[lit]);
932                                         if (litrunlen--) {
933                                                 unsigned lit = *block_data++;
934                                                 lzx_add_bits(os, codes->codewords.main[lit],
935                                                              codes->lens.main[lit]);
936                                                 if (litrunlen--) {
937                                                         unsigned lit = *block_data++;
938                                                         lzx_add_bits(os, codes->codewords.main[lit],
939                                                                      codes->lens.main[lit]);
940                                                         lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
941                                                 } else {
942                                                         lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
943                                                 }
944                                         } else {
945                                                 lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT);
946                                         }
947                                 }
948                         } else {
949                                 /* 32-bit: write 1 literal at a time.  */
950                                 do {
951                                         unsigned lit = *block_data++;
952                                         lzx_add_bits(os, codes->codewords.main[lit],
953                                                      codes->lens.main[lit]);
954                                         lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
955                                 } while (--litrunlen);
956                         }
957                 }
958
959                 /* Was this the last literal run?  */
960                 if (seq->adjusted_offset_and_match_hdr & 0x80000000)
961                         return;
962
963                 /* Nope; output the match.  */
964
965                 match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
966                 main_symbol = LZX_NUM_CHARS + match_hdr;
967                 adjusted_length = seq->adjusted_length;
968
969                 block_data += adjusted_length + LZX_MIN_MATCH_LEN;
970
971                 offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
972                 adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
973
974                 num_extra_bits = lzx_extra_offset_bits[offset_slot];
975                 extra_bits = adjusted_offset - lzx_offset_slot_base[offset_slot];
976
977         #define MAX_MATCH_BITS  (MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
978                                  14 + ALIGNED_CODEWORD_LIMIT)
979
980                 /* Verify optimization is enabled on 64-bit  */
981                 STATIC_ASSERT(sizeof(machine_word_t) < 8 || CAN_BUFFER(MAX_MATCH_BITS));
982
983                 /* Output the main symbol for the match.  */
984
985                 lzx_add_bits(os, codes->codewords.main[main_symbol],
986                              codes->lens.main[main_symbol]);
987                 if (!CAN_BUFFER(MAX_MATCH_BITS))
988                         lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
989
990                 /* If needed, output the length symbol for the match.  */
991
992                 if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
993                         lzx_add_bits(os, codes->codewords.len[adjusted_length -
994                                                               LZX_NUM_PRIMARY_LENS],
995                                      codes->lens.len[adjusted_length -
996                                                      LZX_NUM_PRIMARY_LENS]);
997                         if (!CAN_BUFFER(MAX_MATCH_BITS))
998                                 lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
999                 }
1000
1001                 /* Output the extra offset bits for the match.  In aligned
1002                  * offset blocks, the lowest 3 bits of the adjusted offset are
1003                  * Huffman-encoded using the aligned offset code, provided that
1004                  * at least 3 extra offset bits are required.  All other
1005                  * extra offset bits are output verbatim.  */
1006
1007                 if ((adjusted_offset & ones_if_aligned) >= 16) {
1008
1009                         lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
1010                                      num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
1011                         if (!CAN_BUFFER(MAX_MATCH_BITS))
1012                                 lzx_flush_bits(os, 14);
1013
1014                         lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
1015                                                                   LZX_ALIGNED_OFFSET_BITMASK],
1016                                      codes->lens.aligned[adjusted_offset &
1017                                                          LZX_ALIGNED_OFFSET_BITMASK]);
1018                         if (!CAN_BUFFER(MAX_MATCH_BITS))
1019                                 lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
1020                 } else {
1021                         STATIC_ASSERT(CAN_BUFFER(17));
1022
1023                         lzx_add_bits(os, extra_bits, num_extra_bits);
1024                         if (!CAN_BUFFER(MAX_MATCH_BITS))
1025                                 lzx_flush_bits(os, 17);
1026                 }
1027
1028                 if (CAN_BUFFER(MAX_MATCH_BITS))
1029                         lzx_flush_bits(os, MAX_MATCH_BITS);
1030
1031                 /* Advance to the next sequence.  */
1032                 seq++;
1033         }
1034 }
1035
1036 static void
1037 lzx_write_compressed_block(const u8 *block_begin,
1038                            int block_type,
1039                            u32 block_size,
1040                            unsigned window_order,
1041                            unsigned num_main_syms,
1042                            const struct lzx_sequence sequences[],
1043                            const struct lzx_codes * codes,
1044                            const struct lzx_lens * prev_lens,
1045                            struct lzx_output_bitstream * os)
1046 {
1047         /* The first three bits indicate the type of block and are one of the
1048          * LZX_BLOCKTYPE_* constants.  */
1049         lzx_write_bits(os, block_type, 3);
1050
1051         /* Output the block size.
1052          *
1053          * The original LZX format seemed to always encode the block size in 3
1054          * bytes.  However, the implementation in WIMGAPI, as used in WIM files,
1055          * uses the first bit to indicate whether the block is the default size
1056          * (32768) or a different size given explicitly by the next 16 bits.
1057          *
1058          * By default, this compressor uses a window size of 32768 and therefore
1059          * follows the WIMGAPI behavior.  However, this compressor also supports
1060          * window sizes greater than 32768 bytes, which do not appear to be
1061          * supported by WIMGAPI.  In such cases, we retain the default size bit
1062  * to mean a size of 32768 bytes but output a non-default block size in 24
1063          * bits rather than 16.  The compatibility of this behavior is unknown
1064          * because WIMs created with chunk size greater than 32768 can seemingly
1065          * only be opened by wimlib anyway.  */
1066         if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
1067                 lzx_write_bits(os, 1, 1);
1068         } else {
1069                 lzx_write_bits(os, 0, 1);
1070
1071                 if (window_order >= 16)
1072                         lzx_write_bits(os, block_size >> 16, 8);
1073
1074                 lzx_write_bits(os, block_size & 0xFFFF, 16);
1075         }
1076
1077         /* If it's an aligned offset block, output the aligned offset code.  */
1078         if (block_type == LZX_BLOCKTYPE_ALIGNED) {
1079                 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1080                         lzx_write_bits(os, codes->lens.aligned[i],
1081                                        LZX_ALIGNEDCODE_ELEMENT_SIZE);
1082                 }
1083         }
1084
1085         /* Output the main code (two parts).  */
1086         lzx_write_compressed_code(os, codes->lens.main,
1087                                   prev_lens->main,
1088                                   LZX_NUM_CHARS);
1089         lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
1090                                   prev_lens->main + LZX_NUM_CHARS,
1091                                   num_main_syms - LZX_NUM_CHARS);
1092
1093         /* Output the length code.  */
1094         lzx_write_compressed_code(os, codes->lens.len,
1095                                   prev_lens->len,
1096                                   LZX_LENCODE_NUM_SYMBOLS);
1097
1098         /* Output the compressed matches and literals.  */
1099         lzx_write_sequences(os, block_type, block_begin, sequences, codes);
1100 }
1101
1102 /* Given the frequencies of symbols in an LZX-compressed block and the
1103  * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
1104  * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
1105  * will take fewer bits to output.  */
1106 static int
1107 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1108                                const struct lzx_codes * codes)
1109 {
1110         u32 aligned_cost = 0;
1111         u32 verbatim_cost = 0;
1112
1113         /* A verbatim block requires 3 bits in each place that an aligned symbol
1114          * would be used in an aligned offset block.  */
1115         for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1116                 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
1117                 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1118         }
1119
1120         /* Account for output of the aligned offset code.  */
1121         aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1122
1123         if (aligned_cost < verbatim_cost)
1124                 return LZX_BLOCKTYPE_ALIGNED;
1125         else
1126                 return LZX_BLOCKTYPE_VERBATIM;
1127 }
1128
1129 /*
1130  * Return the offset slot for the specified adjusted match offset, using the
1131  * compressor's acceleration tables to speed up the mapping.
1132  */
1133 static inline unsigned
1134 lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
1135                          bool is_16_bit)
1136 {
1137         if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
1138                 return c->offset_slot_tab_1[adjusted_offset];
1139         return c->offset_slot_tab_2[adjusted_offset >> 14];
1140 }
1141
1142 /*
1143  * Finish an LZX block:
1144  *
1145  * - build the Huffman codes
1146  * - decide whether to output the block as VERBATIM or ALIGNED
1147  * - output the block
1148  * - swap the indices of the current and previous Huffman codes
1149  */
1150 static void
1151 lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
1152                  const u8 *block_begin, u32 block_size, u32 seq_idx)
1153 {
1154         int block_type;
1155
1156         lzx_make_huffman_codes(c);
1157
1158         block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
1159                                                     &c->codes[c->codes_index]);
1160         lzx_write_compressed_block(block_begin,
1161                                    block_type,
1162                                    block_size,
1163                                    c->window_order,
1164                                    c->num_main_syms,
1165                                    &c->chosen_sequences[seq_idx],
1166                                    &c->codes[c->codes_index],
1167                                    &c->codes[c->codes_index ^ 1].lens,
1168                                    os);
1169         c->codes_index ^= 1;
1170 }
1171
1172 /* Tally the Huffman symbol for a literal and increment the literal run length.
1173  */
1174 static inline void
1175 lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
1176 {
1177         c->freqs.main[literal]++;
1178         ++*litrunlen_p;
1179 }
1180
1181 /* Tally the Huffman symbol for a match, save the match data and the length of
1182  * the preceding literal run in the next lzx_sequence, and update the recent
1183  * offsets queue.  */
1184 static inline void
1185 lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
1186                  u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
1187                  u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
1188 {
1189         u32 litrunlen = *litrunlen_p;
1190         struct lzx_sequence *next_seq = *next_seq_p;
1191         unsigned offset_slot;
1192         unsigned v;
1193
1194         v = length - LZX_MIN_MATCH_LEN;
1195
1196         /* Save the literal run length and adjusted length.  */
1197         next_seq->litrunlen = litrunlen;
1198         next_seq->adjusted_length = v;
1199
1200         /* Compute the length header and tally the length symbol if needed  */
1201         if (v >= LZX_NUM_PRIMARY_LENS) {
1202                 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1203                 v = LZX_NUM_PRIMARY_LENS;
1204         }
1205
1206         /* Compute the offset slot  */
1207         offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1208
1209         /* Compute the match header.  */
1210         v += offset_slot * LZX_NUM_LEN_HEADERS;
1211
1212         /* Save the adjusted offset and match header.  */
1213         next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v;
1214
1215         /* Tally the main symbol.  */
1216         c->freqs.main[LZX_NUM_CHARS + v]++;
1217
1218         /* Update the recent offsets queue.  */
1219         if (offset_data < LZX_NUM_RECENT_OFFSETS) {
1220                 /* Repeat offset match  */
1221                 swap(recent_offsets[0], recent_offsets[offset_data]);
1222         } else {
1223                 /* Explicit offset match  */
1224
1225                 /* Tally the aligned offset symbol if needed  */
1226                 if (offset_data >= 16)
1227                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1228
1229                 recent_offsets[2] = recent_offsets[1];
1230                 recent_offsets[1] = recent_offsets[0];
1231                 recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT;
1232         }
1233
1234         /* Reset the literal run length and advance to the next sequence.  */
1235         *next_seq_p = next_seq + 1;
1236         *litrunlen_p = 0;
1237 }
1238
1239 /* Finish the last lzx_sequence.  The last lzx_sequence is just a literal run;
1240  * there is no match.  This literal run may be empty.  */
1241 static inline void
1242 lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
1243 {
1244         last_seq->litrunlen = litrunlen;
1245
1246         /* Special value to mark last sequence  */
1247         last_seq->adjusted_offset_and_match_hdr = 0x80000000;
1248 }
1249
1250 /******************************************************************************/
1251
1252 /*
1253  * Block splitting algorithm.  The problem is to decide when it is worthwhile to
1254  * start a new block with new entropy codes.  There is a theoretically optimal
1255  * solution: recursively consider every possible block split, considering the
1256  * exact cost of each block, and choose the minimum cost approach.  But this is
1257  * far too slow.  Instead, as an approximation, we can count symbols and after
1258  * every N symbols, compare the expected distribution of symbols based on the
1259  * previous data with the actual distribution.  If they differ "by enough", then
1260  * start a new block.
1261  *
1262  * As an optimization and heuristic, we don't distinguish between every symbol
1263  * but rather we combine many symbols into a single "observation type".  For
1264  * literals we only look at the high bits and low bits, and for matches we only
1265  * look at whether the match is long or not.  The assumption is that for typical
1266  * "real" data, places that are good block boundaries will tend to be noticable
1267  * based only on changes in these aggregate frequencies, without looking for
1268  * subtle differences in individual symbols.  For example, a change from ASCII
1269  * bytes to non-ASCII bytes, or from few matches (generally less compressible)
1270  * to many matches (generally more compressible), would be easily noticed based
1271  * on the aggregates.
1272  *
1273  * For determining whether the frequency distributions are "different enough" to
1274  * start a new block, the simple heuristic of splitting when the sum of absolute
1275  * differences exceeds a constant seems to be good enough.  We also add a number
1276  * proportional to the block size so that the algorithm is more likely to end
1277  * large blocks than small blocks.  This reflects the general expectation that
1278  * it will become increasingly beneficial to start a new block as the current
1279  * blocks grows larger.
1280  *
1281  * Finally, since this is only an approximation, it is not strictly necessary
1282  * to consider the exact symbols being used.  With "near-optimal parsing", for example,
1283  * the actual symbols that will be used are unknown until after the block
1284  * boundary is chosen and the block has been optimized.  Since the final choices
1285  * cannot be used, we can use preliminary "greedy" choices instead.
1286  */
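
/*
 * A worked example with hypothetical counts (only two observation types shown
 * for brevity): suppose the accumulated history is observations[] = {900, 100}
 * with num_observations = 1000, and the newest window is new_observations[] =
 * {50, 200} with num_new_observations = 250.  Cross-multiplying to avoid
 * division:
 *
 *      expected[0] = 900 * 250 = 225000    actual[0] =  50 * 1000 =  50000
 *      expected[1] = 100 * 250 =  25000    actual[1] = 200 * 1000 = 200000
 *
 *      total_delta = 175000 + 175000 = 350000
 *
 * which already exceeds 200 * num_observations = 200000, so this shift in the
 * symbol mix would end the block (the block-size term only makes ending more
 * likely).
 */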
1287
1288 /* Initialize the block split statistics when starting a new block. */
1289 static void
1290 init_block_split_stats(struct block_split_stats *stats)
1291 {
1292         for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
1293                 stats->new_observations[i] = 0;
1294                 stats->observations[i] = 0;
1295         }
1296         stats->num_new_observations = 0;
1297         stats->num_observations = 0;
1298 }
1299
1300 /* Literal observation.  Heuristic: use the top 2 bits and low 1 bit of the
1301  * literal, for 8 possible literal observation types.  */
1302 static inline void
1303 observe_literal(struct block_split_stats *stats, u8 lit)
1304 {
1305         stats->new_observations[((lit >> 5) & 0x6) | (lit & 1)]++;
1306         stats->num_new_observations++;
1307 }
1308
1309 /* Match observation.  Heuristic: use one observation type for "short match" and
1310  * one observation type for "long match".  */
1311 static inline void
1312 observe_match(struct block_split_stats *stats, unsigned length)
1313 {
1314         stats->new_observations[NUM_LITERAL_OBSERVATION_TYPES + (length >= 9)]++;
1315         stats->num_new_observations++;
1316 }
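
/* For example: the literal 'A' (0x41) maps to observation type
 * ((0x41 >> 5) & 0x6) | (0x41 & 1) = 3 and the literal 0xE8 maps to type 6,
 * while a match of length 12 counts as a "long match" observation and a match
 * of length 5 as a "short match" observation.  */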
1317
1318 static bool
1319 do_end_block_check(struct block_split_stats *stats, u32 block_size)
1320 {
1321         if (stats->num_observations > 0) {
1322
1323                 /* Note: to avoid slow divisions, we do not divide by the
1324                  * observation counts, but rather cross-multiply each count
1325                  * by the opposite total.  */
1326                 u32 total_delta = 0;
1327                 for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
1328                         u32 expected = stats->observations[i] * stats->num_new_observations;
1329                         u32 actual = stats->new_observations[i] * stats->num_observations;
1330                         u32 delta = (actual > expected) ? actual - expected :
1331                                                           expected - actual;
1332                         total_delta += delta;
1333                 }
1334
1335                 /* Ready to end the block? */
1336                 if (total_delta + (block_size >> 12) * stats->num_observations >=
1337                     200 * stats->num_observations)
1338                         return true;
1339         }
1340
1341         for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
1342                 stats->num_observations += stats->new_observations[i];
1343                 stats->observations[i] += stats->new_observations[i];
1344                 stats->new_observations[i] = 0;
1345         }
1346         stats->num_new_observations = 0;
1347         return false;
1348 }
1349
1350 static inline bool
1351 should_end_block(struct block_split_stats *stats,
1352                  const u8 *in_block_begin, const u8 *in_next, const u8 *in_end)
1353 {
1354         /* Ready to check block split statistics? */
1355         if (stats->num_new_observations < 250 ||
1356             in_next - in_block_begin < MIN_BLOCK_SIZE ||
1357             in_end - in_next < MIN_BLOCK_SIZE)
1358                 return false;
1359
1360         return do_end_block_check(stats, in_next - in_block_begin);
1361 }
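
/*
 * A minimal sketch (not compiled) of how the block split statistics are meant
 * to be driven by a parser; the real usage is in lzx_compress_lazy() and
 * lzx_compress_near_optimal() below, interleaved with matchfinding.  The
 * variable 'match_len' is a hypothetical stand-in for a matchfinder result.
 */
#if 0
static void
example_observe_block(struct block_split_stats *stats,
                      const u8 *in_block_begin, const u8 *in_end)
{
        const u8 *in_next = in_block_begin;

        init_block_split_stats(stats);
        while (in_next < in_end &&
               !should_end_block(stats, in_block_begin, in_next, in_end)) {
                unsigned match_len = 0;  /* ... from a matchfinder ... */

                if (match_len >= 2) {
                        observe_match(stats, match_len);
                        in_next += match_len;
                } else {
                        observe_literal(stats, *in_next);
                        in_next++;
                }
        }
        /* End the block at 'in_next' and start a new one.  */
}
#endif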
1362
1363 /******************************************************************************/
1364
1365 /*
1366  * Given the minimum-cost path computed through the item graph for the current
1367  * block, walk the path and count how many of each symbol in each Huffman-coded
1368  * alphabet would be required to output the items (matches and literals) along
1369  * the path.
1370  *
1371  * Note that the path will be walked backwards (from the end of the block to the
1372  * beginning of the block), but this doesn't matter because this function only
1373  * computes frequencies.
1374  */
1375 static inline void
1376 lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1377 {
1378         u32 node_idx = block_size;
1379         for (;;) {
1380                 u32 len;
1381                 u32 offset_data;
1382                 unsigned v;
1383                 unsigned offset_slot;
1384
1385                 /* Tally literals until either a match or the beginning of the
1386                  * block is reached.  */
1387                 for (;;) {
1388                         u32 item = c->optimum_nodes[node_idx].item;
1389
1390                         len = item & OPTIMUM_LEN_MASK;
1391                         offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1392
1393                         if (len != 0)  /* Not a literal?  */
1394                                 break;
1395
1396                         /* Tally the main symbol for the literal.  */
1397                         c->freqs.main[offset_data]++;
1398
1399                         if (--node_idx == 0) /* Beginning of block was reached?  */
1400                                 return;
1401                 }
1402
1403                 node_idx -= len;
1404
1405                 /* Tally a match.  */
1406
1407                 /* Tally the aligned offset symbol if needed.  */
1408                 if (offset_data >= 16)
1409                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1410
1411                 /* Tally the length symbol if needed.  */
1412                 v = len - LZX_MIN_MATCH_LEN;
1413                 if (v >= LZX_NUM_PRIMARY_LENS) {
1414                         c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1415                         v = LZX_NUM_PRIMARY_LENS;
1416                 }
1417
1418                 /* Tally the main symbol.  */
1419                 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1420                 v += offset_slot * LZX_NUM_LEN_HEADERS;
1421                 c->freqs.main[LZX_NUM_CHARS + v]++;
1422
1423                 if (node_idx == 0) /* Beginning of block was reached?  */
1424                         return;
1425         }
1426 }
1427
1428 /*
1429  * Like lzx_tally_item_list(), but this function also generates the list of
1430  * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
1431  * ready to be output to the bitstream after the Huffman codes are computed.
1432  * The lzx_sequences will be written to decreasing memory addresses as the path
1433  * is walked backwards, which means they will end up in the expected
1434  * first-to-last order.  The return value is the index in c->chosen_sequences at
1435  * which the lzx_sequences begin.
1436  */
1437 static inline u32
1438 lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1439 {
1440         u32 node_idx = block_size;
1441         u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
1442         u32 lit_start_node;
1443
1444         /* Special value to mark last sequence  */
1445         c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000;
1446
1447         lit_start_node = node_idx;
1448         for (;;) {
1449                 u32 len;
1450                 u32 offset_data;
1451                 unsigned v;
1452                 unsigned offset_slot;
1453
1454                 /* Record literals until either a match or the beginning of the
1455                  * block is reached.  */
1456                 for (;;) {
1457                         u32 item = c->optimum_nodes[node_idx].item;
1458
1459                         len = item & OPTIMUM_LEN_MASK;
1460                         offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1461
1462                         if (len != 0) /* Not a literal?  */
1463                                 break;
1464
1465                         /* Tally the main symbol for the literal.  */
1466                         c->freqs.main[offset_data]++;
1467
1468                         if (--node_idx == 0) /* Beginning of block was reached?  */
1469                                 goto out;
1470                 }
1471
1472                 /* Save the literal run length for the next sequence (the
1473                  * "previous sequence" when walking backwards).  */
1474                 c->chosen_sequences[seq_idx--].litrunlen = lit_start_node - node_idx;
1475                 node_idx -= len;
1476                 lit_start_node = node_idx;
1477
1478                 /* Record a match.  */
1479
1480                 /* Tally the aligned offset symbol if needed.  */
1481                 if (offset_data >= 16)
1482                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1483
1484                 /* Save the adjusted length.  */
1485                 v = len - LZX_MIN_MATCH_LEN;
1486                 c->chosen_sequences[seq_idx].adjusted_length = v;
1487
1488                 /* Tally the length symbol if needed.  */
1489                 if (v >= LZX_NUM_PRIMARY_LENS) {
1490                         c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1491                         v = LZX_NUM_PRIMARY_LENS;
1492                 }
1493
1494                 /* Tally the main symbol.  */
1495                 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1496                 v += offset_slot * LZX_NUM_LEN_HEADERS;
1497                 c->freqs.main[LZX_NUM_CHARS + v]++;
1498
1499                 /* Save the adjusted offset and match header.  */
1500                 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr =
1501                                 (offset_data << 9) | v;
1502
1503                 if (node_idx == 0) /* Beginning of block was reached?  */
1504                         goto out;
1505         }
1506
1507 out:
1508         /* Save the literal run length for the first sequence.  */
1509         c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
1510
1511         /* Return the index in c->chosen_sequences at which the lzx_sequences
1512          * begin.  */
1513         return seq_idx;
1514 }
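
/*
 * For example (hypothetical block): a path that, read forwards, consists of 2
 * literals, match A, 3 literals, match B and 1 trailing literal is recorded as
 * chosen_sequences[i]     = {litrunlen 2, match A},
 * chosen_sequences[i + 1] = {litrunlen 3, match B}, and
 * chosen_sequences[i + 2] = {litrunlen 1, last-sequence marker},
 * where 'i' is the returned index.
 */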
1515
1516 /*
1517  * Find an inexpensive path through the graph of possible match/literal choices
1518  * for the current block.  The nodes of the graph are
1519  * c->optimum_nodes[0...block_size].  They correspond directly to the bytes in
1520  * the current block, plus one extra node for end-of-block.  The edges of the
1521  * graph are matches and literals.  The goal is to find the minimum cost path
1522  * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
1523  *
1524  * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1525  * proceeding forwards one node at a time.  At each node, a selection of matches
1526  * (len >= 2), as well as the literal byte (len = 1), is considered.  An item of
1527  * length 'len' provides a new path to reach the node 'len' bytes later.  If
1528  * such a path is the lowest cost found so far to reach that later node, then
1529  * that later node is updated with the new path.
1530  *
1531  * Note that although this algorithm is based on minimum cost path search, due
1532  * to various simplifying assumptions the result is not guaranteed to be the
1533  * true minimum cost, or "optimal", path over the graph of all valid LZX
1534  * representations of this block.
1535  *
1536  * Also, note that because of the presence of the recent offsets queue (which is
1537  * a type of adaptive state), the algorithm cannot work backwards and compute
1538  * "cost to end" instead of "cost to beginning".  Furthermore, the way the
1539  * algorithm handles this adaptive state in the "minimum cost" parse is actually
1540  * only an approximation.  It's possible for the globally optimal, minimum cost
1541  * path to reach some intermediate position via a prefix that is *not* the
1542  * minimum cost path to that position.  This can happen if such a path
1543  * prefix results in a different adaptive state which results in lower costs
1544  * later.  The algorithm does not solve this problem; it only considers the
1545  * lowest cost to reach each individual position.
1546  */
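
/*
 * The core of this search, reduced to a sketch (not compiled): every node
 * keeps the cheapest known cost to reach it, and each item leaving a node
 * "relaxes" the node it lands on.  The real implementation below additionally
 * carries the recent offsets queue, a cached match list and the LZX cost
 * model; the 'edge_cost' callback here is purely hypothetical.
 */
#if 0
struct toy_node { u32 cost; u32 item; };

static u32
toy_min_cost_path(struct toy_node *nodes, u32 block_size,
                  u32 (*edge_cost)(u32 pos, u32 len))
{
        nodes[0].cost = 0;
        for (u32 i = 1; i <= block_size; i++)
                nodes[i].cost = (u32)-1;        /* "infinity" */

        for (u32 i = 0; i < block_size; i++) {
                /* Edges out of node i: a literal (len 1) and, in this toy,
                 * matches of length 2 and 3.  */
                for (u32 len = 1; len <= 3 && i + len <= block_size; len++) {
                        u32 cost = nodes[i].cost + edge_cost(i, len);
                        if (cost < nodes[i + len].cost) {
                                nodes[i + len].cost = cost;
                                nodes[i + len].item = len; /* how we got here */
                        }
                }
        }
        return nodes[block_size].cost;  /* cheapest path to end-of-block */
}
#endif
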
1547 static inline struct lzx_lru_queue
1548 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1549                        const u8 * const restrict block_begin,
1550                        const u32 block_size,
1551                        const struct lzx_lru_queue initial_queue,
1552                        bool is_16_bit)
1553 {
1554         struct lzx_optimum_node *cur_node = c->optimum_nodes;
1555         struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
1556         struct lz_match *cache_ptr = c->match_cache;
1557         const u8 *in_next = block_begin;
1558         const u8 * const block_end = block_begin + block_size;
1559
1560         /* Instead of storing the match offset LRU queues in the
1561          * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1562          * storing them in a smaller array.  This works because the algorithm
1563          * only requires a limited history of the adaptive state.  Once a given
1564          * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1565          * it is no longer needed.  */
1566         struct lzx_lru_queue queues[512];
1567
1568         STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
1569 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
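
        /* For example, QUEUE(in_next) and QUEUE(in_next - LZX_MAX_MATCH_LEN)
         * always refer to distinct slots because LZX_MAX_MATCH_LEN (257) is
         * less than ARRAY_LEN(queues) (512), so only queues that are already
         * stale ever get overwritten.  */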
1570
1571         /* Initially, the cost to reach each node is "infinity".  */
1572         memset(c->optimum_nodes, 0xFF,
1573                (block_size + 1) * sizeof(c->optimum_nodes[0]));
1574
1575         QUEUE(block_begin) = initial_queue;
1576
1577         /* The following loop runs 'block_size' iterations, one per node.  */
1578         do {
1579                 unsigned num_matches;
1580                 unsigned literal;
1581                 u32 cost;
1582
1583                 /*
1584                  * A selection of matches for the block was already saved in
1585                  * memory so that we don't have to run the uncompressed data
1586                  * through the matchfinder on every optimization pass.  However,
1587                  * we still search for repeat offset matches during each
1588                  * optimization pass because we cannot predict the state of the
1589                  * recent offsets queue.  But as a heuristic, we don't bother
1590                  * searching for repeat offset matches if the general-purpose
1591                  * matchfinder failed to find any matches.
1592                  *
1593                  * Note that a match of length n at some offset implies there is
1594                  * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1595                  * that same offset.  In other words, we don't necessarily need
1596                  * to use the full length of a match.  The key heuristic that
1597                  * saves a significant amount of time is that for each
1598                  * distinct length, we only consider the smallest offset for
1599                  * which that length is available.  This heuristic also applies
1600                  * to repeat offsets, which we order specially: R0 < R1 < R2 <
1601                  * any explicit offset.  Of course, this heuristic may
1602                  * produce suboptimal results because offset slots in LZX are
1603                  * subject to entropy encoding, but in practice this is a useful
1604                  * heuristic.
1605                  */
1606
1607                 num_matches = cache_ptr->length;
1608                 cache_ptr++;
1609
1610                 if (num_matches) {
1611                         struct lz_match *end_matches = cache_ptr + num_matches;
1612                         unsigned next_len = LZX_MIN_MATCH_LEN;
1613                         unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1614                         const u8 *matchptr;
1615
1616                         /* Consider R0 match  */
1617                         matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1618                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1619                                 goto R0_done;
1620                         STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
1621                         do {
1622                                 u32 cost = cur_node->cost +
1623                                            c->costs.match_cost[0][
1624                                                         next_len - LZX_MIN_MATCH_LEN];
1625                                 if (cost <= (cur_node + next_len)->cost) {
1626                                         (cur_node + next_len)->cost = cost;
1627                                         (cur_node + next_len)->item =
1628                                                 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1629                                 }
1630                                 if (unlikely(++next_len > max_len)) {
1631                                         cache_ptr = end_matches;
1632                                         goto done_matches;
1633                                 }
1634                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1635
1636                 R0_done:
1637
1638                         /* Consider R1 match  */
1639                         matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1640                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1641                                 goto R1_done;
1642                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1643                                 goto R1_done;
1644                         for (unsigned len = 2; len < next_len - 1; len++)
1645                                 if (matchptr[len] != in_next[len])
1646                                         goto R1_done;
1647                         do {
1648                                 u32 cost = cur_node->cost +
1649                                            c->costs.match_cost[1][
1650                                                         next_len - LZX_MIN_MATCH_LEN];
1651                                 if (cost <= (cur_node + next_len)->cost) {
1652                                         (cur_node + next_len)->cost = cost;
1653                                         (cur_node + next_len)->item =
1654                                                 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1655                                 }
1656                                 if (unlikely(++next_len > max_len)) {
1657                                         cache_ptr = end_matches;
1658                                         goto done_matches;
1659                                 }
1660                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1661
1662                 R1_done:
1663
1664                         /* Consider R2 match  */
1665                         matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1666                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1667                                 goto R2_done;
1668                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1669                                 goto R2_done;
1670                         for (unsigned len = 2; len < next_len - 1; len++)
1671                                 if (matchptr[len] != in_next[len])
1672                                         goto R2_done;
1673                         do {
1674                                 u32 cost = cur_node->cost +
1675                                            c->costs.match_cost[2][
1676                                                         next_len - LZX_MIN_MATCH_LEN];
1677                                 if (cost <= (cur_node + next_len)->cost) {
1678                                         (cur_node + next_len)->cost = cost;
1679                                         (cur_node + next_len)->item =
1680                                                 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1681                                 }
1682                                 if (unlikely(++next_len > max_len)) {
1683                                         cache_ptr = end_matches;
1684                                         goto done_matches;
1685                                 }
1686                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1687
1688                 R2_done:
1689
1690                         while (next_len > cache_ptr->length)
1691                                 if (++cache_ptr == end_matches)
1692                                         goto done_matches;
1693
1694                         /* Consider explicit offset matches  */
1695                         do {
1696                                 u32 offset = cache_ptr->offset;
1697                                 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1698                                 unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
1699                                                                                 is_16_bit);
1700                                 u32 base_cost = cur_node->cost;
1701
1702                         #if LZX_CONSIDER_ALIGNED_COSTS
1703                                 if (offset_data >= 16)
1704                                         base_cost += c->costs.aligned[offset_data &
1705                                                                       LZX_ALIGNED_OFFSET_BITMASK];
1706                         #endif
1707
1708                                 do {
1709                                         u32 cost = base_cost +
1710                                                    c->costs.match_cost[offset_slot][
1711                                                                 next_len - LZX_MIN_MATCH_LEN];
1712                                         if (cost < (cur_node + next_len)->cost) {
1713                                                 (cur_node + next_len)->cost = cost;
1714                                                 (cur_node + next_len)->item =
1715                                                         (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1716                                         }
1717                                 } while (++next_len <= cache_ptr->length);
1718                         } while (++cache_ptr != end_matches);
1719                 }
1720
1721         done_matches:
1722
1723                 /* Consider coding a literal.
1724                  *
1725                  * To avoid an extra branch, actually checking the preferability
1726                  * of coding the literal is integrated into the queue update
1727                  * code below.  */
1728                 literal = *in_next++;
1729                 cost = cur_node->cost + c->costs.main[literal];
1730
1731                 /* Advance to the next position.  */
1732                 cur_node++;
1733
1734                 /* The lowest-cost path to the current position is now known.
1735                  * Finalize the recent offsets queue that results from taking
1736                  * this lowest-cost path.  */
1737
1738                 if (cost <= cur_node->cost) {
1739                         /* Literal: queue remains unchanged.  */
1740                         cur_node->cost = cost;
1741                         cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
1742                         QUEUE(in_next) = QUEUE(in_next - 1);
1743                 } else {
1744                         /* Match: queue update is needed.  */
1745                         unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1746                         u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1747                         if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1748                                 /* Explicit offset match: insert offset at front  */
1749                                 QUEUE(in_next) =
1750                                         lzx_lru_queue_push(QUEUE(in_next - len),
1751                                                            offset_data - LZX_OFFSET_ADJUSTMENT);
1752                         } else {
1753                                 /* Repeat offset match: swap offset to front  */
1754                                 QUEUE(in_next) =
1755                                         lzx_lru_queue_swap(QUEUE(in_next - len),
1756                                                            offset_data);
1757                         }
1758                 }
1759         } while (cur_node != end_node);
1760
1761         /* Return the match offset queue at the end of the minimum cost path. */
1762         return QUEUE(block_end);
1763 }
1764
1765 /* Given the costs for the main and length codewords, compute 'match_costs'.  */
1766 static void
1767 lzx_compute_match_costs(struct lzx_compressor *c)
1768 {
1769         unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) /
1770                                         LZX_NUM_LEN_HEADERS;
1771         struct lzx_costs *costs = &c->costs;
1772
1773         for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1774
1775                 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1776                 unsigned main_symbol = LZX_NUM_CHARS + (offset_slot *
1777                                                         LZX_NUM_LEN_HEADERS);
1778                 unsigned i;
1779
1780         #if LZX_CONSIDER_ALIGNED_COSTS
1781                 if (offset_slot >= 8)
1782                         extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1783         #endif
1784
1785                 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1786                         costs->match_cost[offset_slot][i] =
1787                                 costs->main[main_symbol++] + extra_cost;
1788
1789                 extra_cost += costs->main[main_symbol];
1790
1791                 for (; i < LZX_NUM_LENS; i++)
1792                         costs->match_cost[offset_slot][i] =
1793                                 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
1794         }
1795 }
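
/*
 * For example (hypothetical slot): a match with adjusted length 10 in offset
 * slot 5 is charged
 * main[LZX_NUM_CHARS + 5 * LZX_NUM_LEN_HEADERS + LZX_NUM_PRIMARY_LENS] +
 * len[10 - LZX_NUM_PRIMARY_LENS] + lzx_extra_offset_bits[5] * LZX_BIT_COST,
 * with LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST subtracted for slots >= 8
 * when aligned offset costs are modeled separately.
 */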
1796
1797 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
1798  * algorithm.  */
1799 static void
1800 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1801 {
1802         u32 i;
1803         bool have_byte[256];
1804         unsigned num_used_bytes;
1805
1806         /* The costs below are hard coded to use a scaling factor of 16.  */
1807         STATIC_ASSERT(LZX_BIT_COST == 16);
1808
1809         /*
1810          * Heuristics:
1811          *
1812          * - Use smaller initial costs for literal symbols when the input buffer
1813          *   contains fewer distinct bytes.
1814          *
1815          * - Assume that match symbols are more costly than literal symbols.
1816          *
1817          * - Assume that length symbols for shorter lengths are less costly than
1818          *   length symbols for longer lengths.
1819          */
1820
1821         for (i = 0; i < 256; i++)
1822                 have_byte[i] = false;
1823
1824         for (i = 0; i < block_size; i++)
1825                 have_byte[block[i]] = true;
1826
1827         num_used_bytes = 0;
1828         for (i = 0; i < 256; i++)
1829                 num_used_bytes += have_byte[i];
1830
1831         for (i = 0; i < 256; i++)
1832                 c->costs.main[i] = 140 - (256 - num_used_bytes) / 4;
1833
1834         for (; i < c->num_main_syms; i++)
1835                 c->costs.main[i] = 170;
1836
1837         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1838                 c->costs.len[i] = 103 + (i / 4);
1839
1840 #if LZX_CONSIDER_ALIGNED_COSTS
1841         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1842                 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1843 #endif
1844
1845         lzx_compute_match_costs(c);
1846 }
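
/*
 * For example (hypothetical block): if a block uses 64 distinct byte values,
 * each literal symbol gets a default cost of 140 - (256 - 64) / 4 = 92, i.e.
 * 92 / 16 = 5.75 bits, while match symbols default to 170 / 16 = 10.625 bits
 * and length symbols start at 103 / 16 ~= 6.4 bits.
 */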
1847
1848 /* Update the current cost model to reflect the computed Huffman codes.  */
1849 static void
1850 lzx_update_costs(struct lzx_compressor *c)
1851 {
1852         unsigned i;
1853         const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1854
1855         for (i = 0; i < c->num_main_syms; i++) {
1856                 c->costs.main[i] = (lens->main[i] ? lens->main[i] :
1857                                     MAIN_CODEWORD_LIMIT) * LZX_BIT_COST;
1858         }
1859
1860         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
1861                 c->costs.len[i] = (lens->len[i] ? lens->len[i] :
1862                                    LENGTH_CODEWORD_LIMIT) * LZX_BIT_COST;
1863         }
1864
1865 #if LZX_CONSIDER_ALIGNED_COSTS
1866         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1867                 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] :
1868                                        ALIGNED_CODEWORD_LIMIT) * LZX_BIT_COST;
1869         }
1870 #endif
1871
1872         lzx_compute_match_costs(c);
1873 }
1874
1875 static inline struct lzx_lru_queue
1876 lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
1877                              struct lzx_output_bitstream * const restrict os,
1878                              const u8 * const restrict block_begin,
1879                              const u32 block_size,
1880                              const struct lzx_lru_queue initial_queue,
1881                              bool is_16_bit)
1882 {
1883         unsigned num_passes_remaining = c->num_optim_passes;
1884         struct lzx_lru_queue new_queue;
1885         u32 seq_idx;
1886
1887         /* The first optimization pass uses a default cost model.  Each
1888          * additional optimization pass uses a cost model derived from the
1889          * Huffman code computed in the previous pass.  */
1890
1891         lzx_set_default_costs(c, block_begin, block_size);
1892         lzx_reset_symbol_frequencies(c);
1893         do {
1894                 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1895                                                    initial_queue, is_16_bit);
1896                 if (num_passes_remaining > 1) {
1897                         lzx_tally_item_list(c, block_size, is_16_bit);
1898                         lzx_make_huffman_codes(c);
1899                         lzx_update_costs(c);
1900                         lzx_reset_symbol_frequencies(c);
1901                 }
1902         } while (--num_passes_remaining);
1903
1904         seq_idx = lzx_record_item_list(c, block_size, is_16_bit);
1905         lzx_finish_block(c, os, block_begin, block_size, seq_idx);
1906         return new_queue;
1907 }
1908
1909 /*
1910  * This is the "near-optimal" LZX compressor.
1911  *
1912  * For each block, it performs a relatively thorough graph search to find an
1913  * inexpensive (in terms of compressed size) way to output that block.
1914  *
1915  * Note: there are actually many things this algorithm leaves on the table in
1916  * terms of compression ratio.  So although it may be "near-optimal", it is
1917  * certainly not "optimal".  The goal is not to produce the optimal compression
1918  * ratio, which for LZX is probably impossible within any practical amount of
1919  * time, but rather to produce a compression ratio significantly better than a
1920  * simpler "greedy" or "lazy" parse while still being relatively fast.
1921  */
1922 static inline void
1923 lzx_compress_near_optimal(struct lzx_compressor *c,
1924                           struct lzx_output_bitstream *os,
1925                           bool is_16_bit)
1926 {
1927         const u8 * const in_begin = c->in_buffer;
1928         const u8 *       in_next = in_begin;
1929         const u8 * const in_end  = in_begin + c->in_nbytes;
1930         u32 max_len = LZX_MAX_MATCH_LEN;
1931         u32 nice_len = min(c->nice_match_length, max_len);
1932         u32 next_hashes[2] = {};
1933         struct lzx_lru_queue queue;
1934
1935         CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
1936         lzx_lru_queue_init(&queue);
1937
1938         do {
1939                 /* Starting a new block  */
1940                 const u8 * const in_block_begin = in_next;
1941                 const u8 * const in_max_block_end =
1942                         in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
1943                 const u8 *next_observation = in_next;
1944
1945                 init_block_split_stats(&c->split_stats);
1946
1947                 /* Run the block through the matchfinder and cache the matches. */
1948                 struct lz_match *cache_ptr = c->match_cache;
1949                 do {
1950                         struct lz_match *lz_matchptr;
1951                         u32 best_len;
1952
1953                         /* If approaching the end of the input buffer, adjust
1954                          * 'max_len' and 'nice_len' accordingly.  */
1955                         if (unlikely(max_len > in_end - in_next)) {
1956                                 max_len = in_end - in_next;
1957                                 nice_len = min(max_len, nice_len);
1958                                 if (unlikely(max_len <
1959                                              BT_MATCHFINDER_REQUIRED_NBYTES))
1960                                 {
1961                                         in_next++;
1962                                         cache_ptr->length = 0;
1963                                         cache_ptr++;
1964                                         continue;
1965                                 }
1966                         }
1967
1968                         /* Check for matches.  */
1969                         lz_matchptr = CALL_BT_MF(is_16_bit, c,
1970                                                  bt_matchfinder_get_matches,
1971                                                  in_begin,
1972                                                  in_next - in_begin,
1973                                                  max_len,
1974                                                  nice_len,
1975                                                  c->max_search_depth,
1976                                                  next_hashes,
1977                                                  &best_len,
1978                                                  cache_ptr + 1);
1979
1980                         if (in_next >= next_observation) {
1981                                 best_len = 0;
1982                                 if (lz_matchptr > cache_ptr + 1)
1983                                         best_len = (lz_matchptr - 1)->length;
1984                                 if (best_len >= 2) {
1985                                         observe_match(&c->split_stats, best_len);
1986                                         next_observation = in_next + best_len;
1987                                 } else {
1988                                         observe_literal(&c->split_stats, *in_next);
1989                                         next_observation = in_next + 1;
1990                                 }
1991                         }
1992
1993                         in_next++;
1994                         cache_ptr->length = lz_matchptr - (cache_ptr + 1);
1995                         cache_ptr = lz_matchptr;
1996
1997                         /*
1998                          * If there was a very long match found, then don't
1999                          * cache any matches for the bytes covered by that
2000                          * match.  This avoids degenerate behavior when
2001                          * compressing highly redundant data, where the number
2002                          * of matches can be very large.
2003                          *
2004                          * This heuristic doesn't actually hurt the compression
2005                          * ratio very much.  If there's a long match, then the
2006                          * data must be highly compressible, so it doesn't
2007                          * matter as much what we do.
2008                          */
2009                         if (best_len >= nice_len) {
2010                                 --best_len;
2011                                 do {
2012                                         if (unlikely(max_len > in_end - in_next)) {
2013                                                 max_len = in_end - in_next;
2014                                                 nice_len = min(max_len, nice_len);
2015                                                 if (unlikely(max_len <
2016                                                              BT_MATCHFINDER_REQUIRED_NBYTES))
2017                                                 {
2018                                                         in_next++;
2019                                                         cache_ptr->length = 0;
2020                                                         cache_ptr++;
2021                                                         continue;
2022                                                 }
2023                                         }
2024                                         CALL_BT_MF(is_16_bit, c,
2025                                                    bt_matchfinder_skip_position,
2026                                                    in_begin,
2027                                                    in_next - in_begin,
2028                                                    max_len,
2029                                                    nice_len,
2030                                                    c->max_search_depth,
2031                                                    next_hashes);
2032                                         in_next++;
2033                                         cache_ptr->length = 0;
2034                                         cache_ptr++;
2035                                 } while (--best_len);
2036                         }
2037                 } while (in_next < in_max_block_end &&
2038                          likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]) &&
2039                          !should_end_block(&c->split_stats, in_block_begin, in_next, in_end));
2040
2041                 /* We've finished running the block through the matchfinder.
2042                  * Now choose a match/literal sequence and write the block.  */
2043
2044                 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
2045                                                      in_next - in_block_begin,
2046                                                      queue, is_16_bit);
2047         } while (in_next != in_end);
2048 }
2049
2050 static void
2051 lzx_compress_near_optimal_16(struct lzx_compressor *c,
2052                              struct lzx_output_bitstream *os)
2053 {
2054         lzx_compress_near_optimal(c, os, true);
2055 }
2056
2057 static void
2058 lzx_compress_near_optimal_32(struct lzx_compressor *c,
2059                              struct lzx_output_bitstream *os)
2060 {
2061         lzx_compress_near_optimal(c, os, false);
2062 }
2063
2064 /*
2065  * Given a pointer to the current byte sequence and the current list of recent
2066  * match offsets, find the longest repeat offset match.
2067  *
2068  * If no match of at least 2 bytes is found, then return 0.
2069  *
2070  * If a match of at least 2 bytes is found, then return its length and set
2071  * *rep_max_idx_ret to the index of its offset in @recent_offsets.
2072  */
2073 static unsigned
2074 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
2075                                      const u32 bytes_remaining,
2076                                      const u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
2077                                      unsigned *rep_max_idx_ret)
2078 {
2079         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
2080
2081         const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
2082         const u16 next_2_bytes = load_u16_unaligned(in_next);
2083         const u8 *matchptr;
2084         unsigned rep_max_len;
2085         unsigned rep_max_idx;
2086         unsigned rep_len;
2087
2088         matchptr = in_next - recent_offsets[0];
2089         if (load_u16_unaligned(matchptr) == next_2_bytes)
2090                 rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
2091         else
2092                 rep_max_len = 0;
2093         rep_max_idx = 0;
2094
2095         matchptr = in_next - recent_offsets[1];
2096         if (load_u16_unaligned(matchptr) == next_2_bytes) {
2097                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
2098                 if (rep_len > rep_max_len) {
2099                         rep_max_len = rep_len;
2100                         rep_max_idx = 1;
2101                 }
2102         }
2103
2104         matchptr = in_next - recent_offsets[2];
2105         if (load_u16_unaligned(matchptr) == next_2_bytes) {
2106                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
2107                 if (rep_len > rep_max_len) {
2108                         rep_max_len = rep_len;
2109                         rep_max_idx = 2;
2110                 }
2111         }
2112
2113         *rep_max_idx_ret = rep_max_idx;
2114         return rep_max_len;
2115 }
2116
2117 /* Fast heuristic scoring for lazy parsing: how "good" is this match?  */
2118 static inline unsigned
2119 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
2120 {
2121         unsigned score = len;
2122
2123         if (adjusted_offset < 4096)
2124                 score++;
2125
2126         if (adjusted_offset < 256)
2127                 score++;
2128
2129         return score;
2130 }
2131
2132 static inline unsigned
2133 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
2134 {
2135         return rep_len + 3;
2136 }
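
/* For example (hypothetical matches): a length-4 match at adjusted offset 100
 * scores 4 + 1 + 1 = 6, a length-5 match at adjusted offset 100000 scores only
 * 5, and a length-3 repeat offset match scores 3 + 3 = 6, so nearby or
 * repeat-offset matches can win over longer but more distant ones.  */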
2137
2138 /* This is the "lazy" LZX compressor.  */
2139 static inline void
2140 lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
2141                   bool is_16_bit)
2142 {
2143         const u8 * const in_begin = c->in_buffer;
2144         const u8 *       in_next = in_begin;
2145         const u8 * const in_end  = in_begin + c->in_nbytes;
2146         unsigned max_len = LZX_MAX_MATCH_LEN;
2147         unsigned nice_len = min(c->nice_match_length, max_len);
2148         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
2149         u32 recent_offsets[3] = {1, 1, 1};
2150         u32 next_hashes[2] = {};
2151
2152         CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
2153
2154         do {
2155                 /* Starting a new block  */
2156
2157                 const u8 * const in_block_begin = in_next;
2158                 const u8 * const in_max_block_end =
2159                         in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
2160                 struct lzx_sequence *next_seq = c->chosen_sequences;
2161                 unsigned cur_len;
2162                 u32 cur_offset;
2163                 u32 cur_offset_data;
2164                 unsigned cur_score;
2165                 unsigned next_len;
2166                 u32 next_offset;
2167                 u32 next_offset_data;
2168                 unsigned next_score;
2169                 unsigned rep_max_len;
2170                 unsigned rep_max_idx;
2171                 unsigned rep_score;
2172                 unsigned skip_len;
2173                 u32 litrunlen = 0;
2174
2175                 lzx_reset_symbol_frequencies(c);
2176                 init_block_split_stats(&c->split_stats);
2177
2178                 do {
2179                         if (unlikely(max_len > in_end - in_next)) {
2180                                 max_len = in_end - in_next;
2181                                 nice_len = min(max_len, nice_len);
2182                         }
2183
2184                         /* Find the longest match at the current position.  */
2185
2186                         cur_len = CALL_HC_MF(is_16_bit, c,
2187                                              hc_matchfinder_longest_match,
2188                                              in_begin,
2189                                              in_next - in_begin,
2190                                              2,
2191                                              max_len,
2192                                              nice_len,
2193                                              c->max_search_depth,
2194                                              next_hashes,
2195                                              &cur_offset);
2196                         if (cur_len < 3 ||
2197                             (cur_len == 3 &&
2198                              cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
2199                              cur_offset != recent_offsets[0] &&
2200                              cur_offset != recent_offsets[1] &&
2201                              cur_offset != recent_offsets[2]))
2202                         {
2203                                 /* There was no match found, or the only match found
2204                                  * was a distant length 3 match.  Output a literal.  */
2205                                 lzx_record_literal(c, *in_next, &litrunlen);
2206                                 observe_literal(&c->split_stats, *in_next);
2207                                 in_next++;
2208                                 continue;
2209                         }
2210
2211                         observe_match(&c->split_stats, cur_len);
2212
2213                         if (cur_offset == recent_offsets[0]) {
2214                                 in_next++;
2215                                 cur_offset_data = 0;
2216                                 skip_len = cur_len - 1;
2217                                 goto choose_cur_match;
2218                         }
2219
2220                         cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
2221                         cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
2222
2223                         /* Consider a repeat offset match  */
2224                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2225                                                                            in_end - in_next,
2226                                                                            recent_offsets,
2227                                                                            &rep_max_idx);
2228                         in_next++;
2229
2230                         if (rep_max_len >= 3 &&
2231                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2232                                                                        rep_max_idx)) >= cur_score)
2233                         {
2234                                 cur_len = rep_max_len;
2235                                 cur_offset_data = rep_max_idx;
2236                                 skip_len = rep_max_len - 1;
2237                                 goto choose_cur_match;
2238                         }
2239
2240                 have_cur_match:
2241
2242                         /* We have a match at the current position.  */
2243
2244                         /* If we have a very long match, choose it immediately.  */
2245                         if (cur_len >= nice_len) {
2246                                 skip_len = cur_len - 1;
2247                                 goto choose_cur_match;
2248                         }
2249
2250                         /* See if there's a better match at the next position.  */
2251
2252                         if (unlikely(max_len > in_end - in_next)) {
2253                                 max_len = in_end - in_next;
2254                                 nice_len = min(max_len, nice_len);
2255                         }
2256
2257                         next_len = CALL_HC_MF(is_16_bit, c,
2258                                               hc_matchfinder_longest_match,
2259                                               in_begin,
2260                                               in_next - in_begin,
2261                                               cur_len - 2,
2262                                               max_len,
2263                                               nice_len,
2264                                               c->max_search_depth / 2,
2265                                               next_hashes,
2266                                               &next_offset);
2267
2268                         if (next_len <= cur_len - 2) {
2269                                 in_next++;
2270                                 skip_len = cur_len - 2;
2271                                 goto choose_cur_match;
2272                         }
2273
2274                         next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
2275                         next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
2276
2277                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2278                                                                            in_end - in_next,
2279                                                                            recent_offsets,
2280                                                                            &rep_max_idx);
2281                         in_next++;
2282
2283                         if (rep_max_len >= 3 &&
2284                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2285                                                                        rep_max_idx)) >= next_score)
2286                         {
2287
2288                                 if (rep_score > cur_score) {
2289                                         /* The next match is better, and it's a
2290                                          * repeat offset match.  */
2291                                         lzx_record_literal(c, *(in_next - 2),
2292                                                            &litrunlen);
2293                                         cur_len = rep_max_len;
2294                                         cur_offset_data = rep_max_idx;
2295                                         skip_len = cur_len - 1;
2296                                         goto choose_cur_match;
2297                                 }
2298                         } else {
2299                                 if (next_score > cur_score) {
2300                                         /* The next match is better, and it's an
2301                                          * explicit offset match.  */
2302                                         lzx_record_literal(c, *(in_next - 2),
2303                                                            &litrunlen);
2304                                         cur_len = next_len;
2305                                         cur_offset_data = next_offset_data;
2306                                         cur_score = next_score;
2307                                         goto have_cur_match;
2308                                 }
2309                         }
2310
2311                         /* The original match was better.  */
2312                         skip_len = cur_len - 2;
2313
2314                 choose_cur_match:
2315                         lzx_record_match(c, cur_len, cur_offset_data,
2316                                          recent_offsets, is_16_bit,
2317                                          &litrunlen, &next_seq);
2318                         in_next = CALL_HC_MF(is_16_bit, c,
2319                                              hc_matchfinder_skip_positions,
2320                                              in_begin,
2321                                              in_next - in_begin,
2322                                              in_end - in_begin,
2323                                              skip_len,
2324                                              next_hashes);
2325                 } while (in_next < in_max_block_end &&
2326                          !should_end_block(&c->split_stats, in_block_begin, in_next, in_end));
2327
2328                 lzx_finish_sequence(next_seq, litrunlen);
2329
2330                 lzx_finish_block(c, os, in_block_begin, in_next - in_block_begin, 0);
2331
2332         } while (in_next != in_end);
2333 }
2334
2335 static void
2336 lzx_compress_lazy_16(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2337 {
2338         lzx_compress_lazy(c, os, true);
2339 }
2340
2341 static void
2342 lzx_compress_lazy_32(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2343 {
2344         lzx_compress_lazy(c, os, false);
2345 }
2346
2347 /* Generate the acceleration tables for offset slots.  */
2348 static void
2349 lzx_init_offset_slot_tabs(struct lzx_compressor *c)
2350 {
2351         u32 adjusted_offset = 0;
2352         unsigned slot = 0;
2353
2354         /* slots [0, 29]  */
2355         for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
2356              adjusted_offset++)
2357         {
2358                 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2359                         slot++;
2360                 c->offset_slot_tab_1[adjusted_offset] = slot;
2361         }
2362
2363         /* slots [30, 49]  */
2364         for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
2365              adjusted_offset += (u32)1 << 14)
2366         {
2367                 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2368                         slot++;
2369                 c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
2370         }
2371 }
2372
2373 static size_t
2374 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
2375 {
2376         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2377                 if (lzx_is_16_bit(max_bufsize))
2378                         return offsetof(struct lzx_compressor, hc_mf_16) +
2379                                hc_matchfinder_size_16(max_bufsize);
2380                 else
2381                         return offsetof(struct lzx_compressor, hc_mf_32) +
2382                                hc_matchfinder_size_32(max_bufsize);
2383         } else {
2384                 if (lzx_is_16_bit(max_bufsize))
2385                         return offsetof(struct lzx_compressor, bt_mf_16) +
2386                                bt_matchfinder_size_16(max_bufsize);
2387                 else
2388                         return offsetof(struct lzx_compressor, bt_mf_32) +
2389                                bt_matchfinder_size_32(max_bufsize);
2390         }
2391 }
2392
2393 static u64
2394 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
2395                       bool destructive)
2396 {
2397         u64 size = 0;
2398
2399         if (max_bufsize > LZX_MAX_WINDOW_SIZE)
2400                 return 0;
2401
2402         size += lzx_get_compressor_size(max_bufsize, compression_level);
2403         if (!destructive)
2404                 size += max_bufsize; /* in_buffer */
2405         return size;
2406 }
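/*
 * Illustration (not part of the original source): for any supported
 * max_bufsize, destructive mode saves exactly one staging copy of the input
 * (max_bufsize bytes), since the compressor then preprocesses the caller's
 * buffer in place.  This hypothetical, unused helper states that difference
 * in code:
 */
static u64
lzx_destructive_savings_example(size_t max_bufsize, unsigned compression_level)
{
        return lzx_get_needed_memory(max_bufsize, compression_level, false) -
               lzx_get_needed_memory(max_bufsize, compression_level, true);
}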
2407
2408 static int
2409 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
2410                       bool destructive, void **c_ret)
2411 {
2412         unsigned window_order;
2413         struct lzx_compressor *c;
2414
2415         window_order = lzx_get_window_order(max_bufsize);
2416         if (window_order == 0)
2417                 return WIMLIB_ERR_INVALID_PARAM;
2418
2419         c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
2420         if (!c)
2421                 goto oom0;
2422
2423         c->destructive = destructive;
2424
2425         c->num_main_syms = lzx_get_num_main_syms(window_order);
2426         c->window_order = window_order;
2427
2428         if (!c->destructive) {
2429                 c->in_buffer = MALLOC(max_bufsize);
2430                 if (!c->in_buffer)
2431                         goto oom1;
2432         }
2433
2434         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2435
2436                 /* Fast compression: Use lazy parsing.  */
2437
2438                 if (lzx_is_16_bit(max_bufsize))
2439                         c->impl = lzx_compress_lazy_16;
2440                 else
2441                         c->impl = lzx_compress_lazy_32;
2442                 c->max_search_depth = (60 * compression_level) / 20;
2443                 c->nice_match_length = (80 * compression_level) / 20;
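                /* Worked example (added for illustration): the formulas above
                 * give max_search_depth = 30 and nice_match_length = 40 at
                 * compression_level == 10, and 60 and 80 respectively at
                 * compression_level == 20.  */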
2444
2445                 /* lzx_compress_lazy() needs max_search_depth >= 2 because it
2446                  * halves the max_search_depth when attempting a lazy match, and
2447                  * max_search_depth cannot be 0.  */
2448                 if (c->max_search_depth < 2)
2449                         c->max_search_depth = 2;
2450         } else {
2451
2452                 /* Normal / high compression: Use near-optimal parsing.  */
2453
2454                 if (lzx_is_16_bit(max_bufsize))
2455                         c->impl = lzx_compress_near_optimal_16;
2456                 else
2457                         c->impl = lzx_compress_near_optimal_32;
2458
2459                 /* Scale nice_match_length and max_search_depth with the
2460                  * compression level.  */
2461                 c->max_search_depth = (24 * compression_level) / 50;
2462                 c->nice_match_length = (48 * compression_level) / 50;
2463
2464                 /* Set a number of optimization passes appropriate for the
2465                  * compression level.  */
2466
2467                 c->num_optim_passes = 1;
2468
2469                 if (compression_level >= 45)
2470                         c->num_optim_passes++;
2471
2472                 /* Use more optimization passes for higher compression levels.
2473                  * But the more passes there are, the less they help --- so
2474                  * don't add them linearly.  */
2475                 if (compression_level >= 70) {
2476                         c->num_optim_passes++;
2477                         if (compression_level >= 100)
2478                                 c->num_optim_passes++;
2479                         if (compression_level >= 150)
2480                                 c->num_optim_passes++;
2481                         if (compression_level >= 200)
2482                                 c->num_optim_passes++;
2483                         if (compression_level >= 300)
2484                                 c->num_optim_passes++;
2485                 }
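                /* Worked example (added for illustration): at
                 * compression_level == 50 the settings above come out to
                 * max_search_depth = 24, nice_match_length = 48 and 2
                 * optimization passes; at compression_level == 100 they are
                 * 48, 96 and 4 passes.  */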
2486         }
2487
2488         /* max_search_depth == 0 is invalid.  */
2489         if (c->max_search_depth < 1)
2490                 c->max_search_depth = 1;
2491
2492         if (c->nice_match_length > LZX_MAX_MATCH_LEN)
2493                 c->nice_match_length = LZX_MAX_MATCH_LEN;
2494
2495         lzx_init_offset_slot_tabs(c);
2496         *c_ret = c;
2497         return 0;
2498
2499 oom1:
2500         FREE(c);
2501 oom0:
2502         return WIMLIB_ERR_NOMEM;
2503 }
2504
2505 static size_t
2506 lzx_compress(const void *restrict in, size_t in_nbytes,
2507              void *restrict out, size_t out_nbytes_avail, void *restrict _c)
2508 {
2509         struct lzx_compressor *c = _c;
2510         struct lzx_output_bitstream os;
2511         size_t result;
2512
2513         /* Don't bother trying to compress very small inputs.  */
2514         if (in_nbytes < 100)
2515                 return 0;
2516
2517         /* Stage the input: in destructive mode the caller's buffer is used (and modified) in place; otherwise it is copied into the internal buffer.  Then preprocess it.  */
2518         if (c->destructive)
2519                 c->in_buffer = (void *)in;
2520         else
2521                 memcpy(c->in_buffer, in, in_nbytes);
2522         c->in_nbytes = in_nbytes;
2523         lzx_preprocess(c->in_buffer, in_nbytes);
2524
2525         /* Initially, the previous Huffman codeword lengths are all zeroes.  */
2526         c->codes_index = 0;
2527         memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2528
2529         /* Initialize the output bitstream.  */
2530         lzx_init_output(&os, out, out_nbytes_avail);
2531
2532         /* Call the compression level-specific compress() function.  */
2533         (*c->impl)(c, &os);
2534
2535         /* Flush the output bitstream and return the compressed size, or 0 if the compressed data did not fit in the output buffer.  On failure in destructive mode, undo the preprocessing so the caller gets back an unmodified buffer.  */
2536         result = lzx_flush_output(&os);
2537         if (!result && c->destructive)
2538                 lzx_postprocess(c->in_buffer, c->in_nbytes);
2539         return result;
2540 }
2541
2542 static void
2543 lzx_free_compressor(void *_c)
2544 {
2545         struct lzx_compressor *c = _c;
2546
2547         if (!c->destructive)
2548                 FREE(c->in_buffer);
2549         FREE(c);
2550 }
2551
2552 const struct compressor_ops lzx_compressor_ops = {
2553         .get_needed_memory  = lzx_get_needed_memory,
2554         .create_compressor  = lzx_create_compressor,
2555         .compress           = lzx_compress,
2556         .free_compressor    = lzx_free_compressor,
2557 };
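
/*
 * Usage sketch (illustration, not part of the original source): how the entry
 * points registered in lzx_compressor_ops fit together.  Real callers reach
 * these functions through wimlib's generic compressor API rather than calling
 * them directly; this hypothetical, unused helper keeps error handling
 * minimal.
 */
static size_t
lzx_compress_usage_example(const void *in, size_t in_nbytes,
                           void *out, size_t out_nbytes_avail,
                           unsigned compression_level)
{
        void *c;
        size_t csize;

        /* Non-destructive mode: the input buffer is left unmodified.  */
        if (lzx_create_compressor(in_nbytes, compression_level, false, &c) != 0)
                return 0;

        /* Returns the compressed size, or 0 if the data did not compress
         * into the space available.  */
        csize = lzx_compress(in, in_nbytes, out, out_nbytes_avail, c);

        lzx_free_compressor(c);
        return csize;
}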