1 /*
2  * lzx_compress.c
3  *
4  * A compressor for the LZX compression format, as used in WIM files.
5  */
6
7 /*
8  * Copyright (C) 2012-2016 Eric Biggers
9  *
10  * This file is free software; you can redistribute it and/or modify it under
11  * the terms of the GNU Lesser General Public License as published by the Free
12  * Software Foundation; either version 3 of the License, or (at your option) any
13  * later version.
14  *
15  * This file is distributed in the hope that it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
18  * details.
19  *
20  * You should have received a copy of the GNU Lesser General Public License
21  * along with this file; if not, see http://www.gnu.org/licenses/.
22  */
23
24
25 /*
26  * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27  * compression format, as used in the WIM (Windows IMaging) file format.
28  *
29  * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30  * "Near-optimal" is significantly slower than "lazy", but results in a better
31  * compression ratio.  The "near-optimal" algorithm is used at the default
32  * compression level.
33  *
34  * This file may need some slight modifications to be used outside of the WIM
35  * format.  In particular, in other situations the LZX block header might be
36  * slightly different, and sliding window support might be required.
37  *
38  * Note: LZX is a compression format derived from DEFLATE, the format used by
39  * zlib and gzip.  Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40  * Certain details are quite similar, such as the method for storing Huffman
41  * codes.  However, the main differences are:
42  *
43  * - LZX preprocesses the data to attempt to make x86 machine code slightly more
44  *   compressible before attempting to compress it further.
45  *
46  * - LZX uses a "main" alphabet which combines literals and matches, with the
47  *   match symbols containing a "length header" (giving all or part of the match
48  *   length) and an "offset slot" (giving, roughly speaking, the order of
49  *   magnitude of the match offset).
50  *
51  * - LZX does not have static Huffman blocks (that is, the kind with preset
52  *   Huffman codes); however it does have two types of dynamic Huffman blocks
53  *   ("verbatim" and "aligned").
54  *
55  * - LZX has a minimum match length of 2 rather than 3.  Length 2 matches can be
56  *   useful, but generally only if the parser is smart about choosing them.
57  *
58  * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59  *   of match offsets.  This is very useful for certain types of files, such as
60  *   binary files that have repeating records.
61  */
62
63 #ifdef HAVE_CONFIG_H
64 #  include "config.h"
65 #endif
66
67 /*
68  * The compressor always chooses a block of at least MIN_BLOCK_SIZE bytes,
69  * except if the last block has to be shorter.
70  */
71 #define MIN_BLOCK_SIZE          6500
72
73 /*
74  * The compressor attempts to end blocks after SOFT_MAX_BLOCK_SIZE bytes, but
75  * the final size might be larger due to matches extending beyond the end of the
76  * block.  Specifically:
77  *
78  *  - The greedy parser may choose an arbitrarily long match starting at the
79  *    SOFT_MAX_BLOCK_SIZE'th byte.
80  *
81  *  - The lazy parser may choose a sequence of literals starting at the
82  *    SOFT_MAX_BLOCK_SIZE'th byte when it sees a sequence of increasingly good
83  *    matches.  The final match may be of arbitrary length.  The length of the
84  *    literal sequence is approximately limited by the "nice match length"
85  *    parameter.
86  */
87 #define SOFT_MAX_BLOCK_SIZE     100000
88
89 /*
90  * The number of observed matches or literals that represents sufficient data to
91  * decide whether the current block should be terminated or not.
92  */
93 #define NUM_OBSERVATIONS_PER_BLOCK_CHECK        500
94
95 /*
96  * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
97  * excluding the extra "overflow" entries.  This value should be high enough so
98  * that, nearly all the time, all matches found in a given block can fit in the match
99  * cache.  However, fallback behavior (immediately terminating the block) on
100  * cache overflow is still required.
101  */
102 #define LZX_CACHE_LENGTH        (SOFT_MAX_BLOCK_SIZE * 5)
103
104 /*
105  * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
106  * ever be saved in the match cache for a single position.  Since each match we
107  * save for a single position has a distinct length, we can use the number of
108  * possible match lengths in LZX as this bound.  This bound is guaranteed to be
109  * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
110  * it will never actually be reached.
111  */
112 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
113
114 /*
115  * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
116  * This makes it possible to consider fractional bit costs.
117  *
118  * Note: this is only useful as a statistical trick for when the true costs are
119  * unknown.  In reality, each token in LZX requires a whole number of bits to
120  * output.
121  */
122 #define LZX_BIT_COST            64
123
124 /*
125  * Should the compressor take into account the costs of aligned offset symbols?
126  */
127 #define LZX_CONSIDER_ALIGNED_COSTS      1
128
129 /*
130  * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
131  * faster algorithm.
132  */
133 #define LZX_MAX_FAST_LEVEL      34
134
135 /*
136  * BT_MATCHFINDER_HASH2_ORDER is the log base 2 of the number of entries in the
137  * hash table for finding length 2 matches.  This could be as high as 16, but
138  * using a smaller hash table speeds up compression due to reduced cache
139  * pressure.
140  */
141 #define BT_MATCHFINDER_HASH2_ORDER      12
142
143 /*
144  * These are the compressor-side limits on the codeword lengths for each Huffman
145  * code.  To make outputting bits slightly faster, some of these limits are
146  * lower than the limits defined by the LZX format.  This does not significantly
147  * affect the compression ratio, at least for the block sizes we use.
148  */
149 #define MAIN_CODEWORD_LIMIT     16
150 #define LENGTH_CODEWORD_LIMIT   12
151 #define ALIGNED_CODEWORD_LIMIT  7
152 #define PRE_CODEWORD_LIMIT      7
153
154 #include "wimlib/compress_common.h"
155 #include "wimlib/compressor_ops.h"
156 #include "wimlib/error.h"
157 #include "wimlib/lz_extend.h"
158 #include "wimlib/lzx_common.h"
159 #include "wimlib/unaligned.h"
160 #include "wimlib/util.h"
161
162 /* Matchfinders with 16-bit positions  */
163 #define mf_pos_t        u16
164 #define MF_SUFFIX       _16
165 #include "wimlib/bt_matchfinder.h"
166 #include "wimlib/hc_matchfinder.h"
167
168 /* Matchfinders with 32-bit positions  */
169 #undef mf_pos_t
170 #undef MF_SUFFIX
171 #define mf_pos_t        u32
172 #define MF_SUFFIX       _32
173 #include "wimlib/bt_matchfinder.h"
174 #include "wimlib/hc_matchfinder.h"
175
176 struct lzx_output_bitstream;
177
178 /* Codewords for the LZX Huffman codes.  */
179 struct lzx_codewords {
180         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
181         u32 len[LZX_LENCODE_NUM_SYMBOLS];
182         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
183 };
184
185 /* Codeword lengths (in bits) for the LZX Huffman codes.
186  * A zero length means the corresponding codeword has zero frequency.  */
187 struct lzx_lens {
188         u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
189         u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
190         u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
191 };
192
193 /* Cost model for near-optimal parsing  */
194 struct lzx_costs {
195
196         /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
197          * length 'len' match that has an offset belonging to 'offset_slot'.  */
198         u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
199
200         /* Cost for each symbol in the main code  */
201         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
202
203         /* Cost for each symbol in the length code  */
204         u32 len[LZX_LENCODE_NUM_SYMBOLS];
205
206 #if LZX_CONSIDER_ALIGNED_COSTS
207         /* Cost for each symbol in the aligned code  */
208         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
209 #endif
210 };
211
212 /* Codewords and lengths for the LZX Huffman codes.  */
213 struct lzx_codes {
214         struct lzx_codewords codewords;
215         struct lzx_lens lens;
216 };
217
218 /* Symbol frequency counters for the LZX Huffman codes.  */
219 struct lzx_freqs {
220         u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
221         u32 len[LZX_LENCODE_NUM_SYMBOLS];
222         u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
223 };
224
225 /* Block split statistics.  See "Block splitting algorithm" below. */
226 #define NUM_LITERAL_OBSERVATION_TYPES 8
227 #define NUM_MATCH_OBSERVATION_TYPES 2
228 #define NUM_OBSERVATION_TYPES (NUM_LITERAL_OBSERVATION_TYPES + NUM_MATCH_OBSERVATION_TYPES)
229 struct block_split_stats {
230         u32 new_observations[NUM_OBSERVATION_TYPES];
231         u32 observations[NUM_OBSERVATION_TYPES];
232         u32 num_new_observations;
233         u32 num_observations;
234 };
235
236 /*
237  * Represents a run of literals followed by a match or end-of-block.  This
238  * struct is needed to temporarily store items chosen by the parser, since items
239  * cannot be written until all items for the block have been chosen and the
240  * block's Huffman codes have been computed.
241  */
242 struct lzx_sequence {
243
244         /* The number of literals in the run.  This may be 0.  The literals are
245          * not stored explicitly in this structure; instead, they are read
246          * directly from the uncompressed data.  */
247         u16 litrunlen;
248
249         /* If the next field doesn't indicate end-of-block, then this is the
250          * match length minus LZX_MIN_MATCH_LEN.  */
251         u16 adjusted_length;
252
253         /* If bit 31 is clear, then this field contains the match header in bits
254          * 0-8, and either the match offset plus LZX_OFFSET_ADJUSTMENT or a
255          * recent offset code in bits 9-30.  Otherwise (if bit 31 is set), this
256          * sequence's literal run was the last literal run in the block, so
257          * there is no match that follows it.  */
258         u32 adjusted_offset_and_match_hdr;
259 };
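
/*
 * Illustrative example of how one lzx_sequence is filled in (a sketch based on
 * lzx_record_match() below, assuming the usual LZX constants
 * LZX_MIN_MATCH_LEN == 2 and LZX_NUM_LEN_HEADERS == 8): a run of 3 literals
 * followed by a length-5 match against the most recent offset (recent offset
 * code 0, hence offset slot 0) would be stored as
 *
 *      litrunlen                     = 3
 *      adjusted_length               = 5 - LZX_MIN_MATCH_LEN = 3
 *      adjusted_offset_and_match_hdr = (0 << 9) | (0 * 8 + 3) = 3
 *
 * whereas the final sequence of a block has bit 31 set (0x80000000) and
 * carries only its literal run.
 */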
260
261 /*
262  * This structure represents a byte position in the input buffer and a node in
263  * the graph of possible match/literal choices.
264  *
265  * Logically, each incoming edge to this node is labeled with a literal or a
266  * match that can be taken to reach this position from an earlier position; and
267  * each outgoing edge from this node is labeled with a literal or a match that
268  * can be taken to advance from this position to a later position.
269  */
270 struct lzx_optimum_node {
271
272         /* The cost, in bits, of the lowest-cost path that has been found to
273          * reach this position.  This can change as progressively lower cost
274          * paths are found to reach this position.  */
275         u32 cost;
276
277         /*
278          * The match or literal that was taken to reach this position.  This can
279          * change as progressively lower cost paths are found to reach this
280          * position.
281          *
282          * This variable is divided into two bitfields.
283          *
284          * Literals:
285          *      Low bits are 0, high bits are the literal.
286          *
287          * Explicit offset matches:
288          *      Low bits are the match length, high bits are the offset plus 2.
289          *
290          * Repeat offset matches:
291          *      Low bits are the match length, high bits are the queue index.
292          */
293         u32 item;
294 #define OPTIMUM_OFFSET_SHIFT 9
295 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
296 #define OPTIMUM_EXTRA_FLAG 0x80000000
297         u32 extra_match;
298         u32 extra_literal;
299 } _aligned_attribute(8);
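
/*
 * For illustration (a sketch consistent with the encoding described above,
 * with OPTIMUM_OFFSET_SHIFT == 9): the literal 0x41 is stored as
 * item == 0x41 << 9; a length-10 match at explicit offset 100 is stored as
 * item == ((100 + 2) << 9) | 10; and a length-10 match against queue index 1
 * is stored as item == (1 << 9) | 10.
 */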
300
301 /*
302  * Least-recently-used queue for match offsets.
303  *
304  * This is represented as a 64-bit integer for efficiency.  There are three
305  * offsets of 21 bits each.  Bit 64 is garbage.
306  */
307 struct lzx_lru_queue {
308         u64 R;
309 };
310
311 #define LZX_QUEUE64_OFFSET_SHIFT 21
312 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
313
314 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
315 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
316 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
317
318 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
319 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
320 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
321
322 static inline void
323 lzx_lru_queue_init(struct lzx_lru_queue *queue)
324 {
325         queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
326                    ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
327                    ((u64)1 << LZX_QUEUE64_R2_SHIFT);
328 }
329
330 static inline u64
331 lzx_lru_queue_R0(struct lzx_lru_queue queue)
332 {
333         return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
334 }
335
336 static inline u64
337 lzx_lru_queue_R1(struct lzx_lru_queue queue)
338 {
339         return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
340 }
341
342 static inline u64
343 lzx_lru_queue_R2(struct lzx_lru_queue queue)
344 {
345         return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
346 }
347
348 /* Push a match offset onto the front (most recently used) end of the queue.  */
349 static inline struct lzx_lru_queue
350 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
351 {
352         return (struct lzx_lru_queue) {
353                 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
354         };
355 }
356
357 /* Swap a match offset to the front of the queue.  */
358 static inline struct lzx_lru_queue
359 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
360 {
361         if (idx == 0)
362                 return queue;
363
364         if (idx == 1)
365                 return (struct lzx_lru_queue) {
366                         .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
367                              (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
368                              (queue.R & LZX_QUEUE64_R2_MASK),
369                 };
370
371         return (struct lzx_lru_queue) {
372                 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
373                      (queue.R & LZX_QUEUE64_R1_MASK) |
374                      (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
375         };
376 }
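
/*
 * Behavior sketch of the queue operations above: starting from
 * lzx_lru_queue_init() (R0 == R1 == R2 == 1),
 *
 *      lzx_lru_queue_push(q, 100)  gives  R0 == 100, R1 == 1, R2 == 1
 *                                  (the old R2 is shifted out; only its
 *                                  remnant lands in the garbage top bit), and
 *      lzx_lru_queue_swap(q, 2)    gives  R0 == old R2, R1 unchanged,
 *                                  R2 == old R0.
 */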
377
378 /* The main LZX compressor structure  */
379 struct lzx_compressor {
380
381         /* The "nice" match length: if a match of this length is found, then
382          * choose it immediately without further consideration.  */
383         unsigned nice_match_length;
384
385         /* The maximum search depth: consider at most this many potential
386          * matches at each position.  */
387         unsigned max_search_depth;
388
389         /* The log base 2 of the LZX window size for LZ match offset encoding
390          * purposes.  This will be >= LZX_MIN_WINDOW_ORDER and <=
391          * LZX_MAX_WINDOW_ORDER.  */
392         unsigned window_order;
393
394         /* The number of symbols in the main alphabet.  This depends on
395          * @window_order, since @window_order determines the maximum possible
396          * offset.  */
397         unsigned num_main_syms;
398
399         /* Number of optimization passes per block  */
400         unsigned num_optim_passes;
401
402         /* The preprocessed buffer of data being compressed  */
403         u8 *in_buffer;
404
405         /* The number of bytes of data to be compressed, which is the number of
406          * bytes of data in @in_buffer that are actually valid.  */
407         size_t in_nbytes;
408
409         /* Pointer to the compress() implementation chosen at allocation time */
410         void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
411
412         /* If true, the compressor need not preserve the input buffer if it
413          * compresses the data successfully.  */
414         bool destructive;
415
416         /* The Huffman symbol frequency counters for the current block.  */
417         struct lzx_freqs freqs;
418
419         /* Block split statistics.  */
420         struct block_split_stats split_stats;
421
422         /* The Huffman codes for the current and previous blocks.  The one with
423          * index 'codes_index' is for the current block, and the other one is
424          * for the previous block.  */
425         struct lzx_codes codes[2];
426         unsigned codes_index;
427
428         /* The matches and literals that the parser has chosen for the current
429          * block.  The required length of this array is limited by the maximum
430          * number of matches that can ever be chosen for a single block, plus
431          * one for the special entry at the end.  */
432         struct lzx_sequence chosen_sequences[
433                        DIV_ROUND_UP(SOFT_MAX_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];
434
435         /* Tables for mapping adjusted offsets to offset slots  */
436
437         /* offset slots [0, 29]  */
438         u8 offset_slot_tab_1[32768];
439
440         /* offset slots [30, 49]  */
441         u8 offset_slot_tab_2[128];
442
443         union {
444                 /* Data for greedy or lazy parsing  */
445                 struct {
446                         /* Hash chains matchfinder (MUST BE LAST!!!)  */
447                         union {
448                                 struct hc_matchfinder_16 hc_mf_16;
449                                 struct hc_matchfinder_32 hc_mf_32;
450                         };
451                 };
452
453                 /* Data for near-optimal parsing  */
454                 struct {
455                         /*
456                          * Array of nodes, one per position, for running the
457                          * minimum-cost path algorithm.
458                          *
459                          * This array must be large enough to accommodate the
460                          * worst-case number of nodes, which occurs if we find a
461                          * match of length LZX_MAX_MATCH_LEN at position
462                          * SOFT_MAX_BLOCK_SIZE - 1, producing a block of length
463                          * SOFT_MAX_BLOCK_SIZE - 1 + LZX_MAX_MATCH_LEN.  Add one
464                          * for the end-of-block node.
465                          */
466                         struct lzx_optimum_node optimum_nodes[SOFT_MAX_BLOCK_SIZE - 1 +
467                                                               LZX_MAX_MATCH_LEN + 1];
468
469                         /* The cost model for the current block  */
470                         struct lzx_costs costs;
471
472                         /*
473                          * Cached matches for the current block.  This array
474                          * contains the matches that were found at each position
475                          * in the block.  Specifically, for each position, there
476                          * is a special 'struct lz_match' whose 'length' field
477                          * contains the number of matches that were found at
478                          * that position; this is followed by the matches
479                          * themselves, if any, sorted by strictly increasing
480                          * length.
481                          *
482                          * Note: in rare cases, there will be a very high number
483                          * of matches in the block and this array will overflow.
484                          * If this happens, we force the end of the current
485                          * block.  LZX_CACHE_LENGTH is the length at which we
486                          * actually check for overflow.  The extra slots beyond
487                          * this are enough to absorb the worst case overflow,
488                          * which occurs if starting at
489                          * &match_cache[LZX_CACHE_LENGTH - 1], we write the
490                          * match count header, then write
491                          * LZX_MAX_MATCHES_PER_POS matches, then skip searching
492                          * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
493                          * write the match count header for each.
494                          */
495                         struct lz_match match_cache[LZX_CACHE_LENGTH +
496                                                     LZX_MAX_MATCHES_PER_POS +
497                                                     LZX_MAX_MATCH_LEN - 1];
498
499                         /* Binary trees matchfinder (MUST BE LAST!!!)  */
500                         union {
501                                 struct bt_matchfinder_16 bt_mf_16;
502                                 struct bt_matchfinder_32 bt_mf_32;
503                         };
504                 };
505         };
506 };
507
508 /*
509  * Will a matchfinder using 16-bit positions be sufficient for compressing
510  * buffers of up to the specified size?  The limit could be 65536 bytes, but we
511  * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
512  * This requires that the limit be no more than the length of offset_slot_tab_1
513  * (currently 32768).
514  */
515 static inline bool
516 lzx_is_16_bit(size_t max_bufsize)
517 {
518         STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
519         return max_bufsize <= 32768;
520 }
521
522 /*
523  * The following macros call either the 16-bit or the 32-bit version of a
524  * matchfinder function based on the value of 'is_16_bit', which will be known
525  * at compilation time.
526  */
527
528 #define CALL_HC_MF(is_16_bit, c, funcname, ...)                               \
529         ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
530                        CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__));
531
532 #define CALL_BT_MF(is_16_bit, c, funcname, ...)                               \
533         ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \
534                        CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__));
535
536 /*
537  * Structure to keep track of the current state of sending bits to the
538  * compressed output buffer.
539  *
540  * The LZX bitstream is encoded as a sequence of 16-bit coding units.
541  */
542 struct lzx_output_bitstream {
543
544         /* Bits that haven't yet been written to the output buffer.  */
545         machine_word_t bitbuf;
546
547         /* Number of bits currently held in @bitbuf.  */
548         u32 bitcount;
549
550         /* Pointer to the start of the output buffer.  */
551         u8 *start;
552
553         /* Pointer to the position in the output buffer at which the next coding
554          * unit should be written.  */
555         u8 *next;
556
557         /* Pointer just past the end of the output buffer, rounded down to a
558          * 2-byte boundary.  */
559         u8 *end;
560 };
561
562 /* Can the specified number of bits always be added to 'bitbuf' after any
563  * pending 16-bit coding units have been flushed?  */
564 #define CAN_BUFFER(n)   ((n) <= (8 * sizeof(machine_word_t)) - 15)
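
/*
 * For example, with a 64-bit machine_word_t, CAN_BUFFER(n) holds for n <= 49,
 * so three main-code symbols (3 * MAIN_CODEWORD_LIMIT == 48 bits) can be
 * accumulated between flushes; with a 32-bit machine_word_t the limit is 17
 * bits, so a flush is needed after (almost) every symbol.
 */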
565
566 /*
567  * Initialize the output bitstream.
568  *
569  * @os
570  *      The output bitstream structure to initialize.
571  * @buffer
572  *      The buffer being written to.
573  * @size
574  *      Size of @buffer, in bytes.
575  */
576 static void
577 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
578 {
579         os->bitbuf = 0;
580         os->bitcount = 0;
581         os->start = buffer;
582         os->next = os->start;
583         os->end = os->start + (size & ~1);
584 }
585
586 /* Add some bits to the bitbuffer variable of the output bitstream.  The caller
587  * must make sure there is enough room.  */
588 static inline void
589 lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
590 {
591         os->bitbuf = (os->bitbuf << num_bits) | bits;
592         os->bitcount += num_bits;
593 }
594
595 /* Flush bits from the bitbuffer variable to the output buffer.  'max_num_bits'
596  * specifies the maximum number of bits that may have been added since the last
597  * flush.  */
598 static inline void
599 lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
600 {
601         /* Masking the number of bits to shift is only needed to avoid undefined
602          * behavior; we don't actually care about the results of bad shifts.  On
603          * x86, the explicit masking generates no extra code.  */
604         const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1;
605
606         if (os->end - os->next < 6)
607                 return;
608         put_unaligned_le16(os->bitbuf >> ((os->bitcount - 16) &
609                                             shift_mask), os->next + 0);
610         if (max_num_bits > 16)
611                 put_unaligned_le16(os->bitbuf >> ((os->bitcount - 32) &
612                                                 shift_mask), os->next + 2);
613         if (max_num_bits > 32)
614                 put_unaligned_le16(os->bitbuf >> ((os->bitcount - 48) &
615                                                 shift_mask), os->next + 4);
616         os->next += (os->bitcount >> 4) << 1;
617         os->bitcount &= 15;
618 }
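
/*
 * Example of the flushing arithmetic above: if 37 bits are pending, the
 * completed coding units are stored as little-endian 16-bit words, 'next'
 * advances by (37 >> 4) << 1 == 4 bytes (two complete units), and bitcount
 * becomes 37 & 15 == 5.  If fewer than 6 bytes remain in the output buffer,
 * nothing is written; the overflow is detected later when lzx_flush_output()
 * returns 0.
 */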
619
620 /* Add at most 16 bits to the bitbuffer and flush it.  */
621 static inline void
622 lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
623 {
624         lzx_add_bits(os, bits, num_bits);
625         lzx_flush_bits(os, 16);
626 }
627
628 /*
629  * Flush the last coding unit to the output buffer if needed.  Return the total
630  * number of bytes written to the output buffer, or 0 if an overflow occurred.
631  */
632 static u32
633 lzx_flush_output(struct lzx_output_bitstream *os)
634 {
635         if (os->end - os->next < 6)
636                 return 0;
637
638         if (os->bitcount != 0) {
639                 put_unaligned_le16(os->bitbuf << (16 - os->bitcount), os->next);
640                 os->next += 2;
641         }
642
643         return os->next - os->start;
644 }
645
646 /*
647  * Build the main, length, and aligned offset Huffman codes used in LZX.
648  *
649  * This takes as input the frequency tables for each code and produces as output
650  * a set of tables that map symbols to codewords and codeword lengths.
651  */
652 static void
653 lzx_make_huffman_codes(struct lzx_compressor *c)
654 {
655         const struct lzx_freqs *freqs = &c->freqs;
656         struct lzx_codes *codes = &c->codes[c->codes_index];
657
658         STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
659                       MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
660         STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
661                       LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
662         STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
663                       ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
664
665         make_canonical_huffman_code(c->num_main_syms,
666                                     MAIN_CODEWORD_LIMIT,
667                                     freqs->main,
668                                     codes->lens.main,
669                                     codes->codewords.main);
670
671         make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
672                                     LENGTH_CODEWORD_LIMIT,
673                                     freqs->len,
674                                     codes->lens.len,
675                                     codes->codewords.len);
676
677         make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
678                                     ALIGNED_CODEWORD_LIMIT,
679                                     freqs->aligned,
680                                     codes->lens.aligned,
681                                     codes->codewords.aligned);
682 }
683
684 /* Reset the symbol frequencies for the LZX Huffman codes.  */
685 static void
686 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
687 {
688         memset(&c->freqs, 0, sizeof(c->freqs));
689 }
690
691 static unsigned
692 lzx_compute_precode_items(const u8 lens[restrict],
693                           const u8 prev_lens[restrict],
694                           u32 precode_freqs[restrict],
695                           unsigned precode_items[restrict])
696 {
697         unsigned *itemptr;
698         unsigned run_start;
699         unsigned run_end;
700         unsigned extra_bits;
701         int delta;
702         u8 len;
703
704         itemptr = precode_items;
705         run_start = 0;
706
707         while (!((len = lens[run_start]) & 0x80)) {
708
709                 /* len = the length being repeated  */
710
711                 /* Find the next run of codeword lengths.  */
712
713                 run_end = run_start + 1;
714
715                 /* Fast case for a single length.  */
716                 if (likely(len != lens[run_end])) {
717                         delta = prev_lens[run_start] - len;
718                         if (delta < 0)
719                                 delta += 17;
720                         precode_freqs[delta]++;
721                         *itemptr++ = delta;
722                         run_start++;
723                         continue;
724                 }
725
726                 /* Extend the run.  */
727                 do {
728                         run_end++;
729                 } while (len == lens[run_end]);
730
731                 if (len == 0) {
732                         /* Run of zeroes.  */
733
734                         /* Symbol 18: RLE 20 to 51 zeroes at a time.  */
735                         while ((run_end - run_start) >= 20) {
736                                 extra_bits = min((run_end - run_start) - 20, 0x1f);
737                                 precode_freqs[18]++;
738                                 *itemptr++ = 18 | (extra_bits << 5);
739                                 run_start += 20 + extra_bits;
740                         }
741
742                         /* Symbol 17: RLE 4 to 19 zeroes at a time.  */
743                         if ((run_end - run_start) >= 4) {
744                                 extra_bits = min((run_end - run_start) - 4, 0xf);
745                                 precode_freqs[17]++;
746                                 *itemptr++ = 17 | (extra_bits << 5);
747                                 run_start += 4 + extra_bits;
748                         }
749                 } else {
750
751                         /* A run of nonzero lengths. */
752
753                         /* Symbol 19: RLE 4 to 5 of any length at a time.  */
754                         while ((run_end - run_start) >= 4) {
755                                 extra_bits = (run_end - run_start) > 4;
756                                 delta = prev_lens[run_start] - len;
757                                 if (delta < 0)
758                                         delta += 17;
759                                 precode_freqs[19]++;
760                                 precode_freqs[delta]++;
761                                 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
762                                 run_start += 4 + extra_bits;
763                         }
764                 }
765
766                 /* Output any remaining lengths without RLE.  */
767                 while (run_start != run_end) {
768                         delta = prev_lens[run_start] - len;
769                         if (delta < 0)
770                                 delta += 17;
771                         precode_freqs[delta]++;
772                         *itemptr++ = delta;
773                         run_start++;
774                 }
775         }
776
777         return itemptr - precode_items;
778 }
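
/*
 * Example of the precode item encoding produced above (precode symbol in the
 * low 5 bits, extra bits above): a run of 25 zero lengths yields the single
 * item 18 | (5 << 5), meaning "20 + 5 zeroes"; a run of exactly 4 equal
 * nonzero lengths whose delta from prev_lens is 0 yields
 * 19 | (0 << 5) | (0 << 6).  Plain deltas are emitted as items 0 through 16.
 */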
779
780 /*
781  * Output a Huffman code in the compressed form used in LZX.
782  *
783  * The Huffman code is represented in the output as a logical series of codeword
784  * lengths from which the Huffman code, which must be in canonical form, can be
785  * reconstructed.
786  *
787  * The codeword lengths are themselves compressed using a separate Huffman code,
788  * the "precode", which contains a symbol for each possible codeword length in
789  * the larger code as well as several special symbols to represent repeated
790  * codeword lengths (a form of run-length encoding).  The precode is itself
791  * constructed in canonical form, and its codeword lengths are represented
792  * literally in 20 4-bit fields that immediately precede the compressed codeword
793  * lengths of the larger code.
794  *
795  * Furthermore, the codeword lengths of the larger code are actually represented
796  * as deltas from the codeword lengths of the corresponding code in the previous
797  * block.
798  *
799  * @os:
800  *      Bitstream to which to write the compressed Huffman code.
801  * @lens:
802  *      The codeword lengths, indexed by symbol, in the Huffman code.
803  * @prev_lens:
804  *      The codeword lengths, indexed by symbol, in the corresponding Huffman
805  *      code in the previous block, or all zeroes if this is the first block.
806  * @num_lens:
807  *      The number of symbols in the Huffman code.
808  */
809 static void
810 lzx_write_compressed_code(struct lzx_output_bitstream *os,
811                           const u8 lens[restrict],
812                           const u8 prev_lens[restrict],
813                           unsigned num_lens)
814 {
815         u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
816         u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
817         u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
818         unsigned precode_items[num_lens];
819         unsigned num_precode_items;
820         unsigned precode_item;
821         unsigned precode_sym;
822         unsigned i;
823         u8 saved = lens[num_lens];
824         *(u8 *)(lens + num_lens) = 0x80;
825
826         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
827                 precode_freqs[i] = 0;
828
829         /* Compute the "items" (RLE / literal tokens and extra bits) with which
830          * the codeword lengths in the larger code will be output.  */
831         num_precode_items = lzx_compute_precode_items(lens,
832                                                       prev_lens,
833                                                       precode_freqs,
834                                                       precode_items);
835
836         /* Build the precode.  */
837         STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 &&
838                       PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN);
839         make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
840                                     PRE_CODEWORD_LIMIT,
841                                     precode_freqs, precode_lens,
842                                     precode_codewords);
843
844         /* Output the lengths of the codewords in the precode.  */
845         for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
846                 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
847
848         /* Output the encoded lengths of the codewords in the larger code.  */
849         for (i = 0; i < num_precode_items; i++) {
850                 precode_item = precode_items[i];
851                 precode_sym = precode_item & 0x1F;
852                 lzx_add_bits(os, precode_codewords[precode_sym],
853                              precode_lens[precode_sym]);
854                 if (precode_sym >= 17) {
855                         if (precode_sym == 17) {
856                                 lzx_add_bits(os, precode_item >> 5, 4);
857                         } else if (precode_sym == 18) {
858                                 lzx_add_bits(os, precode_item >> 5, 5);
859                         } else {
860                                 lzx_add_bits(os, (precode_item >> 5) & 1, 1);
861                                 precode_sym = precode_item >> 6;
862                                 lzx_add_bits(os, precode_codewords[precode_sym],
863                                              precode_lens[precode_sym]);
864                         }
865                 }
866                 STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1));
867                 lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1);
868         }
869
870         *(u8 *)(lens + num_lens) = saved;
871 }
872
873 /*
874  * Write all matches and literal bytes (which were precomputed) in an LZX
875  * compressed block to the output bitstream in the final compressed
876  * representation.
877  *
878  * @os
879  *      The output bitstream.
880  * @block_type
881  *      The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
882  *      LZX_BLOCKTYPE_VERBATIM).
883  * @block_data
884  *      The uncompressed data of the block.
885  * @sequences
886  *      The matches and literals to output, given as a series of sequences.
887  * @codes
888  *      The main, length, and aligned offset Huffman codes for the current
889  *      LZX compressed block.
890  */
891 static void
892 lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
893                     const u8 *block_data, const struct lzx_sequence sequences[],
894                     const struct lzx_codes *codes)
895 {
896         const struct lzx_sequence *seq = sequences;
897         u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED);
898
899         for (;;) {
900                 /* Output the next sequence.  */
901
902                 unsigned litrunlen = seq->litrunlen;
903                 unsigned match_hdr;
904                 unsigned main_symbol;
905                 unsigned adjusted_length;
906                 u32 adjusted_offset;
907                 unsigned offset_slot;
908                 unsigned num_extra_bits;
909                 u32 extra_bits;
910
911                 /* Output the literal run of the sequence.  */
912
913                 if (litrunlen) {  /* Is the literal run nonempty?  */
914
915                         /* Verify optimization is enabled on 64-bit  */
916                         STATIC_ASSERT(sizeof(machine_word_t) < 8 ||
917                                       CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT));
918
919                         if (CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT)) {
920
921                                 /* 64-bit: write 3 literals at a time.  */
922                                 while (litrunlen >= 3) {
923                                         unsigned lit0 = block_data[0];
924                                         unsigned lit1 = block_data[1];
925                                         unsigned lit2 = block_data[2];
926                                         lzx_add_bits(os, codes->codewords.main[lit0],
927                                                      codes->lens.main[lit0]);
928                                         lzx_add_bits(os, codes->codewords.main[lit1],
929                                                      codes->lens.main[lit1]);
930                                         lzx_add_bits(os, codes->codewords.main[lit2],
931                                                      codes->lens.main[lit2]);
932                                         lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
933                                         block_data += 3;
934                                         litrunlen -= 3;
935                                 }
936                                 if (litrunlen--) {
937                                         unsigned lit = *block_data++;
938                                         lzx_add_bits(os, codes->codewords.main[lit],
939                                                      codes->lens.main[lit]);
940                                         if (litrunlen--) {
941                                                 unsigned lit = *block_data++;
942                                                 lzx_add_bits(os, codes->codewords.main[lit],
943                                                              codes->lens.main[lit]);
944                                                 lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
945                                         } else {
946                                                 lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT);
947                                         }
948                                 }
949                         } else {
950                                 /* 32-bit: write 1 literal at a time.  */
951                                 do {
952                                         unsigned lit = *block_data++;
953                                         lzx_add_bits(os, codes->codewords.main[lit],
954                                                      codes->lens.main[lit]);
955                                         lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
956                                 } while (--litrunlen);
957                         }
958                 }
959
960                 /* Was this the last literal run?  */
961                 if (seq->adjusted_offset_and_match_hdr & 0x80000000)
962                         return;
963
964                 /* Nope; output the match.  */
965
966                 match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
967                 main_symbol = LZX_NUM_CHARS + match_hdr;
968                 adjusted_length = seq->adjusted_length;
969
970                 block_data += adjusted_length + LZX_MIN_MATCH_LEN;
971
972                 offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
973                 adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
974
975                 num_extra_bits = lzx_extra_offset_bits[offset_slot];
976                 extra_bits = adjusted_offset - lzx_offset_slot_base[offset_slot];
977
978         #define MAX_MATCH_BITS  (MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
979                                  14 + ALIGNED_CODEWORD_LIMIT)
980
981                 /* Verify optimization is enabled on 64-bit  */
982                 STATIC_ASSERT(sizeof(machine_word_t) < 8 || CAN_BUFFER(MAX_MATCH_BITS));
983
984                 /* Output the main symbol for the match.  */
985
986                 lzx_add_bits(os, codes->codewords.main[main_symbol],
987                              codes->lens.main[main_symbol]);
988                 if (!CAN_BUFFER(MAX_MATCH_BITS))
989                         lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
990
991                 /* If needed, output the length symbol for the match.  */
992
993                 if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
994                         lzx_add_bits(os, codes->codewords.len[adjusted_length -
995                                                               LZX_NUM_PRIMARY_LENS],
996                                      codes->lens.len[adjusted_length -
997                                                      LZX_NUM_PRIMARY_LENS]);
998                         if (!CAN_BUFFER(MAX_MATCH_BITS))
999                                 lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
1000                 }
1001
1002                 /* Output the extra offset bits for the match.  In aligned
1003                  * offset blocks, the lowest 3 bits of the adjusted offset are
1004                  * Huffman-encoded using the aligned offset code, provided that
1005              * there are at least 3 extra offset bits required.  All other
1006                  * extra offset bits are output verbatim.  */
1007
1008                 if ((adjusted_offset & ones_if_aligned) >= 16) {
1009
1010                         lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
1011                                      num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
1012                         if (!CAN_BUFFER(MAX_MATCH_BITS))
1013                                 lzx_flush_bits(os, 14);
1014
1015                         lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
1016                                                                   LZX_ALIGNED_OFFSET_BITMASK],
1017                                      codes->lens.aligned[adjusted_offset &
1018                                                          LZX_ALIGNED_OFFSET_BITMASK]);
1019                         if (!CAN_BUFFER(MAX_MATCH_BITS))
1020                                 lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
1021                 } else {
1022                         STATIC_ASSERT(CAN_BUFFER(17));
1023
1024                         lzx_add_bits(os, extra_bits, num_extra_bits);
1025                         if (!CAN_BUFFER(MAX_MATCH_BITS))
1026                                 lzx_flush_bits(os, 17);
1027                 }
1028
1029                 if (CAN_BUFFER(MAX_MATCH_BITS))
1030                         lzx_flush_bits(os, MAX_MATCH_BITS);
1031
1032                 /* Advance to the next sequence.  */
1033                 seq++;
1034         }
1035 }
1036
1037 static void
1038 lzx_write_compressed_block(const u8 *block_begin,
1039                            int block_type,
1040                            u32 block_size,
1041                            unsigned window_order,
1042                            unsigned num_main_syms,
1043                            const struct lzx_sequence sequences[],
1044                            const struct lzx_codes * codes,
1045                            const struct lzx_lens * prev_lens,
1046                            struct lzx_output_bitstream * os)
1047 {
1048         /* The first three bits indicate the type of block and are one of the
1049          * LZX_BLOCKTYPE_* constants.  */
1050         lzx_write_bits(os, block_type, 3);
1051
1052         /* Output the block size.
1053          *
1054          * The original LZX format seemed to always encode the block size in 3
1055          * bytes.  However, the implementation in WIMGAPI, as used in WIM files,
1056          * uses the first bit to indicate whether the block is the default size
1057          * (32768) or a different size given explicitly by the next 16 bits.
1058          *
1059          * By default, this compressor uses a window size of 32768 and therefore
1060          * follows the WIMGAPI behavior.  However, this compressor also supports
1061          * window sizes greater than 32768 bytes, which do not appear to be
1062          * supported by WIMGAPI.  In such cases, we retain the default size bit
1063  * to mean a size of 32768 bytes but output the non-default block size in 24
1064          * bits rather than 16.  The compatibility of this behavior is unknown
1065          * because WIMs created with chunk size greater than 32768 can seemingly
1066          * only be opened by wimlib anyway.  */
1067         if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
1068                 lzx_write_bits(os, 1, 1);
1069         } else {
1070                 lzx_write_bits(os, 0, 1);
1071
1072                 if (window_order >= 16)
1073                         lzx_write_bits(os, block_size >> 16, 8);
1074
1075                 lzx_write_bits(os, block_size & 0xFFFF, 16);
1076         }
1077
1078         /* If it's an aligned offset block, output the aligned offset code.  */
1079         if (block_type == LZX_BLOCKTYPE_ALIGNED) {
1080                 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1081                         lzx_write_bits(os, codes->lens.aligned[i],
1082                                        LZX_ALIGNEDCODE_ELEMENT_SIZE);
1083                 }
1084         }
1085
1086         /* Output the main code (two parts).  */
1087         lzx_write_compressed_code(os, codes->lens.main,
1088                                   prev_lens->main,
1089                                   LZX_NUM_CHARS);
1090         lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
1091                                   prev_lens->main + LZX_NUM_CHARS,
1092                                   num_main_syms - LZX_NUM_CHARS);
1093
1094         /* Output the length code.  */
1095         lzx_write_compressed_code(os, codes->lens.len,
1096                                   prev_lens->len,
1097                                   LZX_LENCODE_NUM_SYMBOLS);
1098
1099         /* Output the compressed matches and literals.  */
1100         lzx_write_sequences(os, block_type, block_begin, sequences, codes);
1101 }
1102
1103 /* Given the frequencies of symbols in an LZX-compressed block and the
1104  * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
1105  * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
1106  * will take fewer bits to output.  */
1107 static int
1108 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1109                                const struct lzx_codes * codes)
1110 {
1111         u32 aligned_cost = 0;
1112         u32 verbatim_cost = 0;
1113
1114         /* A verbatim block requires 3 bits in each place that an aligned symbol
1115          * would be used in an aligned offset block.  */
1116         for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1117                 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
1118                 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1119         }
1120
1121         /* Account for output of the aligned offset code.  */
1122         aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1123
1124         if (aligned_cost < verbatim_cost)
1125                 return LZX_BLOCKTYPE_ALIGNED;
1126         else
1127                 return LZX_BLOCKTYPE_VERBATIM;
1128 }
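
/*
 * Rough example of the comparison above: if the aligned symbols occur 1000
 * times in total and the aligned code averages 2.5 bits per symbol, then the
 * verbatim cost is 3 * 1000 = 3000 bits while the aligned cost is about
 * 2500 + 3 * 8 = 2524 bits (the 24 extra bits pay for sending the aligned
 * code itself), so LZX_BLOCKTYPE_ALIGNED is chosen.
 */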
1129
1130 /*
1131  * Return the offset slot for the specified adjusted match offset, using the
1132  * compressor's acceleration tables to speed up the mapping.
1133  */
1134 static inline unsigned
1135 lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
1136                          bool is_16_bit)
1137 {
1138         if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
1139                 return c->offset_slot_tab_1[adjusted_offset];
1140         return c->offset_slot_tab_2[adjusted_offset >> 14];
1141 }
1142
1143 /*
1144  * Flush an LZX block:
1145  *
1146  * 1. Build the Huffman codes.
1147  * 2. Decide whether to output the block as VERBATIM or ALIGNED.
1148  * 3. Write the block.
1149  * 4. Swap the indices of the current and previous Huffman codes.
1150  */
1151 static void
1152 lzx_flush_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
1153                 const u8 *block_begin, u32 block_size, u32 seq_idx)
1154 {
1155         int block_type;
1156
1157         lzx_make_huffman_codes(c);
1158
1159         block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
1160                                                     &c->codes[c->codes_index]);
1161         lzx_write_compressed_block(block_begin,
1162                                    block_type,
1163                                    block_size,
1164                                    c->window_order,
1165                                    c->num_main_syms,
1166                                    &c->chosen_sequences[seq_idx],
1167                                    &c->codes[c->codes_index],
1168                                    &c->codes[c->codes_index ^ 1].lens,
1169                                    os);
1170         c->codes_index ^= 1;
1171 }
1172
1173 /* Tally the Huffman symbol for a literal and increment the literal run length.
1174  */
1175 static inline void
1176 lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
1177 {
1178         c->freqs.main[literal]++;
1179         ++*litrunlen_p;
1180 }
1181
1182 /* Tally the Huffman symbol for a match, save the match data and the length of
1183  * the preceding literal run in the next lzx_sequence, and update the recent
1184  * offsets queue.  */
1185 static inline void
1186 lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
1187                  u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
1188                  u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
1189 {
1190         u32 litrunlen = *litrunlen_p;
1191         struct lzx_sequence *next_seq = *next_seq_p;
1192         unsigned offset_slot;
1193         unsigned v;
1194
1195         v = length - LZX_MIN_MATCH_LEN;
1196
1197         /* Save the literal run length and adjusted length.  */
1198         next_seq->litrunlen = litrunlen;
1199         next_seq->adjusted_length = v;
1200
1201         /* Compute the length header and tally the length symbol if needed  */
1202         if (v >= LZX_NUM_PRIMARY_LENS) {
1203                 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1204                 v = LZX_NUM_PRIMARY_LENS;
1205         }
1206
1207         /* Compute the offset slot  */
1208         offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1209
1210         /* Compute the match header.  */
1211         v += offset_slot * LZX_NUM_LEN_HEADERS;
1212
1213         /* Save the adjusted offset and match header.  */
1214         next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v;
1215
1216         /* Tally the main symbol.  */
1217         c->freqs.main[LZX_NUM_CHARS + v]++;
1218
1219         /* Update the recent offsets queue.  */
1220         if (offset_data < LZX_NUM_RECENT_OFFSETS) {
1221                 /* Repeat offset match  */
1222                 swap(recent_offsets[0], recent_offsets[offset_data]);
1223         } else {
1224                 /* Explicit offset match  */
1225
1226                 /* Tally the aligned offset symbol if needed  */
1227                 if (offset_data >= 16)
1228                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1229
1230                 recent_offsets[2] = recent_offsets[1];
1231                 recent_offsets[1] = recent_offsets[0];
1232                 recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT;
1233         }
1234
1235         /* Reset the literal run length and advance to the next sequence.  */
1236         *next_seq_p = next_seq + 1;
1237         *litrunlen_p = 0;
1238 }
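
/*
 * Worked example for lzx_record_match() (assuming the usual LZX constants
 * LZX_MIN_MATCH_LEN == 2, LZX_NUM_PRIMARY_LENS == 7, LZX_NUM_LEN_HEADERS == 8,
 * and LZX_NUM_CHARS == 256): for an explicit-offset match of length 100,
 * v = 100 - 2 = 98 >= 7, so length symbol 98 - 7 = 91 is tallied and v becomes
 * 7; the match header is then offset_slot * 8 + 7, and main symbol
 * 256 + header is tallied.  'offset_data' is the adjusted offset: a recent
 * offset code 0-2, or the real offset plus LZX_OFFSET_ADJUSTMENT otherwise.
 */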
1239
1240 /* Finish the last lzx_sequence.  The last lzx_sequence is just a literal run;
1241  * there is no match.  This literal run may be empty.  */
1242 static inline void
1243 lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
1244 {
1245         last_seq->litrunlen = litrunlen;
1246
1247         /* Special value to mark last sequence  */
1248         last_seq->adjusted_offset_and_match_hdr = 0x80000000;
1249 }
1250
1251 /******************************************************************************/
1252
1253 /*
1254  * Block splitting algorithm.  The problem is to decide when it is worthwhile to
1255  * start a new block with new entropy codes.  There is a theoretically optimal
1256  * solution: recursively consider every possible block split, considering the
1257  * exact cost of each block, and choose the minimum cost approach.  But this is
1258  * far too slow.  Instead, as an approximation, we can count symbols and after
1259  * every N symbols, compare the expected distribution of symbols based on the
1260  * previous data with the actual distribution.  If they differ "by enough", then
1261  * start a new block.
1262  *
1263  * As an optimization and heuristic, we don't distinguish between every symbol
1264  * but rather we combine many symbols into a single "observation type".  For
1265  * literals we only look at the high bits and low bits, and for matches we only
1266  * look at whether the match is long or not.  The assumption is that for typical
1267  * "real" data, places that are good block boundaries will tend to be noticable
1268  * based only on changes in these aggregate frequencies, without looking for
1269  * subtle differences in individual symbols.  For example, a change from ASCII
1270  * bytes to non-ASCII bytes, or from few matches (generally less compressible)
1271  * to many matches (generally more compressible), would be easily noticed based
1272  * on the aggregates.
1273  *
1274  * For determining whether the frequency distributions are "different enough" to
1275  * start a new block, the simple heuristic of splitting when the sum of absolute
1276  * differences exceeds a constant seems to be good enough.  We also add a number
1277  * proportional to the block size so that the algorithm is more likely to end
1278  * large blocks than small blocks.  This reflects the general expectation that
1279  * it will become increasingly beneficial to start a new block as the current
1280  * block grows larger.
1281  *
1282  * Finally, since this is only an approximation, it is not strictly necessary
1283  * to consider the exact symbols that will be used.  With "near-optimal parsing",
1284  * the actual symbols that will be used are unknown until after the block
1285  * boundary is chosen and the block has been optimized.  Since the final choices
1286  * cannot be used, we can use preliminary "greedy" choices instead.
1287  */
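/*
 * Illustrative example (made-up numbers): suppose the previously accepted
 * observations number num_observations = 512, the new window produced
 * num_new_observations = 512, and block_size = 16384.  If the old
 * observations were split evenly between types 0 and 1 while the new ones are
 * split evenly between types 2 and 3, then each of those four types
 * contributes |actual - expected| = 256 * 512, giving total_delta = 524288.
 * The right-hand side of the check is (512 * 51 / 64) * 512 = 208896, and the
 * block-size term adds (16384 / 1024) * 512 = 8192 to the left-hand side, so
 * 532480 >= 208896 and a new block is started.  Equivalently, after dividing
 * both sides by num_observations * num_new_observations, the sum of absolute
 * differences between the two normalized distributions (2.0 here), plus a
 * small block-size bonus, is compared against roughly 51/64 = 0.8.
 */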
1288
1289 /* Initialize the block split statistics when starting a new block. */
1290 static void
1291 init_block_split_stats(struct block_split_stats *stats)
1292 {
1293         for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
1294                 stats->new_observations[i] = 0;
1295                 stats->observations[i] = 0;
1296         }
1297         stats->num_new_observations = 0;
1298         stats->num_observations = 0;
1299 }
1300
1301 /* Literal observation.  Heuristic: use the top 2 bits and the low bit of the
1302  * literal, for 8 possible literal observation types.  */
1303 static inline void
1304 observe_literal(struct block_split_stats *stats, u8 lit)
1305 {
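        /* Observation type index = (bit 7 << 2) | (bit 6 << 1) | (bit 0) of
         * the literal.  */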
1306         stats->new_observations[((lit >> 5) & 0x6) | (lit & 1)]++;
1307         stats->num_new_observations++;
1308 }
1309
1310 /* Match observation.  Heuristic: use one observation type for "short match" and
1311  * one observation type for "long match".  */
1312 static inline void
1313 observe_match(struct block_split_stats *stats, unsigned length)
1314 {
1315         stats->new_observations[NUM_LITERAL_OBSERVATION_TYPES + (length >= 5)]++;
1316         stats->num_new_observations++;
1317 }
1318
1319 static bool
1320 do_end_block_check(struct block_split_stats *stats, u32 block_size)
1321 {
1322         if (stats->num_observations > 0) {
1323
1324                 /* Note: to avoid slow divisions, we do not compute the
1325                  * normalized frequencies directly, but rather cross-multiply
1326                  * by 'num_observations' and 'num_new_observations'.  */
1327                 u32 total_delta = 0;
1328                 for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
1329                         u32 expected = stats->observations[i] * stats->num_new_observations;
1330                         u32 actual = stats->new_observations[i] * stats->num_observations;
1331                         u32 delta = (actual > expected) ? actual - expected :
1332                                                           expected - actual;
1333                         total_delta += delta;
1334                 }
1335
1336                 /* Ready to end the block? */
1337                 if (total_delta + (block_size / 1024) * stats->num_observations >=
1338                     stats->num_new_observations * 51 / 64 * stats->num_observations)
1339                         return true;
1340         }
1341
1342         for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
1343                 stats->num_observations += stats->new_observations[i];
1344                 stats->observations[i] += stats->new_observations[i];
1345                 stats->new_observations[i] = 0;
1346         }
1347         stats->num_new_observations = 0;
1348         return false;
1349 }
1350
1351 static inline bool
1352 should_end_block(struct block_split_stats *stats,
1353                  const u8 *in_block_begin, const u8 *in_next, const u8 *in_end)
1354 {
1355         /* Ready to check block split statistics? */
1356         if (stats->num_new_observations < NUM_OBSERVATIONS_PER_BLOCK_CHECK ||
1357             in_next - in_block_begin < MIN_BLOCK_SIZE ||
1358             in_end - in_next < MIN_BLOCK_SIZE)
1359                 return false;
1360
1361         return do_end_block_check(stats, in_next - in_block_begin);
1362 }
1363
1364 /******************************************************************************/
1365
1366 /*
1367  * Given the minimum-cost path computed through the item graph for the current
1368  * block, walk the path and count how many of each symbol in each Huffman-coded
1369  * alphabet would be required to output the items (matches and literals) along
1370  * the path.
1371  *
1372  * Note that the path will be walked backwards (from the end of the block to the
1373  * beginning of the block), but this doesn't matter because this function only
1374  * computes frequencies.
1375  */
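/*
 * Encoding of 'c->optimum_nodes[].item' (as produced by
 * lzx_find_min_cost_path()): the low bits (OPTIMUM_LEN_MASK) hold the match
 * length, and the bits starting at OPTIMUM_OFFSET_SHIFT hold the adjusted
 * offset.  A literal is stored with length 0 and its main symbol in the offset
 * field.  OPTIMUM_EXTRA_FLAG marks a combined "match + literal + rep0 match"
 * item whose details are stored in the node's 'extra_literal' and
 * 'extra_match' fields.
 */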
1376 static inline void
1377 lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1378 {
1379         u32 node_idx = block_size;
1380
1381         for (;;) {
1382                 u32 item;
1383                 u32 len;
1384                 u32 offset_data;
1385                 unsigned v;
1386                 unsigned offset_slot;
1387
1388                 /* Tally literals until either a match or the beginning of the
1389                  * block is reached.  */
1390                 for (;;) {
1391                         item = c->optimum_nodes[node_idx].item;
1392                         if (item & OPTIMUM_LEN_MASK)
1393                                 break;
1394                         c->freqs.main[item >> OPTIMUM_OFFSET_SHIFT]++;
1395                         node_idx--;
1396                 }
1397
1398                 if (item & OPTIMUM_EXTRA_FLAG) {
1399
1400                         if (node_idx == 0)
1401                                 break;
1402
1403                         /* Tally a rep0 match.  */
1404                         len = item & OPTIMUM_LEN_MASK;
1405                         v = len - LZX_MIN_MATCH_LEN;
1406                         if (v >= LZX_NUM_PRIMARY_LENS) {
1407                                 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1408                                 v = LZX_NUM_PRIMARY_LENS;
1409                         }
1410                         c->freqs.main[LZX_NUM_CHARS + v]++;
1411
1412                         /* Tally a literal.  */
1413                         c->freqs.main[c->optimum_nodes[node_idx].extra_literal]++;
1414
1415                         item = c->optimum_nodes[node_idx].extra_match;
1416                         node_idx -= len + 1;
1417                 }
1418
1419                 len = item & OPTIMUM_LEN_MASK;
1420                 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1421
1422                 node_idx -= len;
1423
1424                 /* Tally a match.  */
1425
1426                 /* Tally the aligned offset symbol if needed.  */
1427                 if (offset_data >= 16)
1428                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1429
1430                 /* Tally the length symbol if needed.  */
1431                 v = len - LZX_MIN_MATCH_LEN;
1432                 if (v >= LZX_NUM_PRIMARY_LENS) {
1433                         c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1434                         v = LZX_NUM_PRIMARY_LENS;
1435                 }
1436
1437                 /* Tally the main symbol.  */
1438                 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1439                 v += offset_slot * LZX_NUM_LEN_HEADERS;
1440                 c->freqs.main[LZX_NUM_CHARS + v]++;
1441         }
1442 }
1443
1444 /*
1445  * Like lzx_tally_item_list(), but this function also generates the list of
1446  * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
1447  * ready to be output to the bitstream after the Huffman codes are computed.
1448  * The lzx_sequences will be written to decreasing memory addresses as the path
1449  * is walked backwards, which means they will end up in the expected
1450  * first-to-last order.  The return value is the index in c->chosen_sequences at
1451  * which the lzx_sequences begin.
1452  */
1453 static inline u32
1454 lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1455 {
1456         u32 node_idx = block_size;
1457         u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
1458         u32 lit_start_node;
1459
1460         /* Special value to mark last sequence  */
1461         c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000;
1462
1463         lit_start_node = node_idx;
1464         for (;;) {
1465                 u32 item;
1466                 u32 len;
1467                 u32 offset_data;
1468                 unsigned v;
1469                 unsigned offset_slot;
1470
1471                 /* Tally literals until either a match or the beginning of the
1472                  * block is reached.  */
1473                 for (;;) {
1474                         item = c->optimum_nodes[node_idx].item;
1475                         if (item & OPTIMUM_LEN_MASK)
1476                                 break;
1477                         c->freqs.main[item >> OPTIMUM_OFFSET_SHIFT]++;
1478                         node_idx--;
1479                 }
1480
1481                 if (item & OPTIMUM_EXTRA_FLAG) {
1482
1483                         if (node_idx == 0)
1484                                 break;
1485
1486                         /* Save the literal run length for the next sequence
1487                          * (the "previous sequence" when walking backwards).  */
1488                         len = item & OPTIMUM_LEN_MASK;
1489                         c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
1490                         seq_idx--;
1491                         lit_start_node = node_idx - len;
1492
1493                         /* Tally a rep0 match.  */
1494                         v = len - LZX_MIN_MATCH_LEN;
1495                         c->chosen_sequences[seq_idx].adjusted_length = v;
1496                         if (v >= LZX_NUM_PRIMARY_LENS) {
1497                                 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1498                                 v = LZX_NUM_PRIMARY_LENS;
1499                         }
1500                         c->freqs.main[LZX_NUM_CHARS + v]++;
1501                         c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = v;
1502
1503                         /* Tally a literal.  */
1504                         c->freqs.main[c->optimum_nodes[node_idx].extra_literal]++;
1505
1506                         item = c->optimum_nodes[node_idx].extra_match;
1507                         node_idx -= len + 1;
1508                 }
1509
1510                 len = item & OPTIMUM_LEN_MASK;
1511                 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1512
1513                 /* Save the literal run length for the next sequence (the
1514                  * "previous sequence" when walking backwards).  */
1515                 c->chosen_sequences[seq_idx--].litrunlen = lit_start_node - node_idx;
1516                 node_idx -= len;
1517                 lit_start_node = node_idx;
1518
1519                 /* Record a match.  */
1520
1521                 /* Tally the aligned offset symbol if needed.  */
1522                 if (offset_data >= 16)
1523                         c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1524
1525                 /* Save the adjusted length.  */
1526                 v = len - LZX_MIN_MATCH_LEN;
1527                 c->chosen_sequences[seq_idx].adjusted_length = v;
1528
1529                 /* Tally the length symbol if needed.  */
1530                 if (v >= LZX_NUM_PRIMARY_LENS) {
1531                         c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1532                         v = LZX_NUM_PRIMARY_LENS;
1533                 }
1534
1535                 /* Tally the main symbol.  */
1536                 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1537                 v += offset_slot * LZX_NUM_LEN_HEADERS;
1538                 c->freqs.main[LZX_NUM_CHARS + v]++;
1539
1540                 /* Save the adjusted offset and match header.  */
1541                 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr =
1542                                 (offset_data << 9) | v;
1543         }
1544
1545         /* Save the literal run length for the first sequence.  */
1546         c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
1547
1548         /* Return the index in c->chosen_sequences at which the lzx_sequences
1549          * begin.  */
1550         return seq_idx;
1551 }
1552
1553 /*
1554  * Find an inexpensive path through the graph of possible match/literal choices
1555  * for the current block.  The nodes of the graph are
1556  * c->optimum_nodes[0...block_size].  They correspond directly to the bytes in
1557  * the current block, plus one extra node for end-of-block.  The edges of the
1558  * graph are matches and literals.  The goal is to find the minimum cost path
1559  * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]', given the cost
1560  * model 'c->costs'.
1561  *
1562  * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1563  * proceeding forwards one node at a time.  At each node, a selection of matches
1564  * (len >= 2), as well as the literal byte (len = 1), is considered.  An item of
1565  * length 'len' provides a new path to reach the node 'len' bytes later.  If
1566  * such a path is the lowest cost found so far to reach that later node, then
1567  * that later node is updated with the new path.
1568  *
1569  * Note that although this algorithm is based on minimum cost path search, due
1570  * to various simplifying assumptions the result is not guaranteed to be the
1571  * true minimum cost, or "optimal", path over the graph of all valid LZX
1572  * representations of this block.
1573  *
1574  * Also, note that because of the presence of the recent offsets queue (which is
1575  * a type of adaptive state), the algorithm cannot work backwards and compute
1576  * "cost to end" instead of "cost to beginning".  Furthermore, the way the
1577  * algorithm handles this adaptive state in the "minimum cost" parse is actually
1578  * only an approximation.  It's possible for the globally optimal, minimum cost
1579  * path to contain a prefix, ending at a position, where that path prefix is
1580  * *not* the minimum cost path to that position.  This can happen if such a path
1581  * prefix results in a different adaptive state which results in lower costs
1582  * later.  The algorithm does not solve this problem; it only considers the
1583  * lowest cost to reach each individual position.
1584  */
1585 static inline struct lzx_lru_queue
1586 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1587                        const u8 * const restrict block_begin,
1588                        const u32 block_size,
1589                        const struct lzx_lru_queue initial_queue,
1590                        bool is_16_bit)
1591 {
1592         struct lzx_optimum_node *cur_node = c->optimum_nodes;
1593         struct lz_match *cache_ptr = c->match_cache;
1594         const u8 *in_next = block_begin;
1595         const u8 * const block_end = block_begin + block_size;
1596
1597         /* Instead of storing the match offset LRU queues in the
1598          * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1599          * storing them in a smaller array.  This works because the algorithm
1600          * only requires a limited history of the adaptive state.  Once a given
1601          * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1602          * it is no longer needed.  */
1603         struct lzx_lru_queue queues[512];
1604
1605         STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
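/* Map a position in the input buffer to its slot in the ring buffer of
 * queues.  */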
1606 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
1607
1608         /* Initially, the cost to reach each node is "infinity".  */
1609         memset(c->optimum_nodes, 0xFF,
1610                (block_size + 1) * sizeof(c->optimum_nodes[0]));
1611
1612         QUEUE(block_begin) = initial_queue;
1613
1614         /* The following loop runs 'block_size' iterations, one per node.  */
1615         do {
1616                 unsigned num_matches;
1617                 unsigned literal;
1618                 u32 cost;
1619
1620                 /*
1621                  * A selection of matches for the block was already saved in
1622                  * memory so that we don't have to run the uncompressed data
1623                  * through the matchfinder on every optimization pass.  However,
1624                  * we still search for repeat offset matches during each
1625                  * optimization pass because we cannot predict the state of the
1626                  * recent offsets queue.  But as a heuristic, we don't bother
1627                  * searching for repeat offset matches if the general-purpose
1628                  * matchfinder failed to find any matches.
1629                  *
1630                  * Note that a match of length n at some offset implies there is
1631                  * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1632                  * that same offset.  In other words, we don't necessarily need
1633                  * to use the full length of a match.  The key heuristic that
1634                  * saves a significant amount of time is that for each
1635                  * distinct length, we only consider the smallest offset for
1636                  * which that length is available.  This heuristic also applies
1637                  * to repeat offsets, which we order specially: R0 < R1 < R2 <
1638                  * any explicit offset.  Of course, this heuristic may
1639                  * produce suboptimal results because offset slots in LZX are
1640                  * subject to entropy encoding, but in practice this is a useful
1641                  * heuristic.
1642                  */
1643
1644                 num_matches = cache_ptr->length;
1645                 cache_ptr++;
1646
1647                 if (num_matches) {
1648                         struct lz_match *end_matches = cache_ptr + num_matches;
1649                         unsigned next_len = LZX_MIN_MATCH_LEN;
1650                         unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1651                         const u8 *matchptr;
1652
1653                         /* Consider R0 match  */
1654                         matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1655                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1656                                 goto R0_done;
1657                         STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
1658                         do {
1659                                 u32 cost = cur_node->cost +
1660                                            c->costs.match_cost[0][
1661                                                         next_len - LZX_MIN_MATCH_LEN];
1662                                 if (cost <= (cur_node + next_len)->cost) {
1663                                         (cur_node + next_len)->cost = cost;
1664                                         (cur_node + next_len)->item =
1665                                                 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1666                                 }
1667                                 if (unlikely(++next_len > max_len)) {
1668                                         cache_ptr = end_matches;
1669                                         goto done_matches;
1670                                 }
1671                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1672
1673                 R0_done:
1674
1675                         /* Consider R1 match  */
1676                         matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1677                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1678                                 goto R1_done;
1679                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1680                                 goto R1_done;
1681                         for (unsigned len = 2; len < next_len - 1; len++)
1682                                 if (matchptr[len] != in_next[len])
1683                                         goto R1_done;
1684                         do {
1685                                 u32 cost = cur_node->cost +
1686                                            c->costs.match_cost[1][
1687                                                         next_len - LZX_MIN_MATCH_LEN];
1688                                 if (cost <= (cur_node + next_len)->cost) {
1689                                         (cur_node + next_len)->cost = cost;
1690                                         (cur_node + next_len)->item =
1691                                                 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1692                                 }
1693                                 if (unlikely(++next_len > max_len)) {
1694                                         cache_ptr = end_matches;
1695                                         goto done_matches;
1696                                 }
1697                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1698
1699                 R1_done:
1700
1701                         /* Consider R2 match  */
1702                         matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1703                         if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1704                                 goto R2_done;
1705                         if (matchptr[next_len - 1] != in_next[next_len - 1])
1706                                 goto R2_done;
1707                         for (unsigned len = 2; len < next_len - 1; len++)
1708                                 if (matchptr[len] != in_next[len])
1709                                         goto R2_done;
1710                         do {
1711                                 u32 cost = cur_node->cost +
1712                                            c->costs.match_cost[2][
1713                                                         next_len - LZX_MIN_MATCH_LEN];
1714                                 if (cost <= (cur_node + next_len)->cost) {
1715                                         (cur_node + next_len)->cost = cost;
1716                                         (cur_node + next_len)->item =
1717                                                 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1718                                 }
1719                                 if (unlikely(++next_len > max_len)) {
1720                                         cache_ptr = end_matches;
1721                                         goto done_matches;
1722                                 }
1723                         } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1724
1725                 R2_done:
1726
1727                         while (next_len > cache_ptr->length)
1728                                 if (++cache_ptr == end_matches)
1729                                         goto done_matches;
1730
1731                         /* Consider explicit offset matches  */
1732                         for (;;) {
1733                                 u32 offset = cache_ptr->offset;
1734                                 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1735                                 unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
1736                                                                                 is_16_bit);
1737                                 u32 base_cost = cur_node->cost;
1738                                 u32 cost;
1739
1740                         #if LZX_CONSIDER_ALIGNED_COSTS
1741                                 if (offset_data >= 16)
1742                                         base_cost += c->costs.aligned[offset_data &
1743                                                                       LZX_ALIGNED_OFFSET_BITMASK];
1744                         #endif
1745                                 do {
1746                                         cost = base_cost +
1747                                                c->costs.match_cost[offset_slot][
1748                                                                 next_len - LZX_MIN_MATCH_LEN];
1749                                         if (cost < (cur_node + next_len)->cost) {
1750                                                 (cur_node + next_len)->cost = cost;
1751                                                 (cur_node + next_len)->item =
1752                                                         (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1753                                         }
1754                                 } while (++next_len <= cache_ptr->length);
1755
1756                                 if (++cache_ptr == end_matches) {
1757                                         /* Consider match + lit + rep0 */
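                                        /* I.e., take the longest explicit
                                         * offset match found above, code the
                                         * next byte as a literal, then resume
                                         * matching at the same offset with a
                                         * rep0 match starting right after the
                                         * literal.  */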
1758                                         u32 remaining = block_end - (in_next + next_len);
1759                                         if (likely(remaining >= 2)) {
1760                                                 const u8 *strptr = in_next + next_len;
1761                                                 const u8 *matchptr = strptr - offset;
1762                                                 if (unlikely(load_u16_unaligned(strptr) == load_u16_unaligned(matchptr))) {
1763                                                         u32 rep0_len = lz_extend(strptr, matchptr, 2,
1764                                                                                  min(remaining, LZX_MAX_MATCH_LEN));
1765                                                         u8 lit = strptr[-1];
1766                                                         cost += c->costs.main[lit] +
1767                                                                 c->costs.match_cost[0][rep0_len - LZX_MIN_MATCH_LEN];
1768                                                         u32 total_len = next_len + rep0_len;
1769                                                         if (cost < (cur_node + total_len)->cost) {
1770                                                                 (cur_node + total_len)->cost = cost;
1771                                                                 (cur_node + total_len)->item =
1772                                                                         OPTIMUM_EXTRA_FLAG | rep0_len;
1773                                                                 (cur_node + total_len)->extra_literal = lit;
1774                                                                 (cur_node + total_len)->extra_match =
1775                                                                         (offset_data << OPTIMUM_OFFSET_SHIFT) | (next_len - 1);
1776                                                         }
1777                                                 }
1778                                         }
1779                                         break;
1780                                 }
1781                         }
1782                 }
1783
1784         done_matches:
1785
1786                 /* Consider coding a literal.
1787                  *
1788                  * To avoid an extra branch, actually checking the preferability
1789                  * of coding the literal is integrated into the queue update
1790                  * code below.  */
1791                 literal = *in_next++;
1792                 cost = cur_node->cost + c->costs.main[literal];
1793
1794                 /* Advance to the next position.  */
1795                 cur_node++;
1796
1797                 /* The lowest-cost path to the current position is now known.
1798                  * Finalize the recent offsets queue that results from taking
1799                  * this lowest-cost path.  */
1800
1801                 if (cost <= cur_node->cost) {
1802                         /* Literal: queue remains unchanged.  */
1803                         cur_node->cost = cost;
1804                         cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
1805                         QUEUE(in_next) = QUEUE(in_next - 1);
1806                 } else {
1807                         /* Match: queue update is needed.  */
1808                         unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1809                         u32 offset_data = (cur_node->item &
1810                                            ~OPTIMUM_EXTRA_FLAG) >> OPTIMUM_OFFSET_SHIFT;
1811                         if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1812                                 /* Explicit offset match: insert offset at front  */
1813                                 QUEUE(in_next) =
1814                                         lzx_lru_queue_push(QUEUE(in_next - len),
1815                                                            offset_data - LZX_OFFSET_ADJUSTMENT);
1816                         } else if (cur_node->item & OPTIMUM_EXTRA_FLAG) {
1817                                 /* Explicit offset match, then literal, then
1818                                  * rep0 match: insert offset at front  */
1819                                 len += 1 + (cur_node->extra_match & OPTIMUM_LEN_MASK);
1820                                 QUEUE(in_next) =
1821                                         lzx_lru_queue_push(QUEUE(in_next - len),
1822                                                            (cur_node->extra_match >> OPTIMUM_OFFSET_SHIFT) -
1823                                                            LZX_OFFSET_ADJUSTMENT);
1824                         } else {
1825                                 /* Repeat offset match: swap offset to front  */
1826                                 QUEUE(in_next) =
1827                                         lzx_lru_queue_swap(QUEUE(in_next - len),
1828                                                            offset_data);
1829                         }
1830                 }
1831         } while (in_next != block_end);
1832
1833         /* Return the match offset queue at the end of the minimum cost path. */
1834         return QUEUE(block_end);
1835 }
1836
1837 /* Given the costs for the main and length codewords, compute 'match_costs'.  */
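/*
 * For a given offset slot and adjusted length, the match cost is the cost of
 * the main symbol (which encodes the length header and offset slot), plus the
 * extra offset bits, plus the cost of the length symbol when the length does
 * not fit in the length header.  When aligned offset costs are modeled
 * separately, the 3 aligned bits are excluded from the extra bits here.
 */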
1838 static void
1839 lzx_compute_match_costs(struct lzx_compressor *c)
1840 {
1841         unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) /
1842                                         LZX_NUM_LEN_HEADERS;
1843         struct lzx_costs *costs = &c->costs;
1844
1845         for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1846
1847                 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1848                 unsigned main_symbol = LZX_NUM_CHARS + (offset_slot *
1849                                                         LZX_NUM_LEN_HEADERS);
1850                 unsigned i;
1851
1852         #if LZX_CONSIDER_ALIGNED_COSTS
1853                 if (offset_slot >= 8)
1854                         extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1855         #endif
1856
1857                 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1858                         costs->match_cost[offset_slot][i] =
1859                                 costs->main[main_symbol++] + extra_cost;
1860
1861                 extra_cost += costs->main[main_symbol];
1862
1863                 for (; i < LZX_NUM_LENS; i++)
1864                         costs->match_cost[offset_slot][i] =
1865                                 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
1866         }
1867 }
1868
1869 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
1870  * algorithm.  */
1871 static void
1872 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1873 {
1874         u32 i;
1875         bool have_byte[256];
1876         unsigned num_used_bytes;
1877
1878         /* The costs below are hard coded to use a scaling factor of 64.  */
1879         STATIC_ASSERT(LZX_BIT_COST == 64);
1880
1881         /*
1882          * Heuristics:
1883          *
1884          * - Use smaller initial costs for literal symbols when the input buffer
1885          *   contains fewer distinct bytes.
1886          *
1887          * - Assume that match symbols are more costly than literal symbols.
1888          *
1889          * - Assume that length symbols for shorter lengths are less costly than
1890          *   length symbols for longer lengths.
1891          */
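        /*
         * With LZX_BIT_COST == 64, the values chosen below correspond roughly
         * to: literal symbols 8.75 bits or less (fewer distinct bytes =>
         * cheaper), match main symbols about 10.6 bits, length symbols about
         * 6.4 bits and slowly increasing with length, and aligned offset
         * symbols exactly 3 bits.
         */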
1892
1893         for (i = 0; i < 256; i++)
1894                 have_byte[i] = false;
1895
1896         for (i = 0; i < block_size; i++)
1897                 have_byte[block[i]] = true;
1898
1899         num_used_bytes = 0;
1900         for (i = 0; i < 256; i++)
1901                 num_used_bytes += have_byte[i];
1902
1903         for (i = 0; i < 256; i++)
1904                 c->costs.main[i] = 560 - (256 - num_used_bytes);
1905
1906         for (; i < c->num_main_syms; i++)
1907                 c->costs.main[i] = 680;
1908
1909         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1910                 c->costs.len[i] = 412 + i;
1911
1912 #if LZX_CONSIDER_ALIGNED_COSTS
1913         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1914                 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1915 #endif
1916
1917         lzx_compute_match_costs(c);
1918 }
1919
1920 /* Update the current cost model to reflect the computed Huffman codes.  */
1921 static void
1922 lzx_set_costs_from_codes(struct lzx_compressor *c)
1923 {
1924         unsigned i;
1925         const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1926
1927         for (i = 0; i < c->num_main_syms; i++) {
1928                 c->costs.main[i] = (lens->main[i] ? lens->main[i] :
1929                                     MAIN_CODEWORD_LIMIT) * LZX_BIT_COST;
1930         }
1931
1932         for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
1933                 c->costs.len[i] = (lens->len[i] ? lens->len[i] :
1934                                    LENGTH_CODEWORD_LIMIT) * LZX_BIT_COST;
1935         }
1936
1937 #if LZX_CONSIDER_ALIGNED_COSTS
1938         for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1939                 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] :
1940                                        ALIGNED_CODEWORD_LIMIT) * LZX_BIT_COST;
1941         }
1942 #endif
1943
1944         lzx_compute_match_costs(c);
1945 }
1946
1947 /*
1948  * Choose a "near-optimal" literal/match sequence to use for the current block.
1949  * Because the cost of each Huffman symbol is unknown until the Huffman codes
1950  * have been built and the Huffman codes themselves depend on the symbol
1951  * frequencies, this uses an iterative optimization algorithm to approximate an
1952  * optimal solution.  The first optimization pass for the block uses default
1953  * costs.  Additional passes use costs taken from the Huffman codes computed in
1954  * the previous pass.
1955  */
1956 static inline struct lzx_lru_queue
1957 lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
1958                              struct lzx_output_bitstream * const restrict os,
1959                              const u8 * const restrict block_begin,
1960                              const u32 block_size,
1961                              const struct lzx_lru_queue initial_queue,
1962                              bool is_16_bit)
1963 {
1964         unsigned num_passes_remaining = c->num_optim_passes;
1965         struct lzx_lru_queue new_queue;
1966         u32 seq_idx;
1967
1968         lzx_set_default_costs(c, block_begin, block_size);
1969
1970         for (;;) {
1971                 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1972                                                    initial_queue, is_16_bit);
1973
1974                 if (--num_passes_remaining == 0)
1975                         break;
1976
1977                 /* At least one optimization pass remains; update the costs. */
1978                 lzx_reset_symbol_frequencies(c);
1979                 lzx_tally_item_list(c, block_size, is_16_bit);
1980                 lzx_make_huffman_codes(c);
1981                 lzx_set_costs_from_codes(c);
1982         }
1983
1984         /* Done optimizing.  Generate the sequence list and flush the block. */
1985         lzx_reset_symbol_frequencies(c);
1986         seq_idx = lzx_record_item_list(c, block_size, is_16_bit);
1987         lzx_flush_block(c, os, block_begin, block_size, seq_idx);
1988         return new_queue;
1989 }
1990
1991 /*
1992  * This is the "near-optimal" LZX compressor.
1993  *
1994  * For each block, it performs a relatively thorough graph search to find an
1995  * inexpensive (in terms of compressed size) way to output that block.
1996  *
1997  * Note: there are actually many things this algorithm leaves on the table in
1998  * terms of compression ratio.  So although it may be "near-optimal", it is
1999  * certainly not "optimal".  The goal is not to produce the optimal compression
2000  * ratio, which for LZX is probably impossible within any practical amount of
2001  * time, but rather to produce a compression ratio significantly better than a
2002  * simpler "greedy" or "lazy" parse while still being relatively fast.
2003  */
2004 static inline void
2005 lzx_compress_near_optimal(struct lzx_compressor * restrict c,
2006                           const u8 * const restrict in_begin,
2007                           struct lzx_output_bitstream * restrict os,
2008                           bool is_16_bit)
2009 {
2010         const u8 *       in_next = in_begin;
2011         const u8 * const in_end  = in_begin + c->in_nbytes;
2012         u32 max_len = LZX_MAX_MATCH_LEN;
2013         u32 nice_len = min(c->nice_match_length, max_len);
2014         u32 next_hashes[2] = {};
2015         struct lzx_lru_queue queue;
2016
2017         CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
2018         lzx_lru_queue_init(&queue);
2019
2020         do {
2021                 /* Starting a new block  */
2022                 const u8 * const in_block_begin = in_next;
2023                 const u8 * const in_max_block_end =
2024                         in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
2025                 struct lz_match *cache_ptr = c->match_cache;
2026                 const u8 *next_search_pos = in_next;
2027                 const u8 *next_observation = in_next;
2028                 const u8 *next_pause_point = min(in_next + MIN_BLOCK_SIZE,
2029                                                  in_max_block_end - LZX_MAX_MATCH_LEN - 1);
2030
2031                 init_block_split_stats(&c->split_stats);
2032
2033                 /* Run the block through the matchfinder and cache the matches. */
2034         enter_mf_loop:
2035                 do {
2036                         if (in_next >= next_search_pos) {
2037                                 struct lz_match *lz_matchptr;
2038                                 u32 best_len;
2039
2040                                 lz_matchptr = CALL_BT_MF(is_16_bit, c,
2041                                                          bt_matchfinder_get_matches,
2042                                                          in_begin,
2043                                                          in_next - in_begin,
2044                                                          max_len,
2045                                                          nice_len,
2046                                                          c->max_search_depth,
2047                                                          next_hashes,
2048                                                          &best_len,
2049                                                          cache_ptr + 1);
2050                                 cache_ptr->length = lz_matchptr - (cache_ptr + 1);
2051                                 cache_ptr = lz_matchptr;
2052
2053                                 if (in_next >= next_observation) {
2054                                         best_len = cache_ptr[-1].length;
2055                                         if (best_len) {
2056                                                 observe_match(&c->split_stats, best_len);
2057                                                 next_observation = in_next + best_len;
2058                                         } else {
2059                                                 observe_literal(&c->split_stats, *in_next);
2060                                                 next_observation = in_next + 1;
2061                                         }
2062                                 }
2063                                 /*
2064                                  * If there was a very long match found, then don't
2065                                  * cache any matches for the bytes covered by that
2066                                  * match.  This avoids degenerate behavior when
2067                                  * compressing highly redundant data, where the number
2068                                  * of matches can be very large.
2069                                  *
2070                                  * This heuristic doesn't actually hurt the compression
2071                                  * ratio very much.  If there's a long match, then the
2072                                  * data must be highly compressible, so it doesn't
2073                                  * matter as much what we do.
2074                                  */
2075                                 if (best_len >= nice_len) {
2076                                         next_search_pos = in_next + best_len;
2077                                         next_observation = next_search_pos;
2078                                 }
2079                         } else {
2080                                 CALL_BT_MF(is_16_bit, c,
2081                                            bt_matchfinder_skip_position,
2082                                            in_begin,
2083                                            in_next - in_begin,
2084                                            nice_len,
2085                                            c->max_search_depth,
2086                                            next_hashes);
2087                                 cache_ptr->length = 0;
2088                                 cache_ptr++;
2089                         }
2090                 } while (++in_next < next_pause_point &&
2091                          likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
2092
2093                 if (unlikely(cache_ptr >= &c->match_cache[LZX_CACHE_LENGTH]))
2094                         goto flush_block;
2095
2096                 if (max_len > in_end - in_next) {
2097                         max_len = in_end - in_next;
2098                         nice_len = min(max_len, nice_len);
2099                         if (unlikely(max_len < BT_MATCHFINDER_REQUIRED_NBYTES)) {
2100                                 while (in_next != in_end) {
2101                                         in_next++;
2102                                         cache_ptr->length = 0;
2103                                         cache_ptr++;
2104                                 }
2105                         }
2106                 }
2107
2108                 if (in_next >= in_max_block_end)
2109                         goto flush_block;
2110
2111                 if (c->split_stats.num_new_observations >= NUM_OBSERVATIONS_PER_BLOCK_CHECK) {
2112                         if (do_end_block_check(&c->split_stats, in_next - in_block_begin))
2113                                 goto flush_block;
2114                         if (in_max_block_end - in_next <= MIN_BLOCK_SIZE)
2115                                 next_observation = in_max_block_end;
2116                 }
2117
2118                 next_pause_point = min(in_next +
2119                                        NUM_OBSERVATIONS_PER_BLOCK_CHECK * 2 -
2120                                        c->split_stats.num_new_observations,
2121                                        in_max_block_end - LZX_MAX_MATCH_LEN - 1);
2122                 goto enter_mf_loop;
2123
2124         flush_block:
2125                 /* We've finished running the block through the matchfinder.
2126                  * Now choose a match/literal sequence and write the block.  */
2127
2128                 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
2129                                                      in_next - in_block_begin,
2130                                                      queue, is_16_bit);
2131         } while (in_next != in_end);
2132 }
2133
2134 static void
2135 lzx_compress_near_optimal_16(struct lzx_compressor *c,
2136                              struct lzx_output_bitstream *os)
2137 {
2138         lzx_compress_near_optimal(c, c->in_buffer, os, true);
2139 }
2140
2141 static void
2142 lzx_compress_near_optimal_32(struct lzx_compressor *c,
2143                              struct lzx_output_bitstream *os)
2144 {
2145         lzx_compress_near_optimal(c, c->in_buffer, os, false);
2146 }
2147
2148 /*
2149  * Given a pointer to the current byte sequence and the current list of recent
2150  * match offsets, find the longest repeat offset match.
2151  *
2152  * If no match of at least 2 bytes is found, then return 0.
2153  *
2154  * If a match of at least 2 bytes is found, then return its length and set
2155  * *rep_max_idx_ret to the index of its offset in @recent_offsets.
2156  */
2157 static unsigned
2158 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
2159                                      const u32 bytes_remaining,
2160                                      const u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
2161                                      unsigned *rep_max_idx_ret)
2162 {
2163         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
2164
2165         const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
2166         const u16 next_2_bytes = load_u16_unaligned(in_next);
2167         const u8 *matchptr;
2168         unsigned rep_max_len;
2169         unsigned rep_max_idx;
2170         unsigned rep_len;
2171
2172         matchptr = in_next - recent_offsets[0];
2173         if (load_u16_unaligned(matchptr) == next_2_bytes)
2174                 rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
2175         else
2176                 rep_max_len = 0;
2177         rep_max_idx = 0;
2178
2179         matchptr = in_next - recent_offsets[1];
2180         if (load_u16_unaligned(matchptr) == next_2_bytes) {
2181                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
2182                 if (rep_len > rep_max_len) {
2183                         rep_max_len = rep_len;
2184                         rep_max_idx = 1;
2185                 }
2186         }
2187
2188         matchptr = in_next - recent_offsets[2];
2189         if (load_u16_unaligned(matchptr) == next_2_bytes) {
2190                 rep_len = lz_extend(in_next, matchptr, 2, max_len);
2191                 if (rep_len > rep_max_len) {
2192                         rep_max_len = rep_len;
2193                         rep_max_idx = 2;
2194                 }
2195         }
2196
2197         *rep_max_idx_ret = rep_max_idx;
2198         return rep_max_len;
2199 }
2200
2201 /* Fast heuristic scoring for lazy parsing: how "good" is this match?  */
2202 static inline unsigned
2203 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
2204 {
2205         unsigned score = len;
2206
2207         if (adjusted_offset < 4096)
2208                 score++;
2209
2210         if (adjusted_offset < 256)
2211                 score++;
2212
2213         return score;
2214 }
2215
2216 static inline unsigned
2217 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
2218 {
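        /* A fixed bonus over an explicit offset match of the same length,
         * roughly reflecting that a repeat offset match needs no extra offset
         * bits and uses the cheap recent-offset slots.  The queue index itself
         * does not affect the score.  */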
2219         return rep_len + 3;
2220 }
2221
2222 /* This is the "lazy" LZX compressor.  */
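/*
 * "Lazy" parsing: find the longest match at the current position, then check
 * whether an even better match starts at the next position.  If so, output a
 * literal and defer the match decision by one byte; otherwise output the
 * current match.  Repeat offset matches are preferred via a score bonus.
 */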
2223 static inline void
2224 lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
2225                   bool is_16_bit)
2226 {
2227         const u8 * const in_begin = c->in_buffer;
2228         const u8 *       in_next = in_begin;
2229         const u8 * const in_end  = in_begin + c->in_nbytes;
2230         unsigned max_len = LZX_MAX_MATCH_LEN;
2231         unsigned nice_len = min(c->nice_match_length, max_len);
2232         STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
2233         u32 recent_offsets[3] = {1, 1, 1};
2234         u32 next_hashes[2] = {};
2235
2236         CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
2237
2238         do {
2239                 /* Starting a new block  */
2240
2241                 const u8 * const in_block_begin = in_next;
2242                 const u8 * const in_max_block_end =
2243                         in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
2244                 struct lzx_sequence *next_seq = c->chosen_sequences;
2245                 unsigned cur_len;
2246                 u32 cur_offset;
2247                 u32 cur_offset_data;
2248                 unsigned cur_score;
2249                 unsigned next_len;
2250                 u32 next_offset;
2251                 u32 next_offset_data;
2252                 unsigned next_score;
2253                 unsigned rep_max_len;
2254                 unsigned rep_max_idx;
2255                 unsigned rep_score;
2256                 unsigned skip_len;
2257                 u32 litrunlen = 0;
2258
2259                 lzx_reset_symbol_frequencies(c);
2260                 init_block_split_stats(&c->split_stats);
2261
2262                 do {
2263                         if (unlikely(max_len > in_end - in_next)) {
2264                                 max_len = in_end - in_next;
2265                                 nice_len = min(max_len, nice_len);
2266                         }
2267
2268                         /* Find the longest match at the current position.  */
2269
2270                         cur_len = CALL_HC_MF(is_16_bit, c,
2271                                              hc_matchfinder_longest_match,
2272                                              in_begin,
2273                                              in_next - in_begin,
2274                                              2,
2275                                              max_len,
2276                                              nice_len,
2277                                              c->max_search_depth,
2278                                              next_hashes,
2279                                              &cur_offset);
2280                         if (cur_len < 3 ||
2281                             (cur_len == 3 &&
2282                              cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
2283                              cur_offset != recent_offsets[0] &&
2284                              cur_offset != recent_offsets[1] &&
2285                              cur_offset != recent_offsets[2]))
2286                         {
2287                                 /* There was no match found, or the only match found
2288                                  * was a distant length 3 match.  Output a literal.  */
2289                                 lzx_record_literal(c, *in_next, &litrunlen);
2290                                 observe_literal(&c->split_stats, *in_next);
2291                                 in_next++;
2292                                 continue;
2293                         }
2294
2295                         observe_match(&c->split_stats, cur_len);
2296
2297                         if (cur_offset == recent_offsets[0]) {
2298                                 in_next++;
2299                                 cur_offset_data = 0;
2300                                 skip_len = cur_len - 1;
2301                                 goto choose_cur_match;
2302                         }
2303
2304                         cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
2305                         cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
2306
2307                         /* Consider a repeat offset match  */
2308                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2309                                                                            in_end - in_next,
2310                                                                            recent_offsets,
2311                                                                            &rep_max_idx);
2312                         in_next++;
2313
2314                         if (rep_max_len >= 3 &&
2315                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2316                                                                        rep_max_idx)) >= cur_score)
2317                         {
2318                                 cur_len = rep_max_len;
2319                                 cur_offset_data = rep_max_idx;
2320                                 skip_len = rep_max_len - 1;
2321                                 goto choose_cur_match;
2322                         }
2323
2324                 have_cur_match:
2325
2326                         /* We have a match at the current position.  */
2327
2328                         /* If we have a very long match, choose it immediately.  */
2329                         if (cur_len >= nice_len) {
2330                                 skip_len = cur_len - 1;
2331                                 goto choose_cur_match;
2332                         }
2333
2334                         /* See if there's a better match at the next position.  */
2335
2336                         if (unlikely(max_len > in_end - in_next)) {
2337                                 max_len = in_end - in_next;
2338                                 nice_len = min(max_len, nice_len);
2339                         }
2340
2341                         next_len = CALL_HC_MF(is_16_bit, c,
2342                                               hc_matchfinder_longest_match,
2343                                               in_begin,
2344                                               in_next - in_begin,
2345                                               cur_len - 2,
2346                                               max_len,
2347                                               nice_len,
2348                                               c->max_search_depth / 2,
2349                                               next_hashes,
2350                                               &next_offset);
2351
2352                         if (next_len <= cur_len - 2) {
2353                                 in_next++;
2354                                 skip_len = cur_len - 2;
2355                                 goto choose_cur_match;
2356                         }
2357
2358                         next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
2359                         next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
2360
2361                         rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2362                                                                            in_end - in_next,
2363                                                                            recent_offsets,
2364                                                                            &rep_max_idx);
2365                         in_next++;
2366
2367                         if (rep_max_len >= 3 &&
2368                             (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2369                                                                        rep_max_idx)) >= next_score)
2370                         {
2371
2372                                 if (rep_score > cur_score) {
2373                                         /* The next match is better, and it's a
2374                                          * repeat offset match.  */
2375                                         lzx_record_literal(c, *(in_next - 2),
2376                                                            &litrunlen);
2377                                         cur_len = rep_max_len;
2378                                         cur_offset_data = rep_max_idx;
2379                                         skip_len = cur_len - 1;
2380                                         goto choose_cur_match;
2381                                 }
2382                         } else {
2383                                 if (next_score > cur_score) {
2384                                         /* The next match is better, and it's an
2385                                          * explicit offset match.  */
2386                                         lzx_record_literal(c, *(in_next - 2),
2387                                                            &litrunlen);
2388                                         cur_len = next_len;
2389                                         cur_offset_data = next_offset_data;
2390                                         cur_score = next_score;
2391                                         goto have_cur_match;
2392                                 }
2393                         }
2394
2395                         /* The original match was better.  */
2396                         skip_len = cur_len - 2;
2397
2398                 choose_cur_match:
2399                         lzx_record_match(c, cur_len, cur_offset_data,
2400                                          recent_offsets, is_16_bit,
2401                                          &litrunlen, &next_seq);
2402                         in_next = CALL_HC_MF(is_16_bit, c,
2403                                              hc_matchfinder_skip_positions,
2404                                              in_begin,
2405                                              in_next - in_begin,
2406                                              in_end - in_begin,
2407                                              skip_len,
2408                                              next_hashes);
2409                 } while (in_next < in_max_block_end &&
2410                          !should_end_block(&c->split_stats, in_block_begin, in_next, in_end));
2411
2412                 lzx_finish_sequence(next_seq, litrunlen);
2413
2414                 lzx_flush_block(c, os, in_block_begin, in_next - in_block_begin, 0);
2415
2416         } while (in_next != in_end);
2417 }
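
/*
 * For reference, the decision logic of the lazy parser above reduces to the
 * following sketch.  This is illustrative C only, not part of the compressor:
 * every name here (find_match(), emit_literal(), emit_match(), score(),
 * struct match, MIN_MATCH_LEN) is a hypothetical stand-in for the matchfinder
 * calls, the lzx_record_*() functions and the lzx_*_match_score() heuristics
 * used above.
 *
 *	while (pos < end) {
 *		struct match cur = find_match(pos);
 *		if (cur.len < MIN_MATCH_LEN) {
 *			emit_literal(data[pos++]);
 *			continue;
 *		}
 *		while (cur.len < nice_len) {
 *			// Peek one byte ahead; if a higher-scoring match
 *			// starts there, emit the current byte as a literal
 *			// and defer the decision to that position.
 *			struct match next = find_match(pos + 1);
 *			if (score(next) <= score(cur))
 *				break;
 *			emit_literal(data[pos++]);
 *			cur = next;
 *		}
 *		emit_match(cur);
 *		pos += cur.len;
 *	}
 *
 * The real loop additionally prefers repeat-offset matches, searches with a
 * reduced depth when peeking ahead, and respects the block-splitting logic.
 */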
2418
2419 static void
2420 lzx_compress_lazy_16(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2421 {
2422         lzx_compress_lazy(c, os, true);
2423 }
2424
2425 static void
2426 lzx_compress_lazy_32(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2427 {
2428         lzx_compress_lazy(c, os, false);
2429 }
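
/*
 * Note: lzx_compress_lazy() takes 'is_16_bit' as a constant argument, so once
 * it is inlined into the two thin wrappers above the compiler can (presumably)
 * specialize the whole parsing loop for 16-bit or 32-bit matchfinder
 * positions.  A minimal sketch of the same pattern, with hypothetical names:
 *
 *	static inline void do_work(struct ctx *c, bool small_positions)
 *	{
 *		// ... loop whose branches on 'small_positions' fold away ...
 *	}
 *
 *	static void do_work_16(struct ctx *c) { do_work(c, true);  }
 *	static void do_work_32(struct ctx *c) { do_work(c, false); }
 */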
2430
2431 /* Generate the acceleration tables used to map adjusted match offsets to
      * offset slots.  */
2432 static void
2433 lzx_init_offset_slot_tabs(struct lzx_compressor *c)
2434 {
2435         u32 adjusted_offset = 0;
2436         unsigned slot = 0;
2437
2438         /* slots [0, 29]: small adjusted offsets, one table entry per offset  */
2439         for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
2440              adjusted_offset++)
2441         {
2442                 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2443                         slot++;
2444                 c->offset_slot_tab_1[adjusted_offset] = slot;
2445         }
2446
2447         /* slots [30, 49]: larger adjusted offsets, one table entry per 2^14 offsets  */
2448         for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
2449              adjusted_offset += (u32)1 << 14)
2450         {
2451                 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2452                         slot++;
2453                 c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
2454         }
2455 }
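
/*
 * Sketch of the two-level lookup these tables are built for.  This is
 * illustrative only; offset_slot_for() is a hypothetical name for the lookup
 * that the rest of this file performs with these tables:
 *
 *	static unsigned
 *	offset_slot_for(const struct lzx_compressor *c, u32 adjusted_offset)
 *	{
 *		if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
 *			return c->offset_slot_tab_1[adjusted_offset];
 *		return c->offset_slot_tab_2[adjusted_offset >> 14];
 *	}
 *
 * so the offset slot of each match costs a single table load instead of a
 * search through lzx_offset_slot_base[].
 */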
2456
2457 static size_t
2458 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
2459 {
2460         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2461                 if (lzx_is_16_bit(max_bufsize))
2462                         return offsetof(struct lzx_compressor, hc_mf_16) +
2463                                hc_matchfinder_size_16(max_bufsize);
2464                 else
2465                         return offsetof(struct lzx_compressor, hc_mf_32) +
2466                                hc_matchfinder_size_32(max_bufsize);
2467         } else {
2468                 if (lzx_is_16_bit(max_bufsize))
2469                         return offsetof(struct lzx_compressor, bt_mf_16) +
2470                                bt_matchfinder_size_16(max_bufsize);
2471                 else
2472                         return offsetof(struct lzx_compressor, bt_mf_32) +
2473                                bt_matchfinder_size_32(max_bufsize);
2474         }
2475 }
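
/*
 * The size computed above is "offsetof() of the matchfinder member plus the
 * matchfinder's own size", which works because the matchfinder must be the
 * final, variable-sized part of struct lzx_compressor; the allocation is
 * simply truncated to the one variant that will actually be used.  A minimal
 * sketch of the same trick, with hypothetical types:
 *
 *	struct ctx {
 *		int params;
 *		union {
 *			struct small_mf small;	// low compression levels
 *			struct big_mf   big;	// high compression levels
 *		};
 *	};
 *
 *	size_t size = use_small ? offsetof(struct ctx, small) + small_mf_size(n)
 *				: offsetof(struct ctx, big)   + big_mf_size(n);
 *	struct ctx *c = malloc(size);
 */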
2476
2477 static u64
2478 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
2479                       bool destructive)
2480 {
2481         u64 size = 0;
2482
2483         if (max_bufsize > LZX_MAX_WINDOW_SIZE)
2484                 return 0;
2485
2486         size += lzx_get_compressor_size(max_bufsize, compression_level);
2487         if (!destructive)
2488                 size += max_bufsize; /* in_buffer */
2489         return size;
2490 }
2491
2492 static int
2493 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
2494                       bool destructive, void **c_ret)
2495 {
2496         unsigned window_order;
2497         struct lzx_compressor *c;
2498
2499         window_order = lzx_get_window_order(max_bufsize);
2500         if (window_order == 0)
2501                 return WIMLIB_ERR_INVALID_PARAM;
2502
2503         c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
2504         if (!c)
2505                 goto oom0;
2506
2507         c->destructive = destructive;
2508
2509         c->num_main_syms = lzx_get_num_main_syms(window_order);
2510         c->window_order = window_order;
2511
2512         if (!c->destructive) {
2513                 c->in_buffer = MALLOC(max_bufsize);
2514                 if (!c->in_buffer)
2515                         goto oom1;
2516         }
2517
2518         if (compression_level <= LZX_MAX_FAST_LEVEL) {
2519
2520                 /* Fast compression: Use lazy parsing.  */
2521
2522                 if (lzx_is_16_bit(max_bufsize))
2523                         c->impl = lzx_compress_lazy_16;
2524                 else
2525                         c->impl = lzx_compress_lazy_32;
2526                 c->max_search_depth = (60 * compression_level) / 20;
2527                 c->nice_match_length = (80 * compression_level) / 20;
2528
2529                 /* lzx_compress_lazy() needs max_search_depth >= 2 because it
2530                  * halves the max_search_depth when attempting a lazy match, and
2531                  * max_search_depth cannot be 0.  */
2532                 if (c->max_search_depth < 2)
2533                         c->max_search_depth = 2;
2534         } else {
2535
2536                 /* Normal / high compression: Use near-optimal parsing.  */
2537
2538                 if (lzx_is_16_bit(max_bufsize))
2539                         c->impl = lzx_compress_near_optimal_16;
2540                 else
2541                         c->impl = lzx_compress_near_optimal_32;
2542
2543                 /* Scale nice_match_length and max_search_depth with the
2544                  * compression level.  */
2545                 c->max_search_depth = (24 * compression_level) / 50;
2546                 c->nice_match_length = (48 * compression_level) / 50;
2547
2548                 /* Set a number of optimization passes appropriate for the
2549                  * compression level.  */
2550
2551                 c->num_optim_passes = 1;
2552
2553                 if (compression_level >= 45)
2554                         c->num_optim_passes++;
2555
2556                 /* Use more optimization passes for higher compression levels.
2557                  * But the more passes there are, the less they help --- so
2558                  * don't add them linearly.  */
2559                 if (compression_level >= 70) {
2560                         c->num_optim_passes++;
2561                         if (compression_level >= 100)
2562                                 c->num_optim_passes++;
2563                         if (compression_level >= 150)
2564                                 c->num_optim_passes++;
2565                         if (compression_level >= 200)
2566                                 c->num_optim_passes++;
2567                         if (compression_level >= 300)
2568                                 c->num_optim_passes++;
2569                 }
2570         }
2571
2572         /* max_search_depth == 0 is invalid.  */
2573         if (c->max_search_depth < 1)
2574                 c->max_search_depth = 1;
2575
2576         if (c->nice_match_length > LZX_MAX_MATCH_LEN)
2577                 c->nice_match_length = LZX_MAX_MATCH_LEN;
2578
2579         lzx_init_offset_slot_tabs(c);
2580         *c_ret = c;
2581         return 0;
2582
2583 oom1:
2584         FREE(c);
2585 oom0:
2586         return WIMLIB_ERR_NOMEM;
2587 }
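
/*
 * For illustration, the scaling in lzx_create_compressor() yields, for a few
 * example levels (assuming these levels fall in the near-optimal range, i.e.
 * above LZX_MAX_FAST_LEVEL):
 *
 *	level	max_search_depth	nice_match_length		num_optim_passes
 *	 50	(24*50)/50  = 24	(48*50)/50  = 48		2
 *	100	(24*100)/50 = 48	(48*100)/50 = 96		4
 *	300	(24*300)/50 = 144	capped to LZX_MAX_MATCH_LEN	7
 *
 * and, for a level in the lazy (fast) range:
 *
 *	 20	(60*20)/20  = 60	(80*20)/20  = 80		(none)
 */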
2588
2589 static size_t
2590 lzx_compress(const void *restrict in, size_t in_nbytes,
2591              void *restrict out, size_t out_nbytes_avail, void *restrict _c)
2592 {
2593         struct lzx_compressor *c = _c;
2594         struct lzx_output_bitstream os;
2595         size_t result;
2596
2597         /* Don't bother trying to compress very small inputs.  */
2598         if (in_nbytes < 100)
2599                 return 0;
2600
2601         /* Make the input data available in c->in_buffer, either by aliasing
              * the caller's buffer (destructive mode) or by copying it into the
              * internal buffer, then preprocess it.  */
2602         if (c->destructive)
2603                 c->in_buffer = (void *)in;
2604         else
2605                 memcpy(c->in_buffer, in, in_nbytes);
2606         c->in_nbytes = in_nbytes;
2607         lzx_preprocess(c->in_buffer, in_nbytes);
2608
2609         /* Initially, the previous Huffman codeword lengths are all zeroes.  */
2610         c->codes_index = 0;
2611         memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2612
2613         /* Initialize the output bitstream.  */
2614         lzx_init_output(&os, out, out_nbytes_avail);
2615
2616         /* Call the compress() implementation selected in
              * lzx_create_compressor() (lazy or near-optimal parsing, 16-bit or
              * 32-bit matchfinder).  */
2617         (*c->impl)(c, &os);
2618
2619         /* Flush the output bitstream and return the compressed size or 0.
              * In destructive mode the preprocessing was done in place in the
              * caller's buffer, so restore the original data if the compression
              * failed.  */
2620         result = lzx_flush_output(&os);
2621         if (!result && c->destructive)
2622                 lzx_postprocess(c->in_buffer, c->in_nbytes);
2623         return result;
2624 }
2625
2626 static void
2627 lzx_free_compressor(void *_c)
2628 {
2629         struct lzx_compressor *c = _c;
2630
2631         if (!c->destructive)
2632                 FREE(c->in_buffer);
2633         FREE(c);
2634 }
2635
2636 const struct compressor_ops lzx_compressor_ops = {
2637         .get_needed_memory  = lzx_get_needed_memory,
2638         .create_compressor  = lzx_create_compressor,
2639         .compress           = lzx_compress,
2640         .free_compressor    = lzx_free_compressor,
2641 };
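
/*
 * Illustrative usage of the entry points above through the ops table (a
 * sketch only; real callers reach these functions through wimlib's generic
 * compressor interface, and the buffer sizes and level used here are
 * arbitrary):
 *
 *	void *c;
 *	size_t csize;
 *
 *	if (lzx_compressor_ops.create_compressor(32768, 50, false, &c) != 0)
 *		return;		// WIMLIB_ERR_NOMEM or WIMLIB_ERR_INVALID_PARAM
 *
 *	csize = lzx_compressor_ops.compress(in, in_size, out, out_avail, c);
 *	// csize == 0 means the input was too small to bother with or did not
 *	// fit in out_avail bytes; otherwise 'out' holds csize bytes of LZX data.
 *
 *	lzx_compressor_ops.free_compressor(c);
 */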