4 * A compressor for the LZX compression format, as used in WIM files.
8 * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
10 * This file is free software; you can redistribute it and/or modify it under
11 * the terms of the GNU Lesser General Public License as published by the Free
12 * Software Foundation; either version 3 of the License, or (at your option) any
15 * This file is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this file; if not, see http://www.gnu.org/licenses/.
26 * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27 * compression format, as used in the WIM (Windows IMaging) file format.
29 * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30 * "Near-optimal" is significantly slower than "lazy", but results in a better
31 * compression ratio. The "near-optimal" algorithm is used at the default compression level.
34 * This file may need some slight modifications to be used outside of the WIM
35 * format. In particular, in other situations the LZX block header might be
36 * slightly different, and sliding window support might be required.
38 * Note: LZX is a compression format derived from DEFLATE, the format used by
39 * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40 * Certain details are quite similar, such as the method for storing Huffman
41 * codes. However, the main differences are:
43 * - LZX preprocesses the data to attempt to make x86 machine code slightly more
44 * compressible before attempting to compress it further.
46 * - LZX uses a "main" alphabet which combines literals and matches, with the
47 * match symbols containing a "length header" (giving all or part of the match
48 * length) and an "offset slot" (giving, roughly speaking, the order of
49 * magnitude of the match offset).
51 * - LZX does not have static Huffman blocks (that is, the kind with preset
52 * Huffman codes); however it does have two types of dynamic Huffman blocks
53 * ("verbatim" and "aligned").
55 * - LZX has a minimum match length of 2 rather than 3. Length 2 matches can be
56 * useful, but generally only if the parser is smart about choosing them.
58 * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59 * of match offsets. This is very useful for certain types of files, such as
60 * binary files that have repeating records.
68 * Start a new LZX block (with new Huffman codes) after this many bytes.
70 * Note: actual block sizes may slightly exceed this value.
72 * TODO: recursive splitting and cost evaluation might be good for an extremely
73 * high compression mode, but otherwise it is almost always far too slow for how
74 * much it helps. Perhaps some sort of heuristic would be useful?
76 #define LZX_DIV_BLOCK_SIZE 32768
79 * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the
80 * match cache for each byte position. This value should be high enough so that
81 * nearly all the time, all matches found in a given block can fit in the match
82 * cache. However, fallback behavior (immediately terminating the block) on
83 * cache overflow is still required.
85 #define LZX_CACHE_PER_POS 7
88 * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
89 * excluding the extra "overflow" entries. The per-position multiplier is '1 +
90 * LZX_CACHE_PER_POS' instead of 'LZX_CACHE_PER_POS' because there is an
91 * overhead of one lz_match per position, used to hold the match count at that position. */
94 #define LZX_CACHE_LENGTH (LZX_DIV_BLOCK_SIZE * (1 + LZX_CACHE_PER_POS))
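/*
 * Editorial worked example (not from the original source): with
 * LZX_DIV_BLOCK_SIZE = 32768 and LZX_CACHE_PER_POS = 7 this works out to
 *
 *	LZX_CACHE_LENGTH = 32768 * (1 + 7) = 262144
 *
 * 'struct lz_match' entries, i.e. one count entry plus up to 7 cached matches
 * per byte position; the match_cache[] array declared later adds a further
 * LZX_MAX_MATCHES_PER_POS + LZX_MAX_MATCH_LEN - 1 slots of slack for the
 * worst-case overflow.
 */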
97 * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
98 * ever be saved in the match cache for a single position. Since each match we
99 * save for a single position has a distinct length, we can use the number of
100 * possible match lengths in LZX as this bound. This bound is guaranteed to be
101 * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
102 * it will never actually be reached.
104 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
107 * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
108 * This makes it possible to consider fractional bit costs.
110 * Note: this is only useful as a statistical trick for when the true costs are
111 * unknown. In reality, each token in LZX requires a whole number of bits to output. */
114 #define LZX_BIT_COST 16
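/*
 * Illustrative sketch (editorial addition, not from the original source):
 * every cost in this file is expressed in 1/LZX_BIT_COST-ths of a bit, so a
 * symbol with a 5-bit codeword has cost 5 * 16 = 80, and a scaled cost such
 * as 140 corresponds to 140 / 16 = 8.75 bits.  A hypothetical helper for
 * converting back to whole bits could look like:
 *
 *	static inline u32
 *	lzx_cost_in_bits(u32 scaled_cost)
 *	{
 *		return scaled_cost / LZX_BIT_COST;
 *	}
 */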
117 * Consideration of aligned offset costs is disabled for now, due to
118 * insufficient benefit gained from the time spent.
120 #define LZX_CONSIDER_ALIGNED_COSTS 0
123 * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the faster ("lazy") parsing algorithm. */
126 #define LZX_MAX_FAST_LEVEL 34
129 * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
130 * for finding length 2 matches. This can be as high as 16 (in which case the
131 * hash function is trivial), but using a smaller hash table speeds up
132 * compression due to reduced cache pressure.
134 #define LZX_HASH2_ORDER 12
135 #define LZX_HASH2_LENGTH (1UL << LZX_HASH2_ORDER)
138 * These are the compressor-side limits on the codeword lengths for each Huffman
139 * code. To make outputting bits slightly faster, some of these limits are
140 * lower than the limits defined by the LZX format. This does not significantly
141 * affect the compression ratio, at least for the block sizes we use.
143 #define MAIN_CODEWORD_LIMIT 12 /* 64-bit: can buffer 4 main symbols */
144 #define LENGTH_CODEWORD_LIMIT 12
145 #define ALIGNED_CODEWORD_LIMIT 7
146 #define PRE_CODEWORD_LIMIT 7
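/*
 * Editorial note on the arithmetic behind these limits (assuming a 64-bit
 * machine_word_t): with MAIN_CODEWORD_LIMIT = 12, four main symbols need at
 * most 4 * 12 = 48 bits, which fits in the bit buffer together with the 16
 * bits that may still be pending (48 + 16 = 64).  Similarly,
 * 2 * PRE_CODEWORD_LIMIT + 1 = 15 bits can be buffered even on 32-bit builds.
 * These are the same quantities checked by the STATIC_ASSERT()s further down.
 */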
148 #include "wimlib/compress_common.h"
149 #include "wimlib/compressor_ops.h"
150 #include "wimlib/error.h"
151 #include "wimlib/lz_extend.h"
152 #include "wimlib/lzx_common.h"
153 #include "wimlib/unaligned.h"
154 #include "wimlib/util.h"
156 /* Matchfinders with 16-bit positions */
158 #define MF_SUFFIX _16
159 #include "wimlib/bt_matchfinder.h"
160 #include "wimlib/hc_matchfinder.h"
162 /* Matchfinders with 32-bit positions */
166 #define MF_SUFFIX _32
167 #include "wimlib/bt_matchfinder.h"
168 #include "wimlib/hc_matchfinder.h"
170 struct lzx_output_bitstream;
172 /* Codewords for the LZX Huffman codes. */
173 struct lzx_codewords {
174 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
175 u32 len[LZX_LENCODE_NUM_SYMBOLS];
176 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
179 /* Codeword lengths (in bits) for the LZX Huffman codes.
180 * A zero length means the corresponding codeword has zero frequency. */
182 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
183 u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
184 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
187 /* Cost model for near-optimal parsing */
190 /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
191 * length 'len' match that has an offset belonging to 'offset_slot'. */
192 u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
194 /* Cost for each symbol in the main code */
195 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
197 /* Cost for each symbol in the length code */
198 u32 len[LZX_LENCODE_NUM_SYMBOLS];
200 #if LZX_CONSIDER_ALIGNED_COSTS
201 /* Cost for each symbol in the aligned code */
202 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
206 /* Codewords and lengths for the LZX Huffman codes. */
208 struct lzx_codewords codewords;
209 struct lzx_lens lens;
212 /* Symbol frequency counters for the LZX Huffman codes. */
214 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
215 u32 len[LZX_LENCODE_NUM_SYMBOLS];
216 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
220 * Represents a run of literals followed by a match or end-of-block. This
221 * struct is needed to temporarily store items chosen by the parser, since items
222 * cannot be written until all items for the block have been chosen and the
223 * block's Huffman codes have been computed.
225 struct lzx_sequence {
227 /* The number of literals in the run. This may be 0. The literals are
228 * not stored explicitly in this structure; instead, they are read
229 * directly from the uncompressed data. */
232 /* If the next field doesn't indicate end-of-block, then this is the
233 * match length minus LZX_MIN_MATCH_LEN. */
236 /* If bit 31 is clear, then this field contains the match header in bits
237 * 0-8 and the match offset plus LZX_OFFSET_ADJUSTMENT in bits 9-30.
238 * Otherwise, this sequence's literal run was the last literal run in
239 * the block, so there is no match that follows it. */
240 u32 adjusted_offset_and_match_hdr;
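/*
 * Packing sketch (editorial illustration, not from the original source).
 * For a length-5 match at explicit offset 1000 whose offset slot is 'slot',
 * the fields above would be filled in roughly as:
 *
 *	adjusted_length = 5 - LZX_MIN_MATCH_LEN;
 *	match_hdr       = slot * LZX_NUM_LEN_HEADERS +
 *			  min(adjusted_length, LZX_NUM_PRIMARY_LENS);
 *	adjusted_offset_and_match_hdr =
 *		((1000 + LZX_OFFSET_ADJUSTMENT) << 9) | match_hdr;
 *
 * while the value 0x80000000 marks the final, match-less literal run.
 */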
244 * This structure represents a byte position in the input buffer and a node in
245 * the graph of possible match/literal choices.
247 * Logically, each incoming edge to this node is labeled with a literal or a
248 * match that can be taken to reach this position from an earlier position; and
249 * each outgoing edge from this node is labeled with a literal or a match that
250 * can be taken to advance from this position to a later position.
252 struct lzx_optimum_node {
254 /* The cost, in bits, of the lowest-cost path that has been found to
255 * reach this position. This can change as progressively lower cost
256 * paths are found to reach this position. */
260 * The match or literal that was taken to reach this position. This can
261 * change as progressively lower cost paths are found to reach this
264 * This variable is divided into two bitfields.
267 * Literals: Low bits are 0, high bits are the literal.
269 * Explicit offset matches:
270 * Low bits are the match length, high bits are the offset plus 2.
272 * Repeat offset matches:
273 * Low bits are the match length, high bits are the queue index.
276 #define OPTIMUM_OFFSET_SHIFT 9
277 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
278 } _aligned_attribute(8);
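/*
 * Editorial illustration (not part of the original compressor): helpers
 * showing how an 'item' value from the node above decodes.  The function
 * names are hypothetical; the real consumers of this encoding are
 * lzx_tally_item_list() and lzx_record_item_list() below.
 */
static inline unsigned
lzx_example_item_length(u32 item)
{
	/* 0 for a literal, otherwise the match length */
	return item & OPTIMUM_LEN_MASK;
}

static inline u32
lzx_example_item_payload(u32 item)
{
	/* The literal byte, the recent-offset queue index, or the match
	 * offset plus LZX_OFFSET_ADJUSTMENT, depending on the item kind */
	return item >> OPTIMUM_OFFSET_SHIFT;
}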
281 * Least-recently-used queue for match offsets.
283 * This is represented as a 64-bit integer for efficiency. There are three
284 * offsets of 21 bits each. The remaining (most significant) bit is garbage.
286 struct lzx_lru_queue {
290 #define LZX_QUEUE64_OFFSET_SHIFT 21
291 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
293 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
294 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
295 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
297 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
298 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
299 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
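/*
 * Editorial worked example (not from the original source): the queue packs
 * R0 into bits 0-20, R1 into bits 21-41, and R2 into bits 42-62 of 'R'.
 * Pushing a new offset therefore shifts the whole queue left by 21 bits:
 * pushing offset 3000 onto a queue holding (R0=100, R1=30, R2=1) yields
 * (R0=3000, R1=100, R2=30), with the old R2 shifted out of the three tracked
 * slots.
 */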
302 lzx_lru_queue_init(struct lzx_lru_queue *queue)
304 queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
305 ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
306 ((u64)1 << LZX_QUEUE64_R2_SHIFT);
310 lzx_lru_queue_R0(struct lzx_lru_queue queue)
312 return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
316 lzx_lru_queue_R1(struct lzx_lru_queue queue)
318 return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
322 lzx_lru_queue_R2(struct lzx_lru_queue queue)
324 return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
327 /* Push a match offset onto the front (most recently used) end of the queue. */
328 static inline struct lzx_lru_queue
329 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
331 return (struct lzx_lru_queue) {
332 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
336 /* Pop a match offset off the front (most recently used) end of the queue. */
338 lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
340 u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
341 queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
345 /* Swap a match offset to the front of the queue. */
346 static inline struct lzx_lru_queue
347 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
353 return (struct lzx_lru_queue) {
354 .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
355 (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
356 (queue.R & LZX_QUEUE64_R2_MASK),
359 return (struct lzx_lru_queue) {
360 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
361 (queue.R & LZX_QUEUE64_R1_MASK) |
362 (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
366 /* The main LZX compressor structure */
367 struct lzx_compressor {
369 /* The "nice" match length: if a match of this length is found, then
370 * choose it immediately without further consideration. */
371 unsigned nice_match_length;
373 /* The maximum search depth: consider at most this many potential
374 * matches at each position. */
375 unsigned max_search_depth;
377 /* The log base 2 of the LZX window size for LZ match offset encoding
378 * purposes. This will be >= LZX_MIN_WINDOW_ORDER and <=
379 * LZX_MAX_WINDOW_ORDER. */
380 unsigned window_order;
382 /* The number of symbols in the main alphabet. This depends on
383 * @window_order, since @window_order determines the maximum possible match offset and thus the number of offset slots. */
385 unsigned num_main_syms;
387 /* Number of optimization passes per block */
388 unsigned num_optim_passes;
390 /* The preprocessed buffer of data being compressed */
393 /* The number of bytes of data to be compressed, which is the number of
394 * bytes of data in @in_buffer that are actually valid. */
397 /* Pointer to the compress() implementation chosen at allocation time */
398 void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
400 /* If true, the compressor need not preserve the input buffer if it
401 * compresses the data successfully. */
404 /* The Huffman symbol frequency counters for the current block. */
405 struct lzx_freqs freqs;
407 /* The Huffman codes for the current and previous blocks. The one with
408 * index 'codes_index' is for the current block, and the other one is
409 * for the previous block. */
410 struct lzx_codes codes[2];
411 unsigned codes_index;
413 /* The matches and literals that the parser has chosen for the current
414 * block. The required length of this array is limited by the maximum
415 * number of matches that can ever be chosen for a single block. */
416 struct lzx_sequence chosen_sequences[DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN)];
418 /* Tables for mapping adjusted offsets to offset slots */
420 /* offset slots [0, 29] */
421 u8 offset_slot_tab_1[32768];
423 /* offset slots [30, 49] */
424 u8 offset_slot_tab_2[128];
427 /* Data for greedy or lazy parsing */
429 /* Hash chains matchfinder (MUST BE LAST!!!) */
431 struct hc_matchfinder_16 hc_mf_16;
432 struct hc_matchfinder_32 hc_mf_32;
436 /* Data for near-optimal parsing */
439 * The graph nodes for the current block.
441 * We need at least 'LZX_DIV_BLOCK_SIZE +
442 * LZX_MAX_MATCH_LEN - 1' nodes because that is the
443 * maximum block size that may be used. Add 1 because
444 * we need a node to represent end-of-block.
446 * It is possible that nodes past end-of-block are
447 * accessed during match consideration, but this can
448 * only occur if the block was truncated at
449 * LZX_DIV_BLOCK_SIZE. So the same bound still applies.
450 * Note that since nodes past the end of the block will
451 * never actually have an effect on the items that are
452 * chosen for the block, it makes no difference what
453 * their costs are initialized to (if anything).
455 struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE +
456 LZX_MAX_MATCH_LEN - 1 + 1];
458 /* The cost model for the current block */
459 struct lzx_costs costs;
462 * Cached matches for the current block. This array
463 * contains the matches that were found at each position
464 * in the block. Specifically, for each position, there
465 * is a special 'struct lz_match' whose 'length' field
466 * contains the number of matches that were found at
467 * that position; this is followed by the matches
468 * themselves, if any, sorted by strictly increasing length.
471 * Note: in rare cases, there will be a very high number
472 * of matches in the block and this array will overflow.
473 * If this happens, we force the end of the current
474 * block. LZX_CACHE_LENGTH is the length at which we
475 * actually check for overflow. The extra slots beyond
476 * this are enough to absorb the worst case overflow,
477 * which occurs if starting at
478 * &match_cache[LZX_CACHE_LENGTH - 1], we write the
479 * match count header, then write
480 * LZX_MAX_MATCHES_PER_POS matches, then skip searching
481 * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
482 * write the match count header for each.
484 struct lz_match match_cache[LZX_CACHE_LENGTH +
485 LZX_MAX_MATCHES_PER_POS +
486 LZX_MAX_MATCH_LEN - 1];
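/*
 * Layout sketch (editorial illustration, not from the original source): if
 * the first position of a block has two matches and the second has none,
 * the cache begins
 *
 *	[count=2][len=3 match][len=7 match][count=0] ...
 *
 * where each bracketed element is one 'struct lz_match'.
 */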
488 /* Hash table for finding length 2 matches */
489 u32 hash2_tab[LZX_HASH2_LENGTH];
491 /* Binary trees matchfinder (MUST BE LAST!!!) */
493 struct bt_matchfinder_16 bt_mf_16;
494 struct bt_matchfinder_32 bt_mf_32;
501 * Will a matchfinder using 16-bit positions be sufficient for compressing
502 * buffers of up to the specified size? The limit could be 65536 bytes, but we
503 * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
504 * This requires that the limit be no more than the length of offset_slot_tab_1 (32768). */
508 lzx_is_16_bit(size_t max_bufsize)
510 STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
511 return max_bufsize <= 32768;
515 * The following macros call either the 16-bit or the 32-bit version of a
516 * matchfinder function based on the value of 'is_16_bit', which will be known
517 * at compilation time.
520 #define CALL_HC_MF(is_16_bit, c, funcname, ...) \
521 ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
522 CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__));
524 #define CALL_BT_MF(is_16_bit, c, funcname, ...) \
525 ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \
526 CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__));
529 * Structure to keep track of the current state of sending bits to the
530 * compressed output buffer.
532 * The LZX bitstream is encoded as a sequence of 16-bit coding units.
534 struct lzx_output_bitstream {
536 /* Bits that haven't yet been written to the output buffer. */
537 machine_word_t bitbuf;
539 /* Number of bits currently held in @bitbuf. */
542 /* Pointer to the start of the output buffer. */
545 /* Pointer to the position in the output buffer at which the next coding
546 * unit should be written. */
549 /* Pointer just past the end of the output buffer, rounded down to a
550 * 2-byte boundary. */
554 /* Can the specified number of bits always be added to 'bitbuf' after any
555 * pending 16-bit coding units have been flushed? */
556 #define CAN_BUFFER(n) ((n) <= (8 * sizeof(machine_word_t)) - 16)
559 * Initialize the output bitstream.
562 * The output bitstream structure to initialize.
564 * The buffer being written to.
566 * Size of @buffer, in bytes.
569 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
574 os->next = os->start;
575 os->end = os->start + (size & ~1);
578 /* Add some bits to the bitbuffer variable of the output bitstream. The caller
579 * must make sure there is enough room. */
581 lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
583 os->bitbuf = (os->bitbuf << num_bits) | bits;
584 os->bitcount += num_bits;
587 /* Flush bits from the bitbuffer variable to the output buffer. 'max_num_bits'
588 * specifies the maximum number of bits that may have been added since the last flush. */
591 lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
593 if (os->end - os->next < 6)
595 put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 16), os->next + 0);
596 if (max_num_bits > 16)
597 put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 32), os->next + 2);
598 if (max_num_bits > 32)
599 put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 48), os->next + 4);
600 os->next += (os->bitcount >> 4) << 1;
604 /* Add at most 16 bits to the bitbuffer and flush it. */
606 lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
608 lzx_add_bits(os, bits, num_bits);
609 lzx_flush_bits(os, 16);
613 * Flush the last coding unit to the output buffer if needed. Return the total
614 * number of bytes written to the output buffer, or 0 if an overflow occurred.
617 lzx_flush_output(struct lzx_output_bitstream *os)
619 if (os->end - os->next < 6)
622 if (os->bitcount != 0) {
623 put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next);
627 return os->next - os->start;
630 /* Build the main, length, and aligned offset Huffman codes used in LZX.
632 * This takes as input the frequency tables for each code and produces as output
633 * a set of tables that map symbols to codewords and codeword lengths. */
635 lzx_make_huffman_codes(struct lzx_compressor *c)
637 const struct lzx_freqs *freqs = &c->freqs;
638 struct lzx_codes *codes = &c->codes[c->codes_index];
640 STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
641 MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
642 STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 9 &&
643 LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
644 STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
645 ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
647 make_canonical_huffman_code(c->num_main_syms,
651 codes->codewords.main);
653 make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
654 LENGTH_CODEWORD_LIMIT,
657 codes->codewords.len);
659 make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
660 ALIGNED_CODEWORD_LIMIT,
663 codes->codewords.aligned);
666 /* Reset the symbol frequencies for the LZX Huffman codes. */
668 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
670 memset(&c->freqs, 0, sizeof(c->freqs));
674 lzx_compute_precode_items(const u8 lens[restrict],
675 const u8 prev_lens[restrict],
676 u32 precode_freqs[restrict],
677 unsigned precode_items[restrict])
686 itemptr = precode_items;
689 while (!((len = lens[run_start]) & 0x80)) {
691 /* len = the length being repeated */
693 /* Find the next run of codeword lengths. */
695 run_end = run_start + 1;
697 /* Fast case for a single length. */
698 if (likely(len != lens[run_end])) {
699 delta = prev_lens[run_start] - len;
702 precode_freqs[delta]++;
708 /* Extend the run. */
711 } while (len == lens[run_end]);
716 /* Symbol 18: RLE 20 to 51 zeroes at a time. */
717 while ((run_end - run_start) >= 20) {
718 extra_bits = min((run_end - run_start) - 20, 0x1f);
720 *itemptr++ = 18 | (extra_bits << 5);
721 run_start += 20 + extra_bits;
724 /* Symbol 17: RLE 4 to 19 zeroes at a time. */
725 if ((run_end - run_start) >= 4) {
726 extra_bits = min((run_end - run_start) - 4, 0xf);
728 *itemptr++ = 17 | (extra_bits << 5);
729 run_start += 4 + extra_bits;
733 /* A run of nonzero lengths. */
735 /* Symbol 19: RLE 4 to 5 of any length at a time. */
736 while ((run_end - run_start) >= 4) {
737 extra_bits = (run_end - run_start) > 4;
738 delta = prev_lens[run_start] - len;
742 precode_freqs[delta]++;
743 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
744 run_start += 4 + extra_bits;
748 /* Output any remaining lengths without RLE. */
749 while (run_start != run_end) {
750 delta = prev_lens[run_start] - len;
753 precode_freqs[delta]++;
759 return itemptr - precode_items;
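/*
 * Example of the item encoding above (editorial note, not from the original
 * source): bits 0-4 of an item hold the precode symbol and bits 5+ hold its
 * extra bits, so a run of 23 zero lengths becomes the single item
 *
 *	18 | ((23 - 20) << 5)
 *
 * and symbol 19 additionally carries the delta symbol in bits 6+, e.g.
 * 19 | (1 << 5) | (delta << 6) for a run of 5 equal nonzero lengths.
 */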
763 * Output a Huffman code in the compressed form used in LZX.
765 * The Huffman code is represented in the output as a logical series of codeword
766 * lengths from which the Huffman code, which must be in canonical form, can be
769 * The codeword lengths are themselves compressed using a separate Huffman code,
770 * the "precode", which contains a symbol for each possible codeword length in
771 * the larger code as well as several special symbols to represent repeated
772 * codeword lengths (a form of run-length encoding). The precode is itself
773 * constructed in canonical form, and its codeword lengths are represented
774 * literally in 20 4-bit fields that immediately precede the compressed codeword
775 * lengths of the larger code.
777 * Furthermore, the codeword lengths of the larger code are actually represented
778 * as deltas from the codeword lengths of the corresponding code in the previous
782 * Bitstream to which to write the compressed Huffman code.
784 * The codeword lengths, indexed by symbol, in the Huffman code.
786 * The codeword lengths, indexed by symbol, in the corresponding Huffman
787 * code in the previous block, or all zeroes if this is the first block.
789 * The number of symbols in the Huffman code.
792 lzx_write_compressed_code(struct lzx_output_bitstream *os,
793 const u8 lens[restrict],
794 const u8 prev_lens[restrict],
797 u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
798 u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
799 u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
800 unsigned precode_items[num_lens];
801 unsigned num_precode_items;
802 unsigned precode_item;
803 unsigned precode_sym;
805 u8 saved = lens[num_lens];
806 *(u8 *)(lens + num_lens) = 0x80;
808 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
809 precode_freqs[i] = 0;
811 /* Compute the "items" (RLE / literal tokens and extra bits) with which
812 * the codeword lengths in the larger code will be output. */
813 num_precode_items = lzx_compute_precode_items(lens,
818 /* Build the precode. */
819 STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 &&
820 PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN);
821 make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
823 precode_freqs, precode_lens,
826 /* Output the lengths of the codewords in the precode. */
827 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
828 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
830 /* Output the encoded lengths of the codewords in the larger code. */
831 for (i = 0; i < num_precode_items; i++) {
832 precode_item = precode_items[i];
833 precode_sym = precode_item & 0x1F;
834 lzx_add_bits(os, precode_codewords[precode_sym],
835 precode_lens[precode_sym]);
836 if (precode_sym >= 17) {
837 if (precode_sym == 17) {
838 lzx_add_bits(os, precode_item >> 5, 4);
839 } else if (precode_sym == 18) {
840 lzx_add_bits(os, precode_item >> 5, 5);
842 lzx_add_bits(os, (precode_item >> 5) & 1, 1);
843 precode_sym = precode_item >> 6;
844 lzx_add_bits(os, precode_codewords[precode_sym],
845 precode_lens[precode_sym]);
848 STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1));
849 lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1);
852 *(u8 *)(lens + num_lens) = saved;
856 * Write all matches and literal bytes (which were precomputed) in an LZX
857 * compressed block to the output bitstream in the final compressed form.
861 * The output bitstream.
863 * The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
864 * LZX_BLOCKTYPE_VERBATIM).
866 * The uncompressed data of the block.
868 * The matches and literals to output, given as a series of sequences.
870 * The main, length, and aligned offset Huffman codes for the current
871 * LZX compressed block.
874 lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
875 const u8 *block_data, const struct lzx_sequence sequences[],
876 const struct lzx_codes *codes)
878 const struct lzx_sequence *seq = sequences;
879 u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED);
882 /* Output the next sequence. */
884 unsigned litrunlen = seq->litrunlen;
886 unsigned main_symbol;
887 unsigned adjusted_length;
889 unsigned offset_slot;
890 unsigned num_extra_bits;
893 /* Output the literal run of the sequence. */
895 if (litrunlen) { /* Is the literal run nonempty? */
897 /* Verify optimization is enabled on 64-bit */
898 STATIC_ASSERT(sizeof(machine_word_t) < 8 ||
899 CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT));
901 if (CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT)) {
903 /* 64-bit: write 4 literals at a time. */
904 while (litrunlen >= 4) {
905 unsigned lit0 = block_data[0];
906 unsigned lit1 = block_data[1];
907 unsigned lit2 = block_data[2];
908 unsigned lit3 = block_data[3];
909 lzx_add_bits(os, codes->codewords.main[lit0], codes->lens.main[lit0]);
910 lzx_add_bits(os, codes->codewords.main[lit1], codes->lens.main[lit1]);
911 lzx_add_bits(os, codes->codewords.main[lit2], codes->lens.main[lit2]);
912 lzx_add_bits(os, codes->codewords.main[lit3], codes->lens.main[lit3]);
913 lzx_flush_bits(os, 4 * MAIN_CODEWORD_LIMIT);
918 unsigned lit = *block_data++;
919 lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
921 unsigned lit = *block_data++;
922 lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
924 unsigned lit = *block_data++;
925 lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
926 lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
928 lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
931 lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT);
935 /* 32-bit: write 1 literal at a time. */
937 unsigned lit = *block_data++;
938 lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
939 lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
940 } while (--litrunlen);
944 /* Was this the last literal run? */
945 if (seq->adjusted_offset_and_match_hdr & 0x80000000)
948 /* Nope; output the match. */
950 match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
951 main_symbol = LZX_NUM_CHARS + match_hdr;
952 adjusted_length = seq->adjusted_length;
954 block_data += adjusted_length + LZX_MIN_MATCH_LEN;
956 offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
957 adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
959 num_extra_bits = lzx_extra_offset_bits[offset_slot];
960 extra_bits = adjusted_offset - lzx_offset_slot_base[offset_slot];
962 #define MAX_MATCH_BITS (MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
963 14 + ALIGNED_CODEWORD_LIMIT)
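/* Editorial note: with the limits defined earlier, MAX_MATCH_BITS is
 * 12 + 12 + 14 + 7 = 45 bits: one main symbol, one length symbol, up to 14
 * verbatim extra offset bits, and one aligned offset symbol, which a 64-bit
 * bit buffer can hold between flushes. */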
965 /* Verify optimization is enabled on 64-bit */
966 STATIC_ASSERT(sizeof(machine_word_t) < 8 || CAN_BUFFER(MAX_MATCH_BITS));
968 /* Output the main symbol for the match. */
970 lzx_add_bits(os, codes->codewords.main[main_symbol],
971 codes->lens.main[main_symbol]);
972 if (!CAN_BUFFER(MAX_MATCH_BITS))
973 lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
975 /* If needed, output the length symbol for the match. */
977 if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
978 lzx_add_bits(os, codes->codewords.len[adjusted_length - LZX_NUM_PRIMARY_LENS],
979 codes->lens.len[adjusted_length - LZX_NUM_PRIMARY_LENS]);
980 if (!CAN_BUFFER(MAX_MATCH_BITS))
981 lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
984 /* Output the extra offset bits for the match. In aligned
985 * offset blocks, the lowest 3 bits of the adjusted offset are
986 * Huffman-encoded using the aligned offset code, provided that
987 * at least 3 extra offset bits are required. All other
988 * extra offset bits are output verbatim. */
990 if ((adjusted_offset & ones_if_aligned) >= 16) {
992 lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
993 num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
994 if (!CAN_BUFFER(MAX_MATCH_BITS))
995 lzx_flush_bits(os, 14);
997 lzx_add_bits(os, codes->codewords.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK],
998 codes->lens.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]);
999 if (!CAN_BUFFER(MAX_MATCH_BITS))
1000 lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
1002 lzx_add_bits(os, extra_bits, num_extra_bits);
1003 if (!CAN_BUFFER(MAX_MATCH_BITS))
1004 lzx_flush_bits(os, 17);
1007 if (CAN_BUFFER(MAX_MATCH_BITS))
1008 lzx_flush_bits(os, MAX_MATCH_BITS);
1010 /* Advance to the next sequence. */
1016 lzx_write_compressed_block(const u8 *block_begin,
1019 unsigned window_order,
1020 unsigned num_main_syms,
1021 const struct lzx_sequence sequences[],
1022 const struct lzx_codes * codes,
1023 const struct lzx_lens * prev_lens,
1024 struct lzx_output_bitstream * os)
1026 LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
1027 block_type == LZX_BLOCKTYPE_VERBATIM);
1029 /* The first three bits indicate the type of block and are one of the
1030 * LZX_BLOCKTYPE_* constants. */
1031 lzx_write_bits(os, block_type, 3);
1033 /* Output the block size.
1035 * The original LZX format seemed to always encode the block size in 3
1036 * bytes. However, the implementation in WIMGAPI, as used in WIM files,
1037 * uses the first bit to indicate whether the block is the default size
1038 * (32768) or a different size given explicitly by the next 16 bits.
1040 * By default, this compressor uses a window size of 32768 and therefore
1041 * follows the WIMGAPI behavior. However, this compressor also supports
1042 * window sizes greater than 32768 bytes, which do not appear to be
1043 * supported by WIMGAPI. In such cases, we retain the default size bit
1044 * to mean a size of 32768 bytes but output non-default block size in 24
1045 * bits rather than 16. The compatibility of this behavior is unknown
1046 * because WIMs created with chunk size greater than 32768 can seemingly
1047 * only be opened by wimlib anyway. */
1048 if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
1049 lzx_write_bits(os, 1, 1);
1051 lzx_write_bits(os, 0, 1);
1053 if (window_order >= 16)
1054 lzx_write_bits(os, block_size >> 16, 8);
1056 lzx_write_bits(os, block_size & 0xFFFF, 16);
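/* Example (editorial note, not from the original source): a final block of
 * 20000 bytes in a 32768-byte window is encoded as a 0 bit followed by the
 * 16-bit value 20000; with window_order >= 16, the same size would be
 * written as a 0 bit, then the high 8 bits (here 0), then the low 16 bits. */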
1059 /* If it's an aligned offset block, output the aligned offset code. */
1060 if (block_type == LZX_BLOCKTYPE_ALIGNED) {
1061 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1062 lzx_write_bits(os, codes->lens.aligned[i],
1063 LZX_ALIGNEDCODE_ELEMENT_SIZE);
1067 /* Output the main code (two parts). */
1068 lzx_write_compressed_code(os, codes->lens.main,
1071 lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
1072 prev_lens->main + LZX_NUM_CHARS,
1073 num_main_syms - LZX_NUM_CHARS);
1075 /* Output the length code. */
1076 lzx_write_compressed_code(os, codes->lens.len,
1078 LZX_LENCODE_NUM_SYMBOLS);
1080 /* Output the compressed matches and literals. */
1081 lzx_write_sequences(os, block_type, block_begin, sequences, codes);
1084 /* Given the frequencies of symbols in an LZX-compressed block and the
1085 * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
1086 * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
1087 * will take fewer bits to output. */
1089 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1090 const struct lzx_codes * codes)
1092 u32 aligned_cost = 0;
1093 u32 verbatim_cost = 0;
1095 /* A verbatim block requires 3 bits in each place that an aligned symbol
1096 * would be used in an aligned offset block. */
1097 for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1098 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
1099 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1102 /* Account for output of the aligned offset code. */
1103 aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1105 if (aligned_cost < verbatim_cost)
1106 return LZX_BLOCKTYPE_ALIGNED;
1108 return LZX_BLOCKTYPE_VERBATIM;
1112 * Return the offset slot for the specified adjusted match offset, using the
1113 * compressor's acceleration tables to speed up the mapping.
1115 static inline unsigned
1116 lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
1119 if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
1120 return c->offset_slot_tab_1[adjusted_offset];
1121 return c->offset_slot_tab_2[adjusted_offset >> 14];
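/* Example (editorial note): an adjusted offset of 70000 is outside
 * offset_slot_tab_1, so it is looked up as offset_slot_tab_2[70000 >> 14],
 * i.e. offset_slot_tab_2[4], which holds one of the slots in [30, 49]. */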
1125 * Finish an LZX block:
1127 * - build the Huffman codes
1128 * - decide whether to output the block as VERBATIM or ALIGNED
1129 * - output the block
1130 * - swap the indices of the current and previous Huffman codes
1133 lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
1134 const u8 *block_begin, u32 block_size, u32 seq_idx)
1138 lzx_make_huffman_codes(c);
1140 block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
1141 &c->codes[c->codes_index]);
1142 lzx_write_compressed_block(block_begin,
1147 &c->chosen_sequences[seq_idx],
1148 &c->codes[c->codes_index],
1149 &c->codes[c->codes_index ^ 1].lens,
1151 c->codes_index ^= 1;
1154 /* Tally the Huffman symbol for a literal and increment the literal run length.
1157 lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
1159 c->freqs.main[literal]++;
1163 /* Tally the Huffman symbol for a match, save the match data and the length of
1164 * the preceding literal run in the next lzx_sequence, and update the recent offsets queue. */
1167 lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
1168 u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
1169 u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
1171 u32 litrunlen = *litrunlen_p;
1172 struct lzx_sequence *next_seq = *next_seq_p;
1173 unsigned offset_slot;
1176 v = length - LZX_MIN_MATCH_LEN;
1178 /* Save the literal run length and adjusted length. */
1179 next_seq->litrunlen = litrunlen;
1180 next_seq->adjusted_length = v;
1182 /* Compute the length header and tally the length symbol if needed */
1183 if (v >= LZX_NUM_PRIMARY_LENS) {
1184 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1185 v = LZX_NUM_PRIMARY_LENS;
1188 /* Compute the offset slot */
1189 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1191 /* Compute the match header. */
1192 v += offset_slot * LZX_NUM_LEN_HEADERS;
1194 /* Save the adjusted offset and match header. */
1195 next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v;
1197 /* Tally the main symbol. */
1198 c->freqs.main[LZX_NUM_CHARS + v]++;
1200 /* Update the recent offsets queue. */
1201 if (offset_data < LZX_NUM_RECENT_OFFSETS) {
1202 /* Repeat offset match */
1203 swap(recent_offsets[0], recent_offsets[offset_data]);
1205 /* Explicit offset match */
1207 /* Tally the aligned offset symbol if needed */
1208 if (offset_data >= 16)
1209 c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1211 recent_offsets[2] = recent_offsets[1];
1212 recent_offsets[1] = recent_offsets[0];
1213 recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT;
1216 /* Reset the literal run length and advance to the next sequence. */
1217 *next_seq_p = next_seq + 1;
1221 /* Finish the last lzx_sequence. The last lzx_sequence is just a literal run;
1222 * there is no match. This literal run may be empty. */
1224 lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
1226 last_seq->litrunlen = litrunlen;
1228 /* Special value to mark last sequence */
1229 last_seq->adjusted_offset_and_match_hdr = 0x80000000;
1233 * Given the minimum-cost path computed through the item graph for the current
1234 * block, walk the path and count how many of each symbol in each Huffman-coded
1235 * alphabet would be required to output the items (matches and literals) along
1238 * Note that the path will be walked backwards (from the end of the block to the
1239 * beginning of the block), but this doesn't matter because this function only
1240 * computes frequencies.
1243 lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1245 u32 node_idx = block_size;
1250 unsigned offset_slot;
1252 /* Tally literals until either a match or the beginning of the
1253 * block is reached. */
1255 u32 item = c->optimum_nodes[node_idx].item;
1257 len = item & OPTIMUM_LEN_MASK;
1258 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1260 if (len != 0) /* Not a literal? */
1263 /* Tally the main symbol for the literal. */
1264 c->freqs.main[offset_data]++;
1266 if (--node_idx == 0) /* Beginning of block was reached? */
1272 /* Tally a match. */
1274 /* Tally the aligned offset symbol if needed. */
1275 if (offset_data >= 16)
1276 c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1278 /* Tally the length symbol if needed. */
1279 v = len - LZX_MIN_MATCH_LEN;
1280 if (v >= LZX_NUM_PRIMARY_LENS) {
1281 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1282 v = LZX_NUM_PRIMARY_LENS;
1285 /* Tally the main symbol. */
1286 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1287 v += offset_slot * LZX_NUM_LEN_HEADERS;
1288 c->freqs.main[LZX_NUM_CHARS + v]++;
1290 if (node_idx == 0) /* Beginning of block was reached? */
1296 * Like lzx_tally_item_list(), but this function also generates the list of
1297 * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
1298 * ready to be output to the bitstream after the Huffman codes are computed.
1299 * The lzx_sequences will be written to decreasing memory addresses as the path
1300 * is walked backwards, which means they will end up in the expected
1301 * first-to-last order. The return value is the index in c->chosen_sequences at
1302 * which the lzx_sequences begin.
1305 lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1307 u32 node_idx = block_size;
1308 u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
1311 /* Special value to mark last sequence */
1312 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000;
1314 lit_start_node = node_idx;
1319 unsigned offset_slot;
1321 /* Record literals until either a match or the beginning of the
1322 * block is reached. */
1324 u32 item = c->optimum_nodes[node_idx].item;
1326 len = item & OPTIMUM_LEN_MASK;
1327 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1329 if (len != 0) /* Not a literal? */
1332 /* Tally the main symbol for the literal. */
1333 c->freqs.main[offset_data]++;
1335 if (--node_idx == 0) /* Beginning of block was reached? */
1339 /* Save the literal run length for the next sequence (the
1340 * "previous sequence" when walking backwards). */
1341 c->chosen_sequences[seq_idx--].litrunlen = lit_start_node - node_idx;
1343 lit_start_node = node_idx;
1345 /* Record a match. */
1347 /* Tally the aligned offset symbol if needed. */
1348 if (offset_data >= 16)
1349 c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1351 /* Save the adjusted length. */
1352 v = len - LZX_MIN_MATCH_LEN;
1353 c->chosen_sequences[seq_idx].adjusted_length = v;
1355 /* Tally the length symbol if needed. */
1356 if (v >= LZX_NUM_PRIMARY_LENS) {
1357 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1358 v = LZX_NUM_PRIMARY_LENS;
1361 /* Tally the main symbol. */
1362 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1363 v += offset_slot * LZX_NUM_LEN_HEADERS;
1364 c->freqs.main[LZX_NUM_CHARS + v]++;
1366 /* Save the adjusted offset and match header. */
1367 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr =
1368 (offset_data << 9) | v;
1370 if (node_idx == 0) /* Beginning of block was reached? */
1375 /* Save the literal run length for the first sequence. */
1376 c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
1378 /* Return the index in c->chosen_sequences at which the lzx_sequences begin. */
1384 * Find an inexpensive path through the graph of possible match/literal choices
1385 * for the current block. The nodes of the graph are
1386 * c->optimum_nodes[0...block_size]. They correspond directly to the bytes in
1387 * the current block, plus one extra node for end-of-block. The edges of the
1388 * graph are matches and literals. The goal is to find the minimum cost path
1389 * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
1391 * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1392 * proceeding forwards one node at a time. At each node, a selection of matches
1393 * (len >= 2), as well as the literal byte (len = 1), is considered. An item of
1394 * length 'len' provides a new path to reach the node 'len' bytes later. If
1395 * such a path is the lowest cost found so far to reach that later node, then
1396 * that later node is updated with the new path.
1398 * Note that although this algorithm is based on minimum cost path search, due
1399 * to various simplifying assumptions the result is not guaranteed to be the
1400 * true minimum cost, or "optimal", path over the graph of all valid LZX
1401 * representations of this block.
1403 * Also, note that because of the presence of the recent offsets queue (which is
1404 * a type of adaptive state), the algorithm cannot work backwards and compute
1405 * "cost to end" instead of "cost to beginning". Furthermore, the way the
1406 * algorithm handles this adaptive state in the "minimum cost" parse is actually
1407 * only an approximation. It's possible for the globally optimal, minimum cost
1408 * path to contain a prefix, ending at a position, where that path prefix is
1409 * *not* the minimum cost path to that position. This can happen if such a path
1410 * prefix results in a different adaptive state which results in lower costs
1411 * later. The algorithm does not solve this problem; it only considers the
1412 * lowest cost to reach each individual position.
1414 static inline struct lzx_lru_queue
1415 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1416 const u8 * const restrict block_begin,
1417 const u32 block_size,
1418 const struct lzx_lru_queue initial_queue,
1421 struct lzx_optimum_node *cur_node = c->optimum_nodes;
1422 struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
1423 struct lz_match *cache_ptr = c->match_cache;
1424 const u8 *in_next = block_begin;
1425 const u8 * const block_end = block_begin + block_size;
1427 /* Instead of storing the match offset LRU queues in the
1428 * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1429 * storing them in a smaller array. This works because the algorithm
1430 * only requires a limited history of the adaptive state. Once a given
1431 * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1432 * it is no longer needed. */
1433 struct lzx_lru_queue queues[512];
1435 STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
1436 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
1438 /* Initially, the cost to reach each node is "infinity". */
1439 memset(c->optimum_nodes, 0xFF,
1440 (block_size + 1) * sizeof(c->optimum_nodes[0]));
1442 QUEUE(block_begin) = initial_queue;
1444 /* The following loop runs 'block_size' iterations, one per node. */
1446 unsigned num_matches;
1451 * A selection of matches for the block was already saved in
1452 * memory so that we don't have to run the uncompressed data
1453 * through the matchfinder on every optimization pass. However,
1454 * we still search for repeat offset matches during each
1455 * optimization pass because we cannot predict the state of the
1456 * recent offsets queue. But as a heuristic, we don't bother
1457 * searching for repeat offset matches if the general-purpose
1458 * matchfinder failed to find any matches.
1460 * Note that a match of length n at some offset implies there is
1461 * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1462 * that same offset. In other words, we don't necessarily need
1463 * to use the full length of a match. The key heuristic that
1464 * saves a significant amount of time is that for each
1465 * distinct length, we only consider the smallest offset for
1466 * which that length is available. This heuristic also applies
1467 * to repeat offsets, which we order specially: R0 < R1 < R2 <
1468 * any explicit offset. Of course, this heuristic may
1469 * produce suboptimal results because offset slots in LZX are
1470 * subject to entropy encoding, but in practice this is a useful heuristic. */
1474 num_matches = cache_ptr->length;
1478 struct lz_match *end_matches = cache_ptr + num_matches;
1479 unsigned next_len = LZX_MIN_MATCH_LEN;
1480 unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1483 /* Consider R0 match */
1484 matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1485 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1487 STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
1489 u32 cost = cur_node->cost +
1490 c->costs.match_cost[0][
1491 next_len - LZX_MIN_MATCH_LEN];
1492 if (cost <= (cur_node + next_len)->cost) {
1493 (cur_node + next_len)->cost = cost;
1494 (cur_node + next_len)->item =
1495 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1497 if (unlikely(++next_len > max_len)) {
1498 cache_ptr = end_matches;
1501 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1505 /* Consider R1 match */
1506 matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1507 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1509 if (matchptr[next_len - 1] != in_next[next_len - 1])
1511 for (unsigned len = 2; len < next_len - 1; len++)
1512 if (matchptr[len] != in_next[len])
1515 u32 cost = cur_node->cost +
1516 c->costs.match_cost[1][
1517 next_len - LZX_MIN_MATCH_LEN];
1518 if (cost <= (cur_node + next_len)->cost) {
1519 (cur_node + next_len)->cost = cost;
1520 (cur_node + next_len)->item =
1521 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1523 if (unlikely(++next_len > max_len)) {
1524 cache_ptr = end_matches;
1527 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1531 /* Consider R2 match */
1532 matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1533 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1535 if (matchptr[next_len - 1] != in_next[next_len - 1])
1537 for (unsigned len = 2; len < next_len - 1; len++)
1538 if (matchptr[len] != in_next[len])
1541 u32 cost = cur_node->cost +
1542 c->costs.match_cost[2][
1543 next_len - LZX_MIN_MATCH_LEN];
1544 if (cost <= (cur_node + next_len)->cost) {
1545 (cur_node + next_len)->cost = cost;
1546 (cur_node + next_len)->item =
1547 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1549 if (unlikely(++next_len > max_len)) {
1550 cache_ptr = end_matches;
1553 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1557 while (next_len > cache_ptr->length)
1558 if (++cache_ptr == end_matches)
1561 /* Consider explicit offset matches */
1563 u32 offset = cache_ptr->offset;
1564 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1565 unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
1568 u32 cost = cur_node->cost +
1569 c->costs.match_cost[offset_slot][
1570 next_len - LZX_MIN_MATCH_LEN];
1571 #if LZX_CONSIDER_ALIGNED_COSTS
1572 if (lzx_extra_offset_bits[offset_slot] >=
1573 LZX_NUM_ALIGNED_OFFSET_BITS)
1574 cost += c->costs.aligned[offset_data &
1575 LZX_ALIGNED_OFFSET_BITMASK];
1577 if (cost < (cur_node + next_len)->cost) {
1578 (cur_node + next_len)->cost = cost;
1579 (cur_node + next_len)->item =
1580 (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1582 } while (++next_len <= cache_ptr->length);
1583 } while (++cache_ptr != end_matches);
1588 /* Consider coding a literal.
1590 * To avoid an extra branch, actually checking the preferability
1591 * of coding the literal is integrated into the queue update
1593 literal = *in_next++;
1594 cost = cur_node->cost +
1595 c->costs.main[lzx_main_symbol_for_literal(literal)];
1597 /* Advance to the next position. */
1600 /* The lowest-cost path to the current position is now known.
1601 * Finalize the recent offsets queue that results from taking
1602 * this lowest-cost path. */
1604 if (cost <= cur_node->cost) {
1605 /* Literal: queue remains unchanged. */
1606 cur_node->cost = cost;
1607 cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
1608 QUEUE(in_next) = QUEUE(in_next - 1);
1610 /* Match: queue update is needed. */
1611 unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1612 u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1613 if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1614 /* Explicit offset match: insert offset at front */
1616 lzx_lru_queue_push(QUEUE(in_next - len),
1617 offset_data - LZX_OFFSET_ADJUSTMENT);
1619 /* Repeat offset match: swap offset to front */
1621 lzx_lru_queue_swap(QUEUE(in_next - len),
1625 } while (cur_node != end_node);
1627 /* Return the match offset queue at the end of the minimum cost path. */
1628 return QUEUE(block_end);
1631 /* Given the costs for the main and length codewords, compute 'match_costs'. */
1633 lzx_compute_match_costs(struct lzx_compressor *c)
1635 unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
1636 struct lzx_costs *costs = &c->costs;
1638 for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1640 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1641 unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
1644 #if LZX_CONSIDER_ALIGNED_COSTS
1645 if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
1646 extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1649 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1650 costs->match_cost[offset_slot][i] =
1651 costs->main[main_symbol++] + extra_cost;
1653 extra_cost += costs->main[main_symbol];
1655 for (; i < LZX_NUM_LENS; i++)
1656 costs->match_cost[offset_slot][i] =
1657 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
1661 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
1664 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1667 bool have_byte[256];
1668 unsigned num_used_bytes;
1670 /* The costs below are hard coded to use a scaling factor of 16. */
1671 STATIC_ASSERT(LZX_BIT_COST == 16);
1676 * - Use smaller initial costs for literal symbols when the input buffer
1677 * contains fewer distinct bytes.
1679 * - Assume that match symbols are more costly than literal symbols.
1681 * - Assume that length symbols for shorter lengths are less costly than
1682 * length symbols for longer lengths.
1685 for (i = 0; i < 256; i++)
1686 have_byte[i] = false;
1688 for (i = 0; i < block_size; i++)
1689 have_byte[block[i]] = true;
1692 for (i = 0; i < 256; i++)
1693 num_used_bytes += have_byte[i];
1695 for (i = 0; i < 256; i++)
1696 c->costs.main[i] = 140 - (256 - num_used_bytes) / 4;
1698 for (; i < c->num_main_syms; i++)
1699 c->costs.main[i] = 170;
1701 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1702 c->costs.len[i] = 103 + (i / 4);
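/* Worked example (editorial note, not from the original source): if the
 * block uses 96 distinct byte values, each literal gets an initial cost of
 * 140 - (256 - 96) / 4 = 100, i.e. 100 / 16 = 6.25 bits, while match symbols
 * start at 170 / 16 = 10.625 bits and length symbol i starts at
 * (103 + i/4) / 16 bits. */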
1704 #if LZX_CONSIDER_ALIGNED_COSTS
1705 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1706 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1709 lzx_compute_match_costs(c);
1712 /* Update the current cost model to reflect the computed Huffman codes. */
1714 lzx_update_costs(struct lzx_compressor *c)
1717 const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1719 for (i = 0; i < c->num_main_syms; i++)
1720 c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
1722 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1723 c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
1725 #if LZX_CONSIDER_ALIGNED_COSTS
1726 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1727 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
1730 lzx_compute_match_costs(c);
1733 static inline struct lzx_lru_queue
1734 lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
1735 struct lzx_output_bitstream * const restrict os,
1736 const u8 * const restrict block_begin,
1737 const u32 block_size,
1738 const struct lzx_lru_queue initial_queue,
1741 unsigned num_passes_remaining = c->num_optim_passes;
1742 struct lzx_lru_queue new_queue;
1745 /* The first optimization pass uses a default cost model. Each
1746 * additional optimization pass uses a cost model derived from the
1747 * Huffman code computed in the previous pass. */
1749 lzx_set_default_costs(c, block_begin, block_size);
1750 lzx_reset_symbol_frequencies(c);
1752 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1753 initial_queue, is_16_bit);
1754 if (num_passes_remaining > 1) {
1755 lzx_tally_item_list(c, block_size, is_16_bit);
1756 lzx_make_huffman_codes(c);
1757 lzx_update_costs(c);
1758 lzx_reset_symbol_frequencies(c);
1760 } while (--num_passes_remaining);
1762 seq_idx = lzx_record_item_list(c, block_size, is_16_bit);
1763 lzx_finish_block(c, os, block_begin, block_size, seq_idx);
1768 * This is the "near-optimal" LZX compressor.
1770 * For each block, it performs a relatively thorough graph search to find an
1771 * inexpensive (in terms of compressed size) way to output that block.
1773 * Note: there are actually many things this algorithm leaves on the table in
1774 * terms of compression ratio. So although it may be "near-optimal", it is
1775 * certainly not "optimal". The goal is not to produce the optimal compression
1776 * ratio, which for LZX is probably impossible within any practical amount of
1777 * time, but rather to produce a compression ratio significantly better than a
1778 * simpler "greedy" or "lazy" parse while still being relatively fast.
1781 lzx_compress_near_optimal(struct lzx_compressor *c,
1782 struct lzx_output_bitstream *os,
1785 const u8 * const in_begin = c->in_buffer;
1786 const u8 * in_next = in_begin;
1787 const u8 * const in_end = in_begin + c->in_nbytes;
1788 unsigned max_len = LZX_MAX_MATCH_LEN;
1789 unsigned nice_len = min(c->nice_match_length, max_len);
1791 struct lzx_lru_queue queue;
1793 CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
1794 memset(c->hash2_tab, 0, sizeof(c->hash2_tab));
1795 next_hash = bt_matchfinder_hash_3_bytes(in_next);
1796 lzx_lru_queue_init(&queue);
1799 /* Starting a new block */
1800 const u8 * const in_block_begin = in_next;
1801 const u8 * const in_block_end =
1802 in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1804 /* Run the block through the matchfinder and cache the matches. */
1805 struct lz_match *cache_ptr = c->match_cache;
1807 struct lz_match *lz_matchptr;
1812 /* If approaching the end of the input buffer, adjust
1813 * 'max_len' and 'nice_len' accordingly. */
1814 if (unlikely(max_len > in_end - in_next)) {
1815 max_len = in_end - in_next;
1816 nice_len = min(max_len, nice_len);
1818 /* This extra check is needed to ensure that we
1819 * never output a length 2 match of the very
1820 * last two bytes with the very first two bytes,
1821 * since such a match has an offset too large to
1822 * be represented. */
1823 if (unlikely(max_len < 3)) {
1825 cache_ptr->length = 0;
1831 lz_matchptr = cache_ptr + 1;
1833 /* Check for a length 2 match. */
1834 hash2 = lz_hash_2_bytes(in_next, LZX_HASH2_ORDER);
1835 cur_match = c->hash2_tab[hash2];
1836 c->hash2_tab[hash2] = in_next - in_begin;
1837 if (cur_match != 0 &&
1838 (LZX_HASH2_ORDER == 16 ||
1839 load_u16_unaligned(&in_begin[cur_match]) ==
1840 load_u16_unaligned(in_next)))
1842 lz_matchptr->length = 2;
1843 lz_matchptr->offset = in_next - &in_begin[cur_match];
1847 /* Check for matches of length >= 3. */
1848 lz_matchptr = CALL_BT_MF(is_16_bit, c, bt_matchfinder_get_matches,
1854 c->max_search_depth,
1859 cache_ptr->length = lz_matchptr - (cache_ptr + 1);
1860 cache_ptr = lz_matchptr;
1863 * If there was a very long match found, then don't
1864 * cache any matches for the bytes covered by that
1865 * match. This avoids degenerate behavior when
1866 * compressing highly redundant data, where the number
1867 * of matches can be very large.
1869 * This heuristic doesn't actually hurt the compression
1870 * ratio very much. If there's a long match, then the
1871 * data must be highly compressible, so it doesn't
1872 * matter as much what we do.
1874 if (best_len >= nice_len) {
1877 if (unlikely(max_len > in_end - in_next)) {
1878 max_len = in_end - in_next;
1879 nice_len = min(max_len, nice_len);
1880 if (unlikely(max_len < 3)) {
1882 cache_ptr->length = 0;
1887 c->hash2_tab[lz_hash_2_bytes(in_next, LZX_HASH2_ORDER)] =
1889 CALL_BT_MF(is_16_bit, c, bt_matchfinder_skip_position,
1894 c->max_search_depth,
1897 cache_ptr->length = 0;
1899 } while (--best_len);
1901 } while (in_next < in_block_end &&
1902 likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
1904 /* We've finished running the block through the matchfinder.
1905 * Now choose a match/literal sequence and write the block. */
1907 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
1908 in_next - in_block_begin,
1910 } while (in_next != in_end);
1914 lzx_compress_near_optimal_16(struct lzx_compressor *c,
1915 struct lzx_output_bitstream *os)
1917 lzx_compress_near_optimal(c, os, true);
1921 lzx_compress_near_optimal_32(struct lzx_compressor *c,
1922 struct lzx_output_bitstream *os)
1924 lzx_compress_near_optimal(c, os, false);
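/*
 * Illustrative sketch (not from the original source) of the match cache
 * layout that lzx_compress_near_optimal() builds for each block: for every
 * position run through the matchfinder, one header entry is written, followed
 * by the matches found at that position; the header's 'length' field records
 * how many matches follow (0 for skipped positions).  A consumer would walk
 * it roughly like this:
 *
 *	const struct lz_match *p = c->match_cache;
 *	for (u32 pos = 0; pos < block_size; pos++) {
 *		u32 num_matches = p->length;    // header entry for this position
 *		const struct lz_match *matches = p + 1;
 *		// ... consider matches[0..num_matches - 1] at this position ...
 *		p = matches + num_matches;      // advance to the next header
 *	}
 */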
1928 * Given a pointer to the current byte sequence and the current list of recent
1929 * match offsets, find the longest repeat offset match.
1931 * If no match of at least 2 bytes is found, then return 0.
1933 * If a match of at least 2 bytes is found, then return its length and set
1934 * *rep_max_idx_ret to the index of its offset in @queue.
1937 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
1938 const u32 bytes_remaining,
1939 const u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
1940 unsigned *rep_max_idx_ret)
1942 STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
1943 LZX_ASSERT(bytes_remaining >= 2);
1945 const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
1946 const u16 next_2_bytes = load_u16_unaligned(in_next);
1948 unsigned rep_max_len;
1949 unsigned rep_max_idx;
1952 matchptr = in_next - recent_offsets[0];
1953 if (load_u16_unaligned(matchptr) == next_2_bytes)
1954 rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
1959 matchptr = in_next - recent_offsets[1];
1960 if (load_u16_unaligned(matchptr) == next_2_bytes) {
1961 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1962 if (rep_len > rep_max_len) {
1963 rep_max_len = rep_len;
1968 matchptr = in_next - recent_offsets[2];
1969 if (load_u16_unaligned(matchptr) == next_2_bytes) {
1970 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1971 if (rep_len > rep_max_len) {
1972 rep_max_len = rep_len;
1977 *rep_max_idx_ret = rep_max_idx;
1978 return rep_max_len;
1979 }
1981 /* Fast heuristic scoring for lazy parsing: how "good" is this match? */
1982 static inline unsigned
1983 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
1985 unsigned score = len;
1987 if (adjusted_offset < 4096)
1990 if (adjusted_offset < 256)
1996 static inline unsigned
1997 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
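/*
 * The body of lzx_repeat_offset_match_score() is not shown here.  A minimal
 * sketch (an assumption, not necessarily the original definition) is that a
 * repeat offset match scores a few points above an explicit offset match of
 * the same length, since it needs no extra offset bits, e.g.:
 *
 *	return rep_len + 3;
 *
 * The lazy parser below compares these scores directly against the explicit
 * offset scores computed above, and only accepts a repeat offset candidate
 * when it is at least 3 bytes long.
 */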
2002 /* This is the "lazy" LZX compressor. */
2004 lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
2007 const u8 * const in_begin = c->in_buffer;
2008 const u8 * in_next = in_begin;
2009 const u8 * const in_end = in_begin + c->in_nbytes;
2010 unsigned max_len = LZX_MAX_MATCH_LEN;
2011 unsigned nice_len = min(c->nice_match_length, max_len);
2012 STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
2013 u32 recent_offsets[3] = {1, 1, 1};
2014 u32 next_hashes[2] = {};
2016 CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
2019 /* Starting a new block */
2021 const u8 * const in_block_begin = in_next;
2022 const u8 * const in_block_end =
2023 in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
2024 struct lzx_sequence *next_seq = c->chosen_sequences;
2027 u32 cur_offset_data;
2031 u32 next_offset_data;
2032 unsigned next_score;
2033 unsigned rep_max_len;
2034 unsigned rep_max_idx;
2039 lzx_reset_symbol_frequencies(c);
2042 if (unlikely(max_len > in_end - in_next)) {
2043 max_len = in_end - in_next;
2044 nice_len = min(max_len, nice_len);
2047 /* Find the longest match at the current position. */
2049 cur_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match,
2055 c->max_search_depth,
2060 cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
2061 cur_offset != recent_offsets[0] &&
2062 cur_offset != recent_offsets[1] &&
2063 cur_offset != recent_offsets[2]))
2065 /* There was no match found, or the only match found
2066 * was a distant length 3 match. Output a literal. */
2067 lzx_record_literal(c, *in_next++, &litrunlen);
2071 if (cur_offset == recent_offsets[0]) {
2073 cur_offset_data = 0;
2074 skip_len = cur_len - 1;
2075 goto choose_cur_match;
2078 cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
2079 cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
2081 /* Consider a repeat offset match */
2082 rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2088 if (rep_max_len >= 3 &&
2089 (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2090 rep_max_idx)) >= cur_score)
2092 cur_len = rep_max_len;
2093 cur_offset_data = rep_max_idx;
2094 skip_len = rep_max_len - 1;
2095 goto choose_cur_match;
2100 /* We have a match at the current position. */
2102 /* If we have a very long match, choose it immediately. */
2103 if (cur_len >= nice_len) {
2104 skip_len = cur_len - 1;
2105 goto choose_cur_match;
2108 /* See if there's a better match at the next position. */
2110 if (unlikely(max_len > in_end - in_next)) {
2111 max_len = in_end - in_next;
2112 nice_len = min(max_len, nice_len);
2115 next_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match,
2121 c->max_search_depth / 2,
2125 if (next_len <= cur_len - 2) {
2127 skip_len = cur_len - 2;
2128 goto choose_cur_match;
2131 next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
2132 next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
2134 rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2140 if (rep_max_len >= 3 &&
2141 (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2142 rep_max_idx)) >= next_score)
2145 if (rep_score > cur_score) {
2146 /* The next match is better, and it's a
2147 * repeat offset match. */
2148 lzx_record_literal(c, *(in_next - 2),
2150 cur_len = rep_max_len;
2151 cur_offset_data = rep_max_idx;
2152 skip_len = cur_len - 1;
2153 goto choose_cur_match;
2156 if (next_score > cur_score) {
2157 /* The next match is better, and it's an
2158 * explicit offset match. */
2159 lzx_record_literal(c, *(in_next - 2),
2162 cur_offset_data = next_offset_data;
2163 cur_score = next_score;
2164 goto have_cur_match;
2168 /* The original match was better. */
2169 skip_len = cur_len - 2;
2172 lzx_record_match(c, cur_len, cur_offset_data,
2173 recent_offsets, is_16_bit,
2174 &litrunlen, &next_seq);
2175 in_next = CALL_HC_MF(is_16_bit, c, hc_matchfinder_skip_positions,
2181 } while (in_next < in_block_end);
2183 lzx_finish_sequence(next_seq, litrunlen);
2185 lzx_finish_block(c, os, in_block_begin, in_next - in_block_begin, 0);
2187 } while (in_next != in_end);
2191 lzx_compress_lazy_16(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2193 lzx_compress_lazy(c, os, true);
2197 lzx_compress_lazy_32(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2199 lzx_compress_lazy(c, os, false);
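/*
 * Illustrative pseudocode sketch (not from the original source) of the lazy
 * decision made in lzx_compress_lazy() above: a match found at the current
 * position is committed only after checking whether the next position has a
 * sufficiently better match.
 *
 *	find longest match M1 at position i
 *	if there is no match, or only a weak distant length-3 match:
 *		emit a literal and advance one byte
 *	else if length(M1) >= nice_len:
 *		emit M1 and skip the bytes it covers
 *	else:
 *		find longest match M2 at position i + 1 (half the search depth)
 *		if score(M2) > score(M1):
 *			emit the byte at i as a literal and continue with M2
 *		else:
 *			emit M1 and skip its remaining bytes
 *
 * Repeat offset candidates are scored alongside the explicit offset matches
 * at both positions, so a slightly shorter repeat offset match can win.
 */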
2202 /* Generate the acceleration tables for offset slots. */
2204 lzx_init_offset_slot_tabs(struct lzx_compressor *c)
2206 u32 adjusted_offset = 0;
2210 for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
2213 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2215 c->offset_slot_tab_1[adjusted_offset] = slot;
2218 /* slots [30, 49] */
2219 for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
2220 adjusted_offset += (u32)1 << 14)
2222 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2224 c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
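/*
 * Illustrative sketch (not from the original source) of how these two tables
 * would be consulted when converting an adjusted match offset to its offset
 * slot: small adjusted offsets index offset_slot_tab_1 directly, while larger
 * ones index offset_slot_tab_2 by their high bits.  Assuming a lookup helper
 * along these lines:
 *
 *	static unsigned example_offset_slot(const struct lzx_compressor *c,
 *					    u32 adjusted_offset)
 *	{
 *		if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
 *			return c->offset_slot_tab_1[adjusted_offset];
 *		return c->offset_slot_tab_2[adjusted_offset >> 14];
 *	}
 *
 * the precomputed tables replace a search through lzx_offset_slot_base[] on
 * every match.
 */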
2229 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
2231 if (compression_level <= LZX_MAX_FAST_LEVEL) {
2232 if (lzx_is_16_bit(max_bufsize))
2233 return offsetof(struct lzx_compressor, hc_mf_16) +
2234 hc_matchfinder_size_16(max_bufsize);
2236 return offsetof(struct lzx_compressor, hc_mf_32) +
2237 hc_matchfinder_size_32(max_bufsize);
2239 if (lzx_is_16_bit(max_bufsize))
2240 return offsetof(struct lzx_compressor, bt_mf_16) +
2241 bt_matchfinder_size_16(max_bufsize);
2243 return offsetof(struct lzx_compressor, bt_mf_32) +
2244 bt_matchfinder_size_32(max_bufsize);
2249 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
2254 if (max_bufsize > LZX_MAX_WINDOW_SIZE)
2257 size += lzx_get_compressor_size(max_bufsize, compression_level);
2259 size += max_bufsize; /* in_buffer */
2264 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
2265 bool destructive, void **c_ret)
2267 unsigned window_order;
2268 struct lzx_compressor *c;
2270 window_order = lzx_get_window_order(max_bufsize);
2271 if (window_order == 0)
2272 return WIMLIB_ERR_INVALID_PARAM;
2274 c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
2278 c->destructive = destructive;
2280 c->num_main_syms = lzx_get_num_main_syms(window_order);
2281 c->window_order = window_order;
2283 if (!c->destructive) {
2284 c->in_buffer = MALLOC(max_bufsize);
2289 if (compression_level <= LZX_MAX_FAST_LEVEL) {
2291 /* Fast compression: Use lazy parsing. */
2293 if (lzx_is_16_bit(max_bufsize))
2294 c->impl = lzx_compress_lazy_16;
2296 c->impl = lzx_compress_lazy_32;
2297 c->max_search_depth = (36 * compression_level) / 20;
2298 c->nice_match_length = (72 * compression_level) / 20;
2300 /* lzx_compress_lazy() needs max_search_depth >= 2 because it
2301 * halves the max_search_depth when attempting a lazy match, and
2302 * max_search_depth cannot be 0. */
2303 if (c->max_search_depth < 2)
2304 c->max_search_depth = 2;
2307 /* Normal / high compression: Use near-optimal parsing. */
2309 if (lzx_is_16_bit(max_bufsize))
2310 c->impl = lzx_compress_near_optimal_16;
2312 c->impl = lzx_compress_near_optimal_32;
2314 /* Scale nice_match_length and max_search_depth with the
2315 * compression level. */
2316 c->max_search_depth = (24 * compression_level) / 50;
2317 c->nice_match_length = (32 * compression_level) / 50;
2319 /* Set a number of optimization passes appropriate for the
2320 * compression level. */
2322 c->num_optim_passes = 1;
2324 if (compression_level >= 45)
2325 c->num_optim_passes++;
2327 /* Use more optimization passes for higher compression levels.
2328 * But the more passes there are, the less they help --- so
2329 * don't add them linearly. */
2330 if (compression_level >= 70) {
2331 c->num_optim_passes++;
2332 if (compression_level >= 100)
2333 c->num_optim_passes++;
2334 if (compression_level >= 150)
2335 c->num_optim_passes++;
2336 if (compression_level >= 200)
2337 c->num_optim_passes++;
2338 if (compression_level >= 300)
2339 c->num_optim_passes++;
2343 /* max_search_depth == 0 is invalid. */
2344 if (c->max_search_depth < 1)
2345 c->max_search_depth = 1;
2347 if (c->nice_match_length > LZX_MAX_MATCH_LEN)
2348 c->nice_match_length = LZX_MAX_MATCH_LEN;
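/*
 * Worked example (illustrative, assuming a compression level of 50 selects
 * the near-optimal path): max_search_depth = (24 * 50) / 50 = 24,
 * nice_match_length = (32 * 50) / 50 = 32, and num_optim_passes = 2, since
 * 50 >= 45 but is below the higher thresholds.  At level 100 the same
 * formulas give a search depth of 48, a nice match length of 64, and 4
 * optimization passes (the >= 45, >= 70, and >= 100 thresholds each add one).
 */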
2350 lzx_init_offset_slot_tabs(c);
2357 return WIMLIB_ERR_NOMEM;
2361 lzx_compress(const void *restrict in, size_t in_nbytes,
2362 void *restrict out, size_t out_nbytes_avail, void *restrict _c)
2364 struct lzx_compressor *c = _c;
2365 struct lzx_output_bitstream os;
2368 /* Don't bother trying to compress very small inputs. */
2369 if (in_nbytes < 100)
2372 /* Copy the input data into the internal buffer and preprocess it. */
2373 if (c->destructive)
2374 c->in_buffer = (void *)in;
2375 else
2376 memcpy(c->in_buffer, in, in_nbytes);
2377 c->in_nbytes = in_nbytes;
2378 lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
2380 /* Initially, the previous Huffman codeword lengths are all zeroes. */
2382 memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2384 /* Initialize the output bitstream. */
2385 lzx_init_output(&os, out, out_nbytes_avail);
2387 /* Call the compression level-specific compress() function. */
2388 (*c->impl)(c, &os);
2390 /* Flush the output bitstream and return the compressed size or 0. */
2391 result = lzx_flush_output(&os);
2392 if (!result && c->destructive)
2393 lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes);
2398 lzx_free_compressor(void *_c)
2400 struct lzx_compressor *c = _c;
2402 if (!c->destructive)
2407 const struct compressor_ops lzx_compressor_ops = {
2408 .get_needed_memory = lzx_get_needed_memory,
2409 .create_compressor = lzx_create_compressor,
2410 .compress = lzx_compress,
2411 .free_compressor = lzx_free_compressor,
2412 };
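/*
 * Illustrative usage sketch (not from the original source) of this ops table.
 * The error-code convention (0 meaning success from create_compressor) and
 * the example compression level of 50 are assumptions; per the comments
 * above, compress() returns the compressed size, or 0 if the input was too
 * small or did not fit in the output buffer:
 *
 *	void *c;
 *	size_t csize = 0;
 *	if (lzx_compressor_ops.create_compressor(bufsize, 50, false, &c) == 0) {
 *		csize = lzx_compressor_ops.compress(in, in_nbytes,
 *						    out, out_nbytes_avail, c);
 *		lzx_compressor_ops.free_compressor(c);
 *	}
 */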