4 * A compressor for the LZX compression format, as used in WIM files.
8 * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
10 * This file is free software; you can redistribute it and/or modify it under
11 * the terms of the GNU Lesser General Public License as published by the Free
* Software Foundation; either version 3 of the License, or (at your option) any
* later version.
15 * This file is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this file; if not, see http://www.gnu.org/licenses/.
26 * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27 * compression format, as used in the WIM (Windows IMaging) file format.
29 * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30 * "Near-optimal" is significantly slower than "lazy", but results in a better
* compression ratio. The "near-optimal" algorithm is used at the default
* compression level.
34 * This file may need some slight modifications to be used outside of the WIM
35 * format. In particular, in other situations the LZX block header might be
36 * slightly different, and sliding window support might be required.
38 * Note: LZX is a compression format derived from DEFLATE, the format used by
39 * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40 * Certain details are quite similar, such as the method for storing Huffman
41 * codes. However, the main differences are:
* - LZX preprocesses the data in an attempt to make x86 machine code slightly
* more compressible before compressing it further.
46 * - LZX uses a "main" alphabet which combines literals and matches, with the
47 * match symbols containing a "length header" (giving all or part of the match
48 * length) and an "offset slot" (giving, roughly speaking, the order of
49 * magnitude of the match offset).
51 * - LZX does not have static Huffman blocks (that is, the kind with preset
52 * Huffman codes); however it does have two types of dynamic Huffman blocks
53 * ("verbatim" and "aligned").
55 * - LZX has a minimum match length of 2 rather than 3. Length 2 matches can be
56 * useful, but generally only if the parser is smart about choosing them.
58 * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59 * of match offsets. This is very useful for certain types of files, such as
60 * binary files that have repeating records.
68 * Start a new LZX block (with new Huffman codes) after this many bytes.
70 * Note: actual block sizes may slightly exceed this value.
72 * TODO: recursive splitting and cost evaluation might be good for an extremely
73 * high compression mode, but otherwise it is almost always far too slow for how
74 * much it helps. Perhaps some sort of heuristic would be useful?
76 #define LZX_DIV_BLOCK_SIZE 32768
79 * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the
80 * match cache for each byte position. This value should be high enough so that
* nearly all of the time, all matches found in a given block can fit in the match
82 * cache. However, fallback behavior (immediately terminating the block) on
83 * cache overflow is still required.
85 #define LZX_CACHE_PER_POS 6
88 * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
89 * excluding the extra "overflow" entries. The per-position multiplier is '1 +
90 * LZX_CACHE_PER_POS' instead of 'LZX_CACHE_PER_POS' because there is an
* overhead of one lz_match per position, used to hold the match count at that
* position. */
94 #define LZX_CACHE_LENGTH (LZX_DIV_BLOCK_SIZE * (1 + LZX_CACHE_PER_POS))
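/*
* For reference (not in the original source): with LZX_DIV_BLOCK_SIZE == 32768
* and LZX_CACHE_PER_POS == 6, LZX_CACHE_LENGTH is 32768 * 7 == 229376 entries,
* i.e. about 1.75 MiB of match cache if each 'struct lz_match' is 8 bytes.
*/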
97 * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
98 * ever be saved in the match cache for a single position. Since each match we
99 * save for a single position has a distinct length, we can use the number of
100 * possible match lengths in LZX as this bound. This bound is guaranteed to be
101 * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
102 * it will never actually be reached.
104 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
107 * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
* This makes it possible to consider fractional bit costs.
110 * Note: this is only useful as a statistical trick for when the true costs are
* unknown. In reality, each token in LZX requires a whole number of bits to
* output. */
114 #define LZX_BIT_COST 16
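/*
* Example (added for illustration): with LZX_BIT_COST == 16, a symbol whose
* codeword is 5 bits long has cost 5 * 16 == 80, while an estimated cost of 72
* represents "4.5 bits"; only the relative magnitudes of costs matter to the
* parser.
*/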
117 * Consideration of aligned offset costs is disabled for now, due to
118 * insufficient benefit gained from the time spent.
120 #define LZX_CONSIDER_ALIGNED_COSTS 0
123 * The maximum compression level at which we use the faster algorithm.
125 #define LZX_MAX_FAST_LEVEL 34
128 * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
129 * for finding length 2 matches. This can be as high as 16 (in which case the
130 * hash function is trivial), but using a smaller hash table actually speeds up
131 * compression due to reduced cache pressure.
133 #define LZX_HASH2_ORDER 12
134 #define LZX_HASH2_LENGTH (1UL << LZX_HASH2_ORDER)
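/*
* For reference (not in the original source): with LZX_HASH2_ORDER == 12,
* LZX_HASH2_LENGTH == 4096, so the two input bytes (16 bits) are hashed down
* to a 12-bit table index.
*/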
136 #include "wimlib/lzx_common.h"
139 * The maximum allowed window order for the matchfinder.
141 #define MATCHFINDER_MAX_WINDOW_ORDER LZX_MAX_WINDOW_ORDER
145 #include "wimlib/bt_matchfinder.h"
146 #include "wimlib/compress_common.h"
147 #include "wimlib/compressor_ops.h"
148 #include "wimlib/endianness.h"
149 #include "wimlib/error.h"
150 #include "wimlib/hc_matchfinder.h"
151 #include "wimlib/lz_extend.h"
152 #include "wimlib/unaligned.h"
153 #include "wimlib/util.h"
155 struct lzx_output_bitstream;
157 /* Codewords for the LZX Huffman codes. */
158 struct lzx_codewords {
159 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
160 u32 len[LZX_LENCODE_NUM_SYMBOLS];
u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
/* Codeword lengths (in bits) for the LZX Huffman codes.
* A zero length means the corresponding codeword has zero frequency. */
struct lzx_lens {
u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
u8 len[LZX_LENCODE_NUM_SYMBOLS];
u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
/* Cost model for near-optimal parsing */
struct lzx_costs {
175 /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
176 * length 'len' match that has an offset belonging to 'offset_slot'. */
177 u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
179 /* Cost for each symbol in the main code */
180 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
182 /* Cost for each symbol in the length code */
183 u32 len[LZX_LENCODE_NUM_SYMBOLS];
185 #if LZX_CONSIDER_ALIGNED_COSTS
186 /* Cost for each symbol in the aligned code */
u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
#endif
};
/* Codewords and lengths for the LZX Huffman codes. */
struct lzx_codes {
struct lzx_codewords codewords;
struct lzx_lens lens;
};
/* Symbol frequency counters for the LZX Huffman codes. */
struct lzx_freqs {
u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
u32 len[LZX_LENCODE_NUM_SYMBOLS];
u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
/* Intermediate LZX match/literal format */
struct lzx_item {

/* Bits 0 - 9: Main symbol
* Bits 10 - 17: Length symbol
* Bits 18 - 22: Number of extra offset bits
* Bits 23+ : Extra offset bits */
u64 data;
};
215 * This structure represents a byte position in the input buffer and a node in
216 * the graph of possible match/literal choices.
218 * Logically, each incoming edge to this node is labeled with a literal or a
219 * match that can be taken to reach this position from an earlier position; and
220 * each outgoing edge from this node is labeled with a literal or a match that
221 * can be taken to advance from this position to a later position.
223 struct lzx_optimum_node {
225 /* The cost, in bits, of the lowest-cost path that has been found to
226 * reach this position. This can change as progressively lower cost
* paths are found to reach this position. */
u32 cost;
231 * The match or literal that was taken to reach this position. This can
* change as progressively lower cost paths are found to reach this
* position.
*
* This variable is divided into two bitfields.
*
* Literals:
* Low bits are 1, high bits are the literal.
240 * Explicit offset matches:
241 * Low bits are the match length, high bits are the offset plus 2.
243 * Repeat offset matches:
* Low bits are the match length, high bits are the queue index. */
u32 item;
247 #define OPTIMUM_OFFSET_SHIFT 9
248 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
249 } _aligned_attribute(8);
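/*
* Illustrative helpers (not part of the original compressor), showing how an
* 'item' value of 'struct lzx_optimum_node' decodes, given the bitfield layout
* documented above.
*/
static inline u32
lzx_optimum_item_len(u32 item)
{
return item & OPTIMUM_LEN_MASK; /* 1 if the item is a literal */
}

static inline u32
lzx_optimum_item_offset_data(u32 item)
{
/* Literal byte, repeat queue index, or offset + LZX_OFFSET_ADJUSTMENT */
return item >> OPTIMUM_OFFSET_SHIFT;
}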
252 * Least-recently-used queue for match offsets.
254 * This is represented as a 64-bit integer for efficiency. There are three
* offsets of 21 bits each; the remaining high bit is unused (garbage).
struct lzx_lru_queue {
u64 R;
};
261 #define LZX_QUEUE64_OFFSET_SHIFT 21
262 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
264 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
265 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
266 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
268 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
269 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
270 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
static inline void
lzx_lru_queue_init(struct lzx_lru_queue *queue)
{
queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
((u64)1 << LZX_QUEUE64_R1_SHIFT) |
((u64)1 << LZX_QUEUE64_R2_SHIFT);
}
static inline u64
lzx_lru_queue_R0(struct lzx_lru_queue queue)
{
return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
}
static inline u64
lzx_lru_queue_R1(struct lzx_lru_queue queue)
{
return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
}
static inline u64
lzx_lru_queue_R2(struct lzx_lru_queue queue)
{
return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
}
298 /* Push a match offset onto the front (most recently used) end of the queue. */
299 static inline struct lzx_lru_queue
lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
{
return (struct lzx_lru_queue) {
.R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
};
}
307 /* Pop a match offset off the front (most recently used) end of the queue. */
static inline u32
lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
{
u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
return offset;
}
316 /* Swap a match offset to the front of the queue. */
317 static inline struct lzx_lru_queue
lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
{
if (idx == 0)
return queue;

if (idx == 1)
324 return (struct lzx_lru_queue) {
325 .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
326 (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
(queue.R & LZX_QUEUE64_R2_MASK),
};
return (struct lzx_lru_queue) {
.R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
(queue.R & LZX_QUEUE64_R1_MASK) |
(lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
};
}
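/*
* Example (added for illustration): starting from lzx_lru_queue_init(), which
* fills all three slots with offset 1, lzx_lru_queue_push(queue, 100) yields
* R0 == 100, R1 == 1, R2 == 1; a subsequent lzx_lru_queue_swap(queue, 2) then
* yields R0 == 1, R1 == 1, R2 == 100.
*/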
337 /* The main LZX compressor structure */
338 struct lzx_compressor {
340 /* The "nice" match length: if a match of this length is found, then
341 * choose it immediately without further consideration. */
342 unsigned nice_match_length;
344 /* The maximum search depth: consider at most this many potential
345 * matches at each position. */
346 unsigned max_search_depth;
348 /* The log base 2 of the LZX window size for LZ match offset encoding
349 * purposes. This will be >= LZX_MIN_WINDOW_ORDER and <=
350 * LZX_MAX_WINDOW_ORDER. */
351 unsigned window_order;
353 /* The number of symbols in the main alphabet. This depends on
* @window_order, since @window_order determines the maximum possible
* offset. */
356 unsigned num_main_syms;
358 /* Number of optimization passes per block */
359 unsigned num_optim_passes;
/* The preprocessed buffer of data being compressed */
u8 *in_buffer;
364 /* The number of bytes of data to be compressed, which is the number of
* bytes of data in @in_buffer that are actually valid. */
size_t in_nbytes;
368 /* Pointer to the compress() implementation chosen at allocation time */
369 void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
371 /* The Huffman symbol frequency counters for the current block. */
372 struct lzx_freqs freqs;
374 /* The Huffman codes for the current and previous blocks. The one with
375 * index 'codes_index' is for the current block, and the other one is
376 * for the previous block. */
377 struct lzx_codes codes[2];
378 unsigned codes_index;
381 * The match/literal sequence the algorithm chose for the current block.
383 * Notes on how large this array actually needs to be:
385 * - In lzx_compress_near_optimal(), the maximum block size is
386 * 'LZX_DIV_BLOCK_SIZE + LZX_MAX_MATCH_LEN - 1' bytes. This occurs if
387 * a match of the maximum length is found on the last byte. Although
388 * it is impossible for this particular case to actually result in a
389 * parse of all literals, we reserve this many spaces anyway.
391 * - The worst case for lzx_compress_lazy() is a block of almost all
392 * literals that ends with a series of matches of increasing scores,
393 * causing a sequence of literals to be chosen before the last match
394 * is finally chosen. The number of items actually chosen in this
395 * scenario is limited by the number of distinct match scores that
396 * exist for matches shorter than 'nice_match_length'. Having
397 * 'LZX_MAX_MATCH_LEN - 1' extra spaces is plenty for now.
399 struct lzx_item chosen_items[LZX_DIV_BLOCK_SIZE + LZX_MAX_MATCH_LEN - 1];
401 /* Table mapping match offset => offset slot for small offsets */
402 #define LZX_NUM_FAST_OFFSETS 32768
403 u8 offset_slot_fast[LZX_NUM_FAST_OFFSETS];
406 /* Data for greedy or lazy parsing */
408 /* Hash chains matchfinder (MUST BE LAST!!!) */
409 struct hc_matchfinder hc_mf;
412 /* Data for near-optimal parsing */
415 * The graph nodes for the current block.
417 * We need at least 'LZX_DIV_BLOCK_SIZE +
418 * LZX_MAX_MATCH_LEN - 1' nodes because that is the
419 * maximum block size that may be used. Add 1 because
420 * we need a node to represent end-of-block.
422 * It is possible that nodes past end-of-block are
423 * accessed during match consideration, but this can
424 * only occur if the block was truncated at
425 * LZX_DIV_BLOCK_SIZE. So the same bound still applies.
426 * Note that since nodes past the end of the block will
427 * never actually have an effect on the items that are
428 * chosen for the block, it makes no difference what
429 * their costs are initialized to (if anything).
431 struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE +
432 LZX_MAX_MATCH_LEN - 1 + 1];
434 /* The cost model for the current block */
435 struct lzx_costs costs;
438 * Cached matches for the current block. This array
439 * contains the matches that were found at each position
440 * in the block. Specifically, for each position, there
441 * is a special 'struct lz_match' whose 'length' field
442 * contains the number of matches that were found at
443 * that position; this is followed by the matches
444 * themselves, if any, sorted by strictly increasing
445 * length and strictly increasing offset.
447 * Note: in rare cases, there will be a very high number
448 * of matches in the block and this array will overflow.
449 * If this happens, we force the end of the current
450 * block. LZX_CACHE_LENGTH is the length at which we
451 * actually check for overflow. The extra slots beyond
452 * this are enough to absorb the worst case overflow,
453 * which occurs if starting at
454 * &match_cache[LZX_CACHE_LENGTH - 1], we write the
455 * match count header, then write
456 * LZX_MAX_MATCHES_PER_POS matches, then skip searching
457 * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
458 * write the match count header for each.
460 struct lz_match match_cache[LZX_CACHE_LENGTH +
461 LZX_MAX_MATCHES_PER_POS +
462 LZX_MAX_MATCH_LEN - 1];
464 /* Hash table for finding length 2 matches */
465 pos_t hash2_tab[LZX_HASH2_LENGTH]
466 _aligned_attribute(MATCHFINDER_ALIGNMENT);
468 /* Binary trees matchfinder (MUST BE LAST!!!) */
469 struct bt_matchfinder bt_mf;
475 * Structure to keep track of the current state of sending bits to the
476 * compressed output buffer.
478 * The LZX bitstream is encoded as a sequence of 16-bit coding units.
struct lzx_output_bitstream {

/* Bits that haven't yet been written to the output buffer. */
u32 bitbuf;

/* Number of bits currently held in @bitbuf. */
u32 bitcount;

/* Pointer to the start of the output buffer. */
le16 *start;

/* Pointer to the position in the output buffer at which the next coding
* unit should be written. */
le16 *next;

/* Pointer past the end of the output buffer. */
le16 *end;
};
500 * Initialize the output bitstream.
* @os:
* The output bitstream structure to initialize.
* @buffer:
* The buffer being written to.
* @size:
* Size of @buffer, in bytes.
*/
static void
lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
{
os->bitbuf = 0;
os->bitcount = 0;
os->start = buffer;
os->next = os->start;
os->end = os->start + size / sizeof(le16);
}
520 * Write some bits to the output bitstream.
522 * The bits are given by the low-order @num_bits bits of @bits. Higher-order
523 * bits in @bits cannot be set. At most 17 bits can be written at once.
525 * @max_num_bits is a compile-time constant that specifies the maximum number of
526 * bits that can ever be written at the call site. Currently, it is used to
527 * optimize away the conditional code for writing a second 16-bit coding unit
528 * when writing fewer than 17 bits.
530 * If the output buffer space is exhausted, then the bits will be ignored, and
531 * lzx_flush_output() will return 0 when it gets called.
static inline void
lzx_write_varbits(struct lzx_output_bitstream *os,
const u32 bits, const unsigned num_bits,
const unsigned max_num_bits)
{
538 /* This code is optimized for LZX, which never needs to write more than
539 * 17 bits at once. */
540 LZX_ASSERT(num_bits <= 17);
541 LZX_ASSERT(num_bits <= max_num_bits);
542 LZX_ASSERT(os->bitcount <= 15);
544 /* Add the bits to the bit buffer variable. @bitcount will be at most
545 * 15, so there will be just enough space for the maximum possible
546 * @num_bits of 17. */
547 os->bitcount += num_bits;
548 os->bitbuf = (os->bitbuf << num_bits) | bits;
550 /* Check whether any coding units need to be written. */
if (os->bitcount >= 16) {

os->bitcount -= 16;
555 /* Write a coding unit, unless it would overflow the buffer. */
556 if (os->next != os->end)
557 put_unaligned_u16_le(os->bitbuf >> os->bitcount, os->next++);
559 /* If writing 17 bits, a second coding unit might need to be
560 * written. But because 'max_num_bits' is a compile-time
* constant, the compiler will optimize away this code at most
* call sites. */
if (max_num_bits == 17 && os->bitcount == 16) {
if (os->next != os->end)
put_unaligned_u16_le(os->bitbuf, os->next++);
os->bitcount = 0;
}
}
}
571 /* Use when @num_bits is a compile-time constant. Otherwise use
572 * lzx_write_varbits(). */
static inline void
lzx_write_bits(struct lzx_output_bitstream *os,
const u32 bits, const unsigned num_bits)
{
lzx_write_varbits(os, bits, num_bits, num_bits);
}
581 * Flush the last coding unit to the output buffer if needed. Return the total
582 * number of bytes written to the output buffer, or 0 if an overflow occurred.
static u32
lzx_flush_output(struct lzx_output_bitstream *os)
{
if (os->next == os->end)
return 0;

if (os->bitcount != 0)
put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next++);

return (const u8 *)os->next - (const u8 *)os->start;
}
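/*
* A minimal usage sketch of the bitstream API above (illustrative only; the
* buffer, 'codeword', and 'len' are hypothetical):
*
* struct lzx_output_bitstream os;
* u8 buf[4096];
*
* lzx_init_output(&os, buf, sizeof(buf));
* lzx_write_bits(&os, LZX_BLOCKTYPE_VERBATIM, 3);
* lzx_write_varbits(&os, codeword, len, 17);
* u32 out_size = lzx_flush_output(&os); // 0 on overflow
*/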
596 /* Build the main, length, and aligned offset Huffman codes used in LZX.
598 * This takes as input the frequency tables for each code and produces as output
599 * a set of tables that map symbols to codewords and codeword lengths. */
static void
lzx_make_huffman_codes(struct lzx_compressor *c)
{
603 const struct lzx_freqs *freqs = &c->freqs;
604 struct lzx_codes *codes = &c->codes[c->codes_index];
606 make_canonical_huffman_code(c->num_main_syms,
607 LZX_MAX_MAIN_CODEWORD_LEN,
freqs->main,
codes->lens.main,
codes->codewords.main);
612 make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
613 LZX_MAX_LEN_CODEWORD_LEN,
freqs->len,
codes->lens.len,
codes->codewords.len);
618 make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
619 LZX_MAX_ALIGNED_CODEWORD_LEN,
freqs->aligned,
codes->lens.aligned,
codes->codewords.aligned);
}
625 /* Reset the symbol frequencies for the LZX Huffman codes. */
static void
lzx_reset_symbol_frequencies(struct lzx_compressor *c)
{
memset(&c->freqs, 0, sizeof(c->freqs));
}
static unsigned
lzx_compute_precode_items(const u8 lens[restrict],
634 const u8 prev_lens[restrict],
635 const unsigned num_lens,
636 u32 precode_freqs[restrict],
unsigned precode_items[restrict])
{
unsigned *itemptr;
unsigned run_start;
unsigned run_end;
unsigned extra_bits;
int delta;
u8 len;
itemptr = precode_items;
run_start = 0;
do {
649 /* Find the next run of codeword lengths. */
651 /* len = the length being repeated */
652 len = lens[run_start];
654 run_end = run_start + 1;
656 /* Fast case for a single length. */
if (likely(run_end == num_lens || len != lens[run_end])) {
delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
precode_freqs[delta]++;
*itemptr++ = delta;
run_start = run_end;
continue;
}
/* Extend the run. */
do {
run_end++;
} while (run_end != num_lens && len == lens[run_end]);

if (len == 0) {
/* A run of zeroes. */
675 /* Symbol 18: RLE 20 to 51 zeroes at a time. */
676 while ((run_end - run_start) >= 20) {
extra_bits = min((run_end - run_start) - 20, 0x1f);
precode_freqs[18]++;
*itemptr++ = 18 | (extra_bits << 5);
run_start += 20 + extra_bits;
}
683 /* Symbol 17: RLE 4 to 19 zeroes at a time. */
684 if ((run_end - run_start) >= 4) {
extra_bits = min((run_end - run_start) - 4, 0xf);
precode_freqs[17]++;
*itemptr++ = 17 | (extra_bits << 5);
run_start += 4 + extra_bits;
}
} else {
692 /* A run of nonzero lengths. */
694 /* Symbol 19: RLE 4 to 5 of any length at a time. */
695 while ((run_end - run_start) >= 4) {
696 extra_bits = (run_end - run_start) > 4;
delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
precode_freqs[19]++;
precode_freqs[delta]++;
*itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
run_start += 4 + extra_bits;
}
}
707 /* Output any remaining lengths without RLE. */
708 while (run_start != run_end) {
delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
precode_freqs[delta]++;
*itemptr++ = delta;
run_start++;
}
716 } while (run_start != num_lens);
return itemptr - precode_items;
}
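/*
* For reference, the layout of each 'precode_items' entry produced above:
* bits 0-4 hold the precode symbol (0-19); for symbols 17 and 18 the repeat
* count's extra bits follow in bits 5+; for symbol 19, bit 5 holds its one
* extra bit and bits 6+ hold the delta-encoded length that the run repeats.
*/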
722 * Output a Huffman code in the compressed form used in LZX.
724 * The Huffman code is represented in the output as a logical series of codeword
725 * lengths from which the Huffman code, which must be in canonical form, can be
728 * The codeword lengths are themselves compressed using a separate Huffman code,
729 * the "precode", which contains a symbol for each possible codeword length in
730 * the larger code as well as several special symbols to represent repeated
731 * codeword lengths (a form of run-length encoding). The precode is itself
732 * constructed in canonical form, and its codeword lengths are represented
733 * literally in 20 4-bit fields that immediately precede the compressed codeword
734 * lengths of the larger code.
736 * Furthermore, the codeword lengths of the larger code are actually represented
* as deltas from the codeword lengths of the corresponding code in the previous
* block.
* @os:
* Bitstream to which to write the compressed Huffman code.
* @lens:
* The codeword lengths, indexed by symbol, in the Huffman code.
* @prev_lens:
* The codeword lengths, indexed by symbol, in the corresponding Huffman
* code in the previous block, or all zeroes if this is the first block.
* @num_lens:
* The number of symbols in the Huffman code.
*/
static void
lzx_write_compressed_code(struct lzx_output_bitstream *os,
752 const u8 lens[restrict],
const u8 prev_lens[restrict],
const unsigned num_lens)
{
756 u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
757 u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
758 u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
759 unsigned precode_items[num_lens];
760 unsigned num_precode_items;
761 unsigned precode_item;
unsigned precode_sym;
unsigned i;
765 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
766 precode_freqs[i] = 0;
768 /* Compute the "items" (RLE / literal tokens and extra bits) with which
769 * the codeword lengths in the larger code will be output. */
num_precode_items = lzx_compute_precode_items(lens,
prev_lens,
num_lens,
precode_freqs,
precode_items);
776 /* Build the precode. */
777 make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
778 LZX_MAX_PRE_CODEWORD_LEN,
precode_freqs, precode_lens,
precode_codewords);
782 /* Output the lengths of the codewords in the precode. */
783 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
784 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
786 /* Output the encoded lengths of the codewords in the larger code. */
787 for (i = 0; i < num_precode_items; i++) {
788 precode_item = precode_items[i];
789 precode_sym = precode_item & 0x1F;
790 lzx_write_varbits(os, precode_codewords[precode_sym],
791 precode_lens[precode_sym],
792 LZX_MAX_PRE_CODEWORD_LEN);
793 if (precode_sym >= 17) {
794 if (precode_sym == 17) {
795 lzx_write_bits(os, precode_item >> 5, 4);
796 } else if (precode_sym == 18) {
797 lzx_write_bits(os, precode_item >> 5, 5);
} else {
lzx_write_bits(os, (precode_item >> 5) & 1, 1);
800 precode_sym = precode_item >> 6;
801 lzx_write_varbits(os, precode_codewords[precode_sym],
802 precode_lens[precode_sym],
LZX_MAX_PRE_CODEWORD_LEN);
}
}
}
}
809 /* Output a match or literal. */
static void
lzx_write_item(struct lzx_output_bitstream *os, struct lzx_item item,
unsigned ones_if_aligned, const struct lzx_codes *codes)
{
814 u64 data = item.data;
unsigned main_symbol;
unsigned len_symbol;
unsigned num_extra_bits;
u32 extra_bits;
820 main_symbol = data & 0x3FF;
822 lzx_write_varbits(os, codes->codewords.main[main_symbol],
823 codes->lens.main[main_symbol],
824 LZX_MAX_MAIN_CODEWORD_LEN);
if (main_symbol < LZX_NUM_CHARS) /* Literal? */
return;
829 len_symbol = (data >> 10) & 0xFF;
831 if (len_symbol != LZX_LENCODE_NUM_SYMBOLS) {
832 lzx_write_varbits(os, codes->codewords.len[len_symbol],
833 codes->lens.len[len_symbol],
LZX_MAX_LEN_CODEWORD_LEN);
}
837 num_extra_bits = (data >> 18) & 0x1F;
if (num_extra_bits == 0) /* Small offset or repeat offset match? */
return;
841 extra_bits = data >> 23;
843 if ((num_extra_bits & ones_if_aligned) >= LZX_NUM_ALIGNED_OFFSET_BITS) {
845 /* Aligned offset blocks: The low 3 bits of the extra offset
846 * bits are Huffman-encoded using the aligned offset code. The
847 * remaining bits are output literally. */
849 lzx_write_varbits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
850 num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS,
851 17 - LZX_NUM_ALIGNED_OFFSET_BITS);
853 lzx_write_varbits(os,
854 codes->codewords.aligned[extra_bits & LZX_ALIGNED_OFFSET_BITMASK],
855 codes->lens.aligned[extra_bits & LZX_ALIGNED_OFFSET_BITMASK],
856 LZX_MAX_ALIGNED_CODEWORD_LEN);
} else {
/* Verbatim blocks, or fewer than 3 extra bits: All extra
* offset bits are output literally. */
lzx_write_varbits(os, extra_bits, num_extra_bits, 17);
}
}
865 * Write all matches and literal bytes (which were precomputed) in an LZX
866 * compressed block to the output bitstream in the final compressed
870 * The output bitstream.
872 * The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
873 * LZX_BLOCKTYPE_VERBATIM).
875 * The array of matches/literals to output.
877 * Number of matches/literals to output (length of @items).
879 * The main, length, and aligned offset Huffman codes for the current
880 * LZX compressed block.
static void
lzx_write_items(struct lzx_output_bitstream *os, int block_type,
const struct lzx_item items[], u32 num_items,
const struct lzx_codes *codes)
{
unsigned ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);

for (u32 i = 0; i < num_items; i++)
lzx_write_item(os, items[i], ones_if_aligned, codes);
}
static void
lzx_write_compressed_block(int block_type,
u32 block_size,
unsigned window_order,
897 unsigned num_main_syms,
898 const struct lzx_item chosen_items[],
899 u32 num_chosen_items,
900 const struct lzx_codes * codes,
901 const struct lzx_lens * prev_lens,
struct lzx_output_bitstream * os)
{
904 LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
905 block_type == LZX_BLOCKTYPE_VERBATIM);
907 /* The first three bits indicate the type of block and are one of the
908 * LZX_BLOCKTYPE_* constants. */
909 lzx_write_bits(os, block_type, 3);
911 /* Output the block size.
913 * The original LZX format seemed to always encode the block size in 3
914 * bytes. However, the implementation in WIMGAPI, as used in WIM files,
915 * uses the first bit to indicate whether the block is the default size
916 * (32768) or a different size given explicitly by the next 16 bits.
918 * By default, this compressor uses a window size of 32768 and therefore
919 * follows the WIMGAPI behavior. However, this compressor also supports
920 * window sizes greater than 32768 bytes, which do not appear to be
921 * supported by WIMGAPI. In such cases, we retain the default size bit
922 * to mean a size of 32768 bytes but output non-default block size in 24
923 * bits rather than 16. The compatibility of this behavior is unknown
924 * because WIMs created with chunk size greater than 32768 can seemingly
925 * only be opened by wimlib anyway. */
if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
lzx_write_bits(os, 1, 1);
} else {
lzx_write_bits(os, 0, 1);

if (window_order >= 16)
lzx_write_bits(os, block_size >> 16, 8);

lzx_write_bits(os, block_size & 0xFFFF, 16);
}
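/*
* Example (added for illustration): a block of the default size 32768 costs a
* single '1' bit here, while e.g. block_size == 65536 with window_order >= 16
* is written as a '0' bit, then 65536 >> 16 == 1 in 8 bits, then
* 65536 & 0xFFFF == 0 in 16 bits.
*/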
937 /* If it's an aligned offset block, output the aligned offset code. */
938 if (block_type == LZX_BLOCKTYPE_ALIGNED) {
939 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
940 lzx_write_bits(os, codes->lens.aligned[i],
LZX_ALIGNEDCODE_ELEMENT_SIZE);
}
}
945 /* Output the main code (two parts). */
lzx_write_compressed_code(os, codes->lens.main,
prev_lens->main,
LZX_NUM_CHARS);
949 lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
950 prev_lens->main + LZX_NUM_CHARS,
951 num_main_syms - LZX_NUM_CHARS);
953 /* Output the length code. */
lzx_write_compressed_code(os, codes->lens.len,
prev_lens->len,
956 LZX_LENCODE_NUM_SYMBOLS);
958 /* Output the compressed matches and literals. */
lzx_write_items(os, block_type, chosen_items, num_chosen_items, codes);
}
962 /* Given the frequencies of symbols in an LZX-compressed block and the
963 * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
964 * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
965 * will take fewer bits to output. */
static int
lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
const struct lzx_codes * codes)
{
970 u32 aligned_cost = 0;
971 u32 verbatim_cost = 0;
973 /* A verbatim block requires 3 bits in each place that an aligned symbol
974 * would be used in an aligned offset block. */
975 for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
976 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
}
980 /* Account for output of the aligned offset code. */
981 aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
983 if (aligned_cost < verbatim_cost)
984 return LZX_BLOCKTYPE_ALIGNED;
return LZX_BLOCKTYPE_VERBATIM;
}
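/*
* Example (added for illustration): if the aligned symbols occur 500 times in
* total, a verbatim block spends 3 * 500 == 1500 bits on them; an aligned
* offset block wins if the Huffman-coded aligned symbols plus the
* 8 * 3 == 24 bits needed to transmit the aligned code cost less than that.
*/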
990 * Finish an LZX block:
992 * - build the Huffman codes
* - decide whether to output the block as VERBATIM or ALIGNED
* - output the block
* - swap the indices of the current and previous Huffman codes
*/
static void
998 lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
u32 block_size, u32 num_chosen_items)
{
int block_type;
1003 lzx_make_huffman_codes(c);
1005 block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
1006 &c->codes[c->codes_index]);
lzx_write_compressed_block(block_type,
block_size,
c->window_order,
c->num_main_syms,
c->chosen_items,
num_chosen_items,
&c->codes[c->codes_index],
&c->codes[c->codes_index ^ 1].lens,
os);
c->codes_index ^= 1;
}
1019 /* Return the offset slot for the specified offset, which must be
1020 * less than LZX_NUM_FAST_OFFSETS. */
1021 static inline unsigned
lzx_get_offset_slot_fast(struct lzx_compressor *c, u32 offset)
{
LZX_ASSERT(offset < LZX_NUM_FAST_OFFSETS);
return c->offset_slot_fast[offset];
}
1028 /* Tally, and optionally record, the specified literal byte. */
static inline void
lzx_declare_literal(struct lzx_compressor *c, unsigned literal,
struct lzx_item **next_chosen_item)
{
1033 unsigned main_symbol = lzx_main_symbol_for_literal(literal);
1035 c->freqs.main[main_symbol]++;
1037 if (next_chosen_item) {
1038 *(*next_chosen_item)++ = (struct lzx_item) {
.data = main_symbol,
};
}
}
1044 /* Tally, and optionally record, the specified repeat offset match. */
static inline void
lzx_declare_repeat_offset_match(struct lzx_compressor *c,
unsigned len, unsigned rep_index,
struct lzx_item **next_chosen_item)
{
1050 unsigned len_header;
1051 unsigned len_symbol;
1052 unsigned main_symbol;
if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
len_header = len - LZX_MIN_MATCH_LEN;
len_symbol = LZX_LENCODE_NUM_SYMBOLS;
} else {
len_header = LZX_NUM_PRIMARY_LENS;
len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
c->freqs.len[len_symbol]++;
}
1063 main_symbol = lzx_main_symbol_for_match(rep_index, len_header);
1065 c->freqs.main[main_symbol]++;
1067 if (next_chosen_item) {
1068 *(*next_chosen_item)++ = (struct lzx_item) {
.data = (u64)main_symbol | ((u64)len_symbol << 10),
};
}
}
1074 /* Tally, and optionally record, the specified explicit offset match. */
static inline void
lzx_declare_explicit_offset_match(struct lzx_compressor *c, unsigned len, u32 offset,
struct lzx_item **next_chosen_item)
{
1079 unsigned len_header;
1080 unsigned len_symbol;
1081 unsigned main_symbol;
1082 unsigned offset_slot;
unsigned num_extra_bits;
u32 extra_bits;
if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
len_header = len - LZX_MIN_MATCH_LEN;
len_symbol = LZX_LENCODE_NUM_SYMBOLS;
} else {
len_header = LZX_NUM_PRIMARY_LENS;
len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
c->freqs.len[len_symbol]++;
}
1095 offset_slot = (offset < LZX_NUM_FAST_OFFSETS) ?
1096 lzx_get_offset_slot_fast(c, offset) :
1097 lzx_get_offset_slot(offset);
1099 main_symbol = lzx_main_symbol_for_match(offset_slot, len_header);
1101 c->freqs.main[main_symbol]++;
1103 num_extra_bits = lzx_extra_offset_bits[offset_slot];
1105 if (num_extra_bits >= LZX_NUM_ALIGNED_OFFSET_BITS)
1106 c->freqs.aligned[(offset + LZX_OFFSET_ADJUSTMENT) &
1107 LZX_ALIGNED_OFFSET_BITMASK]++;
1109 if (next_chosen_item) {
1111 extra_bits = (offset + LZX_OFFSET_ADJUSTMENT) -
1112 lzx_offset_slot_base[offset_slot];
1114 BUILD_BUG_ON(LZX_MAINCODE_MAX_NUM_SYMBOLS > (1 << 10));
1115 BUILD_BUG_ON(LZX_LENCODE_NUM_SYMBOLS > (1 << 8));
1116 *(*next_chosen_item)++ = (struct lzx_item) {
1117 .data = (u64)main_symbol |
1118 ((u64)len_symbol << 10) |
1119 ((u64)num_extra_bits << 18) |
((u64)extra_bits << 23),
};
}
}
1126 /* Tally, and optionally record, the specified match or literal. */
static inline void
lzx_declare_item(struct lzx_compressor *c, u32 item,
struct lzx_item **next_chosen_item)
{
1131 u32 len = item & OPTIMUM_LEN_MASK;
u32 offset_data = item >> OPTIMUM_OFFSET_SHIFT;

if (len == 1) /* Literal? */
lzx_declare_literal(c, offset_data, next_chosen_item);
1136 else if (offset_data < LZX_NUM_RECENT_OFFSETS)
lzx_declare_repeat_offset_match(c, len, offset_data,
next_chosen_item);
else
lzx_declare_explicit_offset_match(c, len,
offset_data - LZX_OFFSET_ADJUSTMENT,
next_chosen_item);
}
static void
lzx_record_item_list(struct lzx_compressor *c,
struct lzx_optimum_node *cur_node,
struct lzx_item **next_chosen_item)
{
struct lzx_optimum_node *end_node;
u32 saved_item;
u32 item;
/* The list is currently in reverse order (last item to first item).
* Reverse it. */
end_node = cur_node;
saved_item = cur_node->item;
do {
item = saved_item;
1160 cur_node -= item & OPTIMUM_LEN_MASK;
1161 saved_item = cur_node->item;
1162 cur_node->item = item;
1163 } while (cur_node != c->optimum_nodes);
/* Walk the list of items from beginning to end, tallying and recording
* the items. */
do {
1168 lzx_declare_item(c, cur_node->item, next_chosen_item);
1169 cur_node += (cur_node->item) & OPTIMUM_LEN_MASK;
} while (cur_node != end_node);
}
static void
lzx_tally_item_list(struct lzx_compressor *c, struct lzx_optimum_node *cur_node)
{
1176 /* Since we're just tallying the items, we don't need to reverse the
* list. Processing the items in reverse order is fine. */
do {
1179 lzx_declare_item(c, cur_node->item, NULL);
1180 cur_node -= (cur_node->item & OPTIMUM_LEN_MASK);
} while (cur_node != c->optimum_nodes);
}
1185 * Find an inexpensive path through the graph of possible match/literal choices
1186 * for the current block. The nodes of the graph are
1187 * c->optimum_nodes[0...block_size]. They correspond directly to the bytes in
1188 * the current block, plus one extra node for end-of-block. The edges of the
1189 * graph are matches and literals. The goal is to find the minimum cost path
1190 * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
1192 * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1193 * proceeding forwards one node at a time. At each node, a selection of matches
1194 * (len >= 2), as well as the literal byte (len = 1), is considered. An item of
1195 * length 'len' provides a new path to reach the node 'len' bytes later. If
1196 * such a path is the lowest cost found so far to reach that later node, then
1197 * that later node is updated with the new path.
1199 * Note that although this algorithm is based on minimum cost path search, due
1200 * to various simplifying assumptions the result is not guaranteed to be the
1201 * true minimum cost, or "optimal", path over the graph of all valid LZX
1202 * representations of this block.
1204 * Also, note that because of the presence of the recent offsets queue (which is
1205 * a type of adaptive state), the algorithm cannot work backwards and compute
1206 * "cost to end" instead of "cost to beginning". Furthermore, the way the
1207 * algorithm handles this adaptive state in the "minimum-cost" parse is actually
1208 * only an approximation. It's possible for the globally optimal, minimum cost
1209 * path to contain a prefix, ending at a position, where that path prefix is
1210 * *not* the minimum cost path to that position. This can happen if such a path
1211 * prefix results in a different adaptive state which results in lower costs
1212 * later. The algorithm does not solve this problem; it only considers the
1213 * lowest cost to reach each individual position.
1215 static struct lzx_lru_queue
1216 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1217 const u8 * const restrict block_begin,
1218 const u32 block_size,
const struct lzx_lru_queue initial_queue)
{
1221 struct lzx_optimum_node *cur_node = c->optimum_nodes;
1222 struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
1223 struct lz_match *cache_ptr = c->match_cache;
1224 const u8 *in_next = block_begin;
1225 const u8 * const block_end = block_begin + block_size;
1227 /* Instead of storing the match offset LRU queues in the
1228 * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1229 * storing them in a smaller array. This works because the algorithm
1230 * only requires a limited history of the adaptive state. Once a given
1231 * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1232 * it is no longer needed. */
1233 struct lzx_lru_queue queues[512];
1235 BUILD_BUG_ON(ARRAY_LEN(queues) < LZX_MAX_MATCH_LEN + 1);
1236 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
1238 /* Initially, the cost to reach each node is "infinity". */
1239 memset(c->optimum_nodes, 0xFF,
1240 (block_size + 1) * sizeof(c->optimum_nodes[0]));
1242 QUEUE(block_begin) = initial_queue;
/* The following loop runs 'block_size' iterations, one per node. */
do {
1246 unsigned num_matches;
1251 * A selection of matches for the block was already saved in
1252 * memory so that we don't have to run the uncompressed data
1253 * through the matchfinder on every optimization pass. However,
1254 * we still search for repeat offset matches during each
1255 * optimization pass because we cannot predict the state of the
1256 * recent offsets queue. But as a heuristic, we don't bother
1257 * searching for repeat offset matches if the general-purpose
1258 * matchfinder failed to find any matches.
1260 * Note that a match of length n at some offset implies there is
1261 * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1262 * that same offset. In other words, we don't necessarily need
1263 * to use the full length of a match. The key heuristic that
* saves a significant amount of time is that for each
1265 * distinct length, we only consider the smallest offset for
1266 * which that length is available. This heuristic also applies
1267 * to repeat offsets, which we order specially: R0 < R1 < R2 <
* any explicit offset. Of course, this heuristic may
* produce suboptimal results because offset slots in LZX are
* subject to entropy encoding, but in practice this is a useful
* heuristic. */
1274 num_matches = cache_ptr->length;
1278 struct lz_match *end_matches = cache_ptr + num_matches;
1279 unsigned next_len = LZX_MIN_MATCH_LEN;
1280 unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1283 /* Consider R0 match */
1284 matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1285 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1287 BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2);
1289 u32 cost = cur_node->cost +
1290 c->costs.match_cost[0][
1291 next_len - LZX_MIN_MATCH_LEN];
1292 if (cost <= (cur_node + next_len)->cost) {
1293 (cur_node + next_len)->cost = cost;
1294 (cur_node + next_len)->item =
1295 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1297 if (unlikely(++next_len > max_len)) {
1298 cache_ptr = end_matches;
1301 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1305 /* Consider R1 match */
1306 matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1307 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1309 if (matchptr[next_len - 1] != in_next[next_len - 1])
1311 for (unsigned len = 2; len < next_len - 1; len++)
1312 if (matchptr[len] != in_next[len])
1315 u32 cost = cur_node->cost +
1316 c->costs.match_cost[1][
1317 next_len - LZX_MIN_MATCH_LEN];
1318 if (cost <= (cur_node + next_len)->cost) {
1319 (cur_node + next_len)->cost = cost;
1320 (cur_node + next_len)->item =
1321 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1323 if (unlikely(++next_len > max_len)) {
1324 cache_ptr = end_matches;
1327 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1331 /* Consider R2 match */
1332 matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1333 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1335 if (matchptr[next_len - 1] != in_next[next_len - 1])
1337 for (unsigned len = 2; len < next_len - 1; len++)
1338 if (matchptr[len] != in_next[len])
1341 u32 cost = cur_node->cost +
1342 c->costs.match_cost[2][
1343 next_len - LZX_MIN_MATCH_LEN];
1344 if (cost <= (cur_node + next_len)->cost) {
1345 (cur_node + next_len)->cost = cost;
1346 (cur_node + next_len)->item =
1347 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1349 if (unlikely(++next_len > max_len)) {
1350 cache_ptr = end_matches;
1353 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1357 while (next_len > cache_ptr->length)
1358 if (++cache_ptr == end_matches)
1361 /* Consider explicit offset matches */
1363 u32 offset = cache_ptr->offset;
1364 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1365 unsigned offset_slot = (offset < LZX_NUM_FAST_OFFSETS) ?
1366 lzx_get_offset_slot_fast(c, offset) :
1367 lzx_get_offset_slot(offset);
1369 u32 cost = cur_node->cost +
1370 c->costs.match_cost[offset_slot][
1371 next_len - LZX_MIN_MATCH_LEN];
1372 #if LZX_CONSIDER_ALIGNED_COSTS
1373 if (lzx_extra_offset_bits[offset_slot] >=
1374 LZX_NUM_ALIGNED_OFFSET_BITS)
1375 cost += c->costs.aligned[offset_data &
1376 LZX_ALIGNED_OFFSET_BITMASK];
1378 if (cost < (cur_node + next_len)->cost) {
1379 (cur_node + next_len)->cost = cost;
1380 (cur_node + next_len)->item =
1381 (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1383 } while (++next_len <= cache_ptr->length);
1384 } while (++cache_ptr != end_matches);
1389 /* Consider coding a literal.
1391 * To avoid an extra branch, actually checking the preferability
1392 * of coding the literal is integrated into the queue update
1394 literal = *in_next++;
1395 cost = cur_node->cost +
1396 c->costs.main[lzx_main_symbol_for_literal(literal)];
1398 /* Advance to the next position. */
1401 /* The lowest-cost path to the current position is now known.
1402 * Finalize the recent offsets queue that results from taking
1403 * this lowest-cost path. */
1405 if (cost <= cur_node->cost) {
1406 /* Literal: queue remains unchanged. */
1407 cur_node->cost = cost;
1408 cur_node->item = (literal << OPTIMUM_OFFSET_SHIFT) | 1;
1409 QUEUE(in_next) = QUEUE(in_next - 1);
1411 /* Match: queue update is needed. */
1412 unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1413 u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1414 if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1415 /* Explicit offset match: insert offset at front */
QUEUE(in_next) =
lzx_lru_queue_push(QUEUE(in_next - len),
1418 offset_data - LZX_OFFSET_ADJUSTMENT);
1420 /* Repeat offset match: swap offset to front */
QUEUE(in_next) =
lzx_lru_queue_swap(QUEUE(in_next - len),
offset_data);
1426 } while (cur_node != end_node);
1428 /* Return the match offset queue at the end of the minimum-cost path. */
return QUEUE(block_end);
}
1432 /* Given the costs for the main and length codewords, compute 'match_costs'. */
static void
lzx_compute_match_costs(struct lzx_compressor *c)
{
1436 unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
1437 struct lzx_costs *costs = &c->costs;
1439 for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1441 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
unsigned i;
1445 #if LZX_CONSIDER_ALIGNED_COSTS
1446 if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
#endif
1450 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1451 costs->match_cost[offset_slot][i] =
1452 costs->main[main_symbol++] + extra_cost;
1454 extra_cost += costs->main[main_symbol];
1456 for (; i < LZX_NUM_LENS; i++)
1457 costs->match_cost[offset_slot][i] =
costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
}
}
/* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
* algorithm. */
static void
lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
{
u32 i;
1468 bool have_byte[256];
1469 unsigned num_used_bytes;
1471 /* The costs below are hard coded to use a scaling factor of 16. */
1472 BUILD_BUG_ON(LZX_BIT_COST != 16);
/*
* Heuristics:
*
* - Use smaller initial costs for literal symbols when the input buffer
* contains fewer distinct bytes.
*
* - Assume that match symbols are more costly than literal symbols.
*
* - Assume that length symbols for shorter lengths are less costly than
* length symbols for longer lengths.
*/
1486 for (i = 0; i < 256; i++)
1487 have_byte[i] = false;
1489 for (i = 0; i < block_size; i++)
1490 have_byte[block[i]] = true;
num_used_bytes = 0;
for (i = 0; i < 256; i++)
1494 num_used_bytes += have_byte[i];
1496 for (i = 0; i < 256; i++)
1497 c->costs.main[i] = 140 - (256 - num_used_bytes) / 4;
1499 for (; i < c->num_main_syms; i++)
1500 c->costs.main[i] = 170;
1502 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1503 c->costs.len[i] = 103 + (i / 4);
1505 #if LZX_CONSIDER_ALIGNED_COSTS
1506 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
#endif
lzx_compute_match_costs(c);
}
1513 /* Update the current cost model to reflect the computed Huffman codes. */
static void
lzx_update_costs(struct lzx_compressor *c)
{
unsigned i;
const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1520 for (i = 0; i < c->num_main_syms; i++)
1521 c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
1523 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1524 c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
1526 #if LZX_CONSIDER_ALIGNED_COSTS
1527 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
#endif
lzx_compute_match_costs(c);
}
1534 static struct lzx_lru_queue
1535 lzx_optimize_and_write_block(struct lzx_compressor *c,
1536 struct lzx_output_bitstream *os,
1537 const u8 *block_begin, const u32 block_size,
const struct lzx_lru_queue initial_queue)
{
1540 unsigned num_passes_remaining = c->num_optim_passes;
1541 struct lzx_item *next_chosen_item;
1542 struct lzx_lru_queue new_queue;
1544 /* The first optimization pass uses a default cost model. Each
1545 * additional optimization pass uses a cost model derived from the
1546 * Huffman code computed in the previous pass. */
1548 lzx_set_default_costs(c, block_begin, block_size);
1549 lzx_reset_symbol_frequencies(c);
do {
new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
initial_queue);
1553 if (num_passes_remaining > 1) {
1554 lzx_tally_item_list(c, c->optimum_nodes + block_size);
1555 lzx_make_huffman_codes(c);
1556 lzx_update_costs(c);
1557 lzx_reset_symbol_frequencies(c);
1559 } while (--num_passes_remaining);
1561 next_chosen_item = c->chosen_items;
1562 lzx_record_item_list(c, c->optimum_nodes + block_size, &next_chosen_item);
lzx_finish_block(c, os, block_size, next_chosen_item - c->chosen_items);

return new_queue;
}
1568 * This is the "near-optimal" LZX compressor.
1570 * For each block, it performs a relatively thorough graph search to find an
1571 * inexpensive (in terms of compressed size) way to output that block.
1573 * Note: there are actually many things this algorithm leaves on the table in
1574 * terms of compression ratio. So although it may be "near-optimal", it is
1575 * certainly not "optimal". The goal is not to produce the optimal compression
1576 * ratio, which for LZX is probably impossible within any practical amount of
1577 * time, but rather to produce a compression ratio significantly better than a
1578 * simpler "greedy" or "lazy" parse while still being relatively fast.
static void
lzx_compress_near_optimal(struct lzx_compressor *c,
struct lzx_output_bitstream *os)
{
1584 const u8 * const in_begin = c->in_buffer;
1585 const u8 * in_next = in_begin;
1586 const u8 * const in_end = in_begin + c->in_nbytes;
1587 unsigned max_len = LZX_MAX_MATCH_LEN;
1588 unsigned nice_len = min(c->nice_match_length, max_len);
u32 next_hash;
struct lzx_lru_queue queue;
1592 bt_matchfinder_init(&c->bt_mf);
1593 matchfinder_init(c->hash2_tab, LZX_HASH2_LENGTH);
1594 next_hash = bt_matchfinder_hash_3_bytes(in_next);
lzx_lru_queue_init(&queue);

do {
1598 /* Starting a new block */
1599 const u8 * const in_block_begin = in_next;
1600 const u8 * const in_block_end =
1601 in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1603 /* Run the block through the matchfinder and cache the matches. */
1604 struct lz_match *cache_ptr = c->match_cache;
1606 struct lz_match *lz_matchptr;
1611 /* If approaching the end of the input buffer, adjust
1612 * 'max_len' and 'nice_len' accordingly. */
1613 if (unlikely(max_len > in_end - in_next)) {
1614 max_len = in_end - in_next;
1615 nice_len = min(max_len, nice_len);
1617 /* This extra check is needed to ensure that
1618 * reading the next 3 bytes when looking for a
1619 * length 2 match is valid. In addition, we
1620 * cannot allow ourselves to find a length 2
1621 * match of the very last two bytes with the
1622 * very first two bytes, since such a match has
1623 * an offset too large to be represented. */
1624 if (unlikely(max_len < 3)) {
1626 cache_ptr->length = 0;
1632 lz_matchptr = cache_ptr + 1;
1634 /* Check for a length 2 match. */
1635 hash2 = lz_hash_2_bytes(in_next, LZX_HASH2_ORDER);
1636 cur_match = c->hash2_tab[hash2];
1637 c->hash2_tab[hash2] = in_next - in_begin;
1638 if (matchfinder_node_valid(cur_match) &&
1639 (LZX_HASH2_ORDER == 16 ||
1640 load_u16_unaligned(&in_begin[cur_match]) ==
1641 load_u16_unaligned(in_next)) &&
1642 in_begin[cur_match + 2] != in_next[2])
1644 lz_matchptr->length = 2;
1645 lz_matchptr->offset = in_next - &in_begin[cur_match];
1649 /* Check for matches of length >= 3. */
1650 lz_matchptr = bt_matchfinder_get_matches(&c->bt_mf,
1656 c->max_search_depth,
1661 cache_ptr->length = lz_matchptr - (cache_ptr + 1);
1662 cache_ptr = lz_matchptr;
1665 * If there was a very long match found, then don't
1666 * cache any matches for the bytes covered by that
1667 * match. This avoids degenerate behavior when
1668 * compressing highly redundant data, where the number
1669 * of matches can be very large.
1671 * This heuristic doesn't actually hurt the compression
1672 * ratio very much. If there's a long match, then the
1673 * data must be highly compressible, so it doesn't
1674 * matter as much what we do.
1676 if (best_len >= nice_len) {
1679 if (unlikely(max_len > in_end - in_next)) {
1680 max_len = in_end - in_next;
1681 nice_len = min(max_len, nice_len);
1682 if (unlikely(max_len < 3)) {
1684 cache_ptr->length = 0;
1689 c->hash2_tab[lz_hash_2_bytes(in_next, LZX_HASH2_ORDER)] =
1691 bt_matchfinder_skip_position(&c->bt_mf,
1696 c->max_search_depth,
1699 cache_ptr->length = 0;
1701 } while (--best_len);
1703 } while (in_next < in_block_end &&
1704 likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
1706 /* We've finished running the block through the matchfinder.
1707 * Now choose a match/literal sequence and write the block. */
1709 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
in_next - in_block_begin,
queue);
} while (in_next != in_end);
}
1716 * Given a pointer to the current byte sequence and the current list of recent
1717 * match offsets, find the longest repeat offset match.
1719 * If no match of at least 2 bytes is found, then return 0.
1721 * If a match of at least 2 bytes is found, then return its length and set
1722 * *rep_max_idx_ret to the index of its offset in @queue.
static unsigned
lzx_find_longest_repeat_offset_match(const u8 * const in_next,
1726 const u32 bytes_remaining,
1727 struct lzx_lru_queue queue,
unsigned *rep_max_idx_ret)
{
1730 BUILD_BUG_ON(LZX_NUM_RECENT_OFFSETS != 3);
1731 LZX_ASSERT(bytes_remaining >= 2);
1733 const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
1734 const u16 next_2_bytes = load_u16_unaligned(in_next);
1736 unsigned rep_max_len;
unsigned rep_max_idx;
unsigned rep_len;
const u8 *matchptr;
1740 matchptr = in_next - lzx_lru_queue_pop(&queue);
1741 if (load_u16_unaligned(matchptr) == next_2_bytes)
rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
else
rep_max_len = 0;
rep_max_idx = 0;
1747 matchptr = in_next - lzx_lru_queue_pop(&queue);
1748 if (load_u16_unaligned(matchptr) == next_2_bytes) {
1749 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1750 if (rep_len > rep_max_len) {
rep_max_len = rep_len;
rep_max_idx = 1;
}
}
1756 matchptr = in_next - lzx_lru_queue_pop(&queue);
1757 if (load_u16_unaligned(matchptr) == next_2_bytes) {
1758 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1759 if (rep_len > rep_max_len) {
rep_max_len = rep_len;
rep_max_idx = 2;
}
}
*rep_max_idx_ret = rep_max_idx;
return rep_max_len;
}
1769 /* Fast heuristic scoring for lazy parsing: how "good" is this match? */
1770 static inline unsigned
lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
{
unsigned score = len;

if (adjusted_offset < 4096)
score++;

if (adjusted_offset < 256)
score++;

return score;
}
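/*
* Example (added for illustration): a length 4 match with adjusted offset 100
* scores 4 + 1 (offset < 4096) + 1 (offset < 256) == 6.
*/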
1784 static inline unsigned
lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
{
return rep_len + 3;
}
1790 /* This is the "lazy" LZX compressor. */
static void
lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
{
1794 const u8 * const in_begin = c->in_buffer;
1795 const u8 * in_next = in_begin;
1796 const u8 * const in_end = in_begin + c->in_nbytes;
1797 unsigned max_len = LZX_MAX_MATCH_LEN;
1798 unsigned nice_len = min(c->nice_match_length, max_len);
1799 struct lzx_lru_queue queue;
1801 hc_matchfinder_init(&c->hc_mf);
lzx_lru_queue_init(&queue);

do {
1805 /* Starting a new block */
1807 const u8 * const in_block_begin = in_next;
1808 const u8 * const in_block_end =
1809 in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
struct lzx_item *next_chosen_item = c->chosen_items;
unsigned cur_len;
u32 cur_offset;
u32 cur_offset_data;
unsigned cur_score;
unsigned next_len;
u32 next_offset;
u32 next_offset_data;
unsigned next_score;
unsigned rep_max_len;
unsigned rep_max_idx;
unsigned rep_score;
unsigned skip_len;
lzx_reset_symbol_frequencies(c);

do {
1827 if (unlikely(max_len > in_end - in_next)) {
1828 max_len = in_end - in_next;
nice_len = min(max_len, nice_len);
}
1832 /* Find the longest match at the current position. */
1834 cur_len = hc_matchfinder_longest_match(&c->hc_mf,
1840 c->max_search_depth,
&cur_offset);

if (cur_len < 3 ||
(cur_len == 3 &&
cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
1845 cur_offset != lzx_lru_queue_R0(queue) &&
1846 cur_offset != lzx_lru_queue_R1(queue) &&
cur_offset != lzx_lru_queue_R2(queue)))
{
1849 /* There was no match found, or the only match found
1850 * was a distant length 3 match. Output a literal. */
lzx_declare_literal(c, *in_next++,
&next_chosen_item);
continue;
}
1856 if (cur_offset == lzx_lru_queue_R0(queue)) {
1858 cur_offset_data = 0;
1859 skip_len = cur_len - 1;
goto choose_cur_match;
}
1863 cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
1864 cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
1866 /* Consider a repeat offset match */
rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
in_end - in_next,
queue,
&rep_max_idx);
1873 if (rep_max_len >= 3 &&
1874 (rep_score = lzx_repeat_offset_match_score(rep_max_len,
rep_max_idx)) >= cur_score)
{
1877 cur_len = rep_max_len;
1878 cur_offset_data = rep_max_idx;
1879 skip_len = rep_max_len - 1;
goto choose_cur_match;
}

have_cur_match:
1885 /* We have a match at the current position. */
1887 /* If we have a very long match, choose it immediately. */
1888 if (cur_len >= nice_len) {
1889 skip_len = cur_len - 1;
goto choose_cur_match;
}
1893 /* See if there's a better match at the next position. */
1895 if (unlikely(max_len > in_end - in_next)) {
1896 max_len = in_end - in_next;
nice_len = min(max_len, nice_len);
}
1900 next_len = hc_matchfinder_longest_match(&c->hc_mf,
c->max_search_depth / 2,
&next_offset);
1909 if (next_len <= cur_len - 2) {
1911 skip_len = cur_len - 2;
goto choose_cur_match;
}
1915 next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
1916 next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
in_end - in_next,
queue,
&rep_max_idx);
1924 if (rep_max_len >= 3 &&
1925 (rep_score = lzx_repeat_offset_match_score(rep_max_len,
rep_max_idx)) >= next_score)
{
1929 if (rep_score > cur_score) {
1930 /* The next match is better, and it's a
1931 * repeat offset match. */
lzx_declare_literal(c, *(in_next - 2),
&next_chosen_item);
1934 cur_len = rep_max_len;
1935 cur_offset_data = rep_max_idx;
1936 skip_len = cur_len - 1;
goto choose_cur_match;
}
} else {
1940 if (next_score > cur_score) {
1941 /* The next match is better, and it's an
1942 * explicit offset match. */
lzx_declare_literal(c, *(in_next - 2),
&next_chosen_item);
cur_len = next_len;
1946 cur_offset_data = next_offset_data;
1947 cur_score = next_score;
goto have_cur_match;
}
}
1952 /* The original match was better. */
skip_len = cur_len - 2;

choose_cur_match:
if (cur_offset_data < LZX_NUM_RECENT_OFFSETS) {
lzx_declare_repeat_offset_match(c, cur_len,
cur_offset_data,
&next_chosen_item);
queue = lzx_lru_queue_swap(queue, cur_offset_data);
} else {
lzx_declare_explicit_offset_match(c, cur_len,
cur_offset_data - LZX_OFFSET_ADJUSTMENT,
&next_chosen_item);
queue = lzx_lru_queue_push(queue, cur_offset_data - LZX_OFFSET_ADJUSTMENT);
}
hc_matchfinder_skip_positions(&c->hc_mf,
in_begin,
in_next,
in_end,
skip_len);
1973 in_next += skip_len;
1974 } while (in_next < in_block_end);
1976 lzx_finish_block(c, os, in_next - in_block_begin,
1977 next_chosen_item - c->chosen_items);
} while (in_next != in_end);
}
static void
lzx_init_offset_slot_fast(struct lzx_compressor *c)
{
u8 slot = 0;
1986 for (u32 offset = 0; offset < LZX_NUM_FAST_OFFSETS; offset++) {
1988 while (offset + LZX_OFFSET_ADJUSTMENT >= lzx_offset_slot_base[slot + 1])
c->offset_slot_fast[offset] = slot;
}
}
static size_t
lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
{
1998 if (compression_level <= LZX_MAX_FAST_LEVEL) {
1999 return offsetof(struct lzx_compressor, hc_mf) +
hc_matchfinder_size(max_bufsize);
} else {
2002 return offsetof(struct lzx_compressor, bt_mf) +
bt_matchfinder_size(max_bufsize);
}
}
static u64
lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level)
{
u64 size = 0;
if (max_bufsize > LZX_MAX_WINDOW_SIZE)
return 0;
2015 size += lzx_get_compressor_size(max_bufsize, compression_level);
size += max_bufsize; /* in_buffer */
return size;
}
static int
lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
void **c_ret)
{
2024 unsigned window_order;
2025 struct lzx_compressor *c;
2027 window_order = lzx_get_window_order(max_bufsize);
2028 if (window_order == 0)
2029 return WIMLIB_ERR_INVALID_PARAM;
c = ALIGNED_MALLOC(lzx_get_compressor_size(max_bufsize,
compression_level),
MATCHFINDER_ALIGNMENT);
if (!c)
goto oom;
2037 c->num_main_syms = lzx_get_num_main_syms(window_order);
2038 c->window_order = window_order;
c->in_buffer = MALLOC(max_bufsize);
if (!c->in_buffer)
goto oom_free_c;
2044 if (compression_level <= LZX_MAX_FAST_LEVEL) {
2046 /* Fast compression: Use lazy parsing. */
2048 c->impl = lzx_compress_lazy;
2049 c->max_search_depth = (36 * compression_level) / 20;
2050 c->nice_match_length = (72 * compression_level) / 20;
2052 /* lzx_compress_lazy() needs max_search_depth >= 2 because it
2053 * halves the max_search_depth when attempting a lazy match, and
2054 * max_search_depth cannot be 0. */
2055 if (c->max_search_depth < 2)
2056 c->max_search_depth = 2;
} else {

/* Normal / high compression: Use near-optimal parsing. */
2061 c->impl = lzx_compress_near_optimal;
2063 /* Scale nice_match_length and max_search_depth with the
2064 * compression level. */
2065 c->max_search_depth = (24 * compression_level) / 50;
2066 c->nice_match_length = (32 * compression_level) / 50;
2068 /* Set a number of optimization passes appropriate for the
2069 * compression level. */
2071 c->num_optim_passes = 1;
2073 if (compression_level >= 45)
2074 c->num_optim_passes++;
2076 /* Use more optimization passes for higher compression levels.
2077 * But the more passes there are, the less they help --- so
2078 * don't add them linearly. */
2079 if (compression_level >= 70) {
2080 c->num_optim_passes++;
2081 if (compression_level >= 100)
2082 c->num_optim_passes++;
2083 if (compression_level >= 150)
2084 c->num_optim_passes++;
2085 if (compression_level >= 200)
2086 c->num_optim_passes++;
2087 if (compression_level >= 300)
c->num_optim_passes++;
}
}
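/*
* For reference (not in the original source): with the thresholds above,
* e.g. compression level 100 runs 1 + 1 (>= 45) + 1 (>= 70) + 1 (>= 100) == 4
* optimization passes per block.
*/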
2092 /* max_search_depth == 0 is invalid. */
2093 if (c->max_search_depth < 1)
2094 c->max_search_depth = 1;
2096 if (c->nice_match_length > LZX_MAX_MATCH_LEN)
2097 c->nice_match_length = LZX_MAX_MATCH_LEN;
lzx_init_offset_slot_fast(c);

*c_ret = c;
return 0;

oom_free_c:
ALIGNED_FREE(c);
oom:
return WIMLIB_ERR_NOMEM;
}
static size_t
lzx_compress(const void *in, size_t in_nbytes,
void *out, size_t out_nbytes_avail, void *_c)
{
2113 struct lzx_compressor *c = _c;
2114 struct lzx_output_bitstream os;
2116 /* Don't bother trying to compress very small inputs. */
if (in_nbytes < 100)
return 0;
2120 /* Copy the input data into the internal buffer and preprocess it. */
2121 memcpy(c->in_buffer, in, in_nbytes);
2122 c->in_nbytes = in_nbytes;
2123 lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
2125 /* Initially, the previous Huffman codeword lengths are all zeroes. */
2127 memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2129 /* Initialize the output bitstream. */
2130 lzx_init_output(&os, out, out_nbytes_avail);
/* Call the compression level-specific compress() function. */
(*c->impl)(c, &os);
2135 /* Flush the output bitstream and return the compressed size or 0. */
return lzx_flush_output(&os);
}
static void
lzx_free_compressor(void *_c)
{
struct lzx_compressor *c = _c;

FREE(c->in_buffer);
ALIGNED_FREE(c);
}
2148 const struct compressor_ops lzx_compressor_ops = {
2149 .get_needed_memory = lzx_get_needed_memory,
2150 .create_compressor = lzx_create_compressor,
2151 .compress = lzx_compress,
.free_compressor = lzx_free_compressor,
};