4 * A compressor for the LZX compression format, as used in WIM files.
8 * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
10 * This file is free software; you can redistribute it and/or modify it under
11 * the terms of the GNU Lesser General Public License as published by the Free
12 * Software Foundation; either version 3 of the License, or (at your option) any
15 * This file is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this file; if not, see http://www.gnu.org/licenses/.
26 * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27 * compression format, as used in the WIM (Windows IMaging) file format.
29 * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30 * "Near-optimal" is significantly slower than "lazy", but results in a better
31 * compression ratio. The "near-optimal" algorithm is used at the default
32 * compression level.
34 * This file may need some slight modifications to be used outside of the WIM
35 * format. In particular, in other situations the LZX block header might be
36 * slightly different, and sliding window support might be required.
38 * Note: LZX is a compression format derived from DEFLATE, the format used by
39 * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40 * Certain details are quite similar, such as the method for storing Huffman
41 * codes. However, the main differences are:
43 * - LZX preprocesses the data to attempt to make x86 machine code slightly more
44 * compressible before attempting to compress it further.
46 * - LZX uses a "main" alphabet which combines literals and matches, with the
47 * match symbols containing a "length header" (giving all or part of the match
48 * length) and an "offset slot" (giving, roughly speaking, the order of
49 * magnitude of the match offset).
51 * - LZX does not have static Huffman blocks (that is, the kind with preset
52 * Huffman codes); however, it does have two types of dynamic Huffman blocks
53 * ("verbatim" and "aligned").
55 * - LZX has a minimum match length of 2 rather than 3. Length 2 matches can be
56 * useful, but generally only if the parser is smart about choosing them.
58 * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59 * of match offsets. This is very useful for certain types of files, such as
60 * binary files that have repeating records.
68 * Start a new LZX block (with new Huffman codes) after this many bytes.
70 * Note: actual block sizes may slightly exceed this value.
72 * TODO: recursive splitting and cost evaluation might be good for an extremely
73 * high compression mode, but otherwise it is almost always far too slow for how
74 * much it helps. Perhaps some sort of heuristic would be useful?
76 #define LZX_DIV_BLOCK_SIZE 32768
79 * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the
80 * match cache for each byte position. This value should be high enough so that
81 * nearly all the time, all matches found in a given block can fit in the match
82 * cache. However, fallback behavior on cache overflow is still required.
84 #define LZX_CACHE_PER_POS 6
86 #define LZX_CACHE_LEN (LZX_DIV_BLOCK_SIZE * (LZX_CACHE_PER_POS + 1))
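/*
 * Illustrative sketch (not from the original source): the match cache is
 * consumed as a series of per-position runs, each headed by one lz_match
 * entry whose 'length' field holds the number of matches cached for that
 * position, mirroring how cache_ptr is used in lzx_find_min_cost_path()
 * and lzx_compress_near_optimal() below:
 *
 *	num_matches = cache_ptr->length;      // header entry for this position
 *	matches     = cache_ptr + 1;          // 'num_matches' entries follow
 *	cache_ptr   = matches + num_matches;  // start of the next position's run
 *
 * This is why LZX_CACHE_LEN reserves (LZX_CACHE_PER_POS + 1) entries per
 * position: one header plus, on average, up to LZX_CACHE_PER_POS matches.
 */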
88 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
91 * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
92 * This makes it possible to consider fractional bit costs.
94 * Note: this is only useful as a statistical trick for when the true costs are
95 * unknown. In reality, each token in LZX requires a whole number of bits to
96 * output. */
98 #define LZX_BIT_COST 16
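/*
 * A minimal illustration of the scaling (assumes LZX_BIT_COST == 16, as
 * defined above): a symbol whose estimated cost is 4.5 bits is stored as
 * 4.5 * 16 = 72, while a 5-bit codeword costs 5 * 16 = 80, so fractional
 * estimates can be compared using only integer arithmetic:
 *
 *	unsigned cost_a = 72;			// "4.5 bits"
 *	unsigned cost_b = 5 * LZX_BIT_COST;	// "5.0 bits"
 *	// cost_a < cost_b, even though both round to 5 whole bits
 */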
101 * Consideration of aligned offset costs is disabled for now, due to
102 * insufficient benefit gained from the time spent.
104 #define LZX_CONSIDER_ALIGNED_COSTS 0
107 * The maximum compression level at which we use the faster algorithm.
109 #define LZX_MAX_FAST_LEVEL 34
112 * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
113 * for finding length 2 matches. This can be as high as 16 (in which case the
114 * hash function is trivial), but using a smaller hash table actually speeds up
115 * compression due to reduced cache pressure.
117 #define LZX_HASH2_ORDER 12
118 #define LZX_HASH2_LENGTH (1UL << LZX_HASH2_ORDER)
120 #include "wimlib/lzx_common.h"
123 * The maximum allowed window order for the matchfinder.
125 #define MATCHFINDER_MAX_WINDOW_ORDER LZX_MAX_WINDOW_ORDER
129 #include "wimlib/bt_matchfinder.h"
130 #include "wimlib/compress_common.h"
131 #include "wimlib/compressor_ops.h"
132 #include "wimlib/endianness.h"
133 #include "wimlib/error.h"
134 #include "wimlib/hc_matchfinder.h"
135 #include "wimlib/lz_extend.h"
136 #include "wimlib/unaligned.h"
137 #include "wimlib/util.h"
139 struct lzx_output_bitstream;
141 /* Codewords for the LZX Huffman codes. */
142 struct lzx_codewords {
143 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
144 u32 len[LZX_LENCODE_NUM_SYMBOLS];
145 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
148 /* Codeword lengths (in bits) for the LZX Huffman codes.
149 * A zero length means the corresponding codeword has zero frequency. */
150 struct lzx_lens {
151 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
152 u8 len[LZX_LENCODE_NUM_SYMBOLS];
153 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
156 /* Cost model for near-optimal parsing */
158 struct lzx_costs {
159 /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
160 * length 'len' match that has an offset belonging to 'offset_slot'. */
161 u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
163 /* Cost for each symbol in the main code */
164 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
166 /* Cost for each symbol in the length code */
167 u32 len[LZX_LENCODE_NUM_SYMBOLS];
169 #if LZX_CONSIDER_ALIGNED_COSTS
170 /* Cost for each symbol in the aligned code */
171 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
172 #endif
173 };
175 /* Codewords and lengths for the LZX Huffman codes. */
176 struct lzx_codes {
177 struct lzx_codewords codewords;
178 struct lzx_lens lens;
181 /* Symbol frequency counters for the LZX Huffman codes. */
182 struct lzx_freqs {
183 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
184 u32 len[LZX_LENCODE_NUM_SYMBOLS];
185 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
188 /* Intermediate LZX match/literal format */
189 struct lzx_item {
191 /* Bits 0 - 9: Main symbol
192 * Bits 10 - 17: Length symbol
193 * Bits 18 - 22: Number of extra offset bits
194 * Bits 23+ : Extra offset bits */
195 u64 data;
196 };
199 * This structure represents a byte position in the input buffer and a node in
200 * the graph of possible match/literal choices.
202 * Logically, each incoming edge to this node is labeled with a literal or a
203 * match that can be taken to reach this position from an earlier position; and
204 * each outgoing edge from this node is labeled with a literal or a match that
205 * can be taken to advance from this position to a later position.
207 struct lzx_optimum_node {
209 /* The cost, in bits, of the lowest-cost path that has been found to
210 * reach this position. This can change as progressively lower cost
211 * paths are found to reach this position. */
212 u32 cost;
215 * The match or literal that was taken to reach this position. This can
216 * change as progressively lower cost paths are found to reach this
219 * This variable is divided into two bitfields.
221 * Literals:
222 * Low bits are 1, high bits are the literal.
224 * Explicit offset matches:
225 * Low bits are the match length, high bits are the offset plus 2.
227 * Repeat offset matches:
228 * Low bits are the match length, high bits are the queue index. */
230 u32 item;
231 #define OPTIMUM_OFFSET_SHIFT 9
232 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
233 } _aligned_attribute(8);
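/*
 * Illustrative sketch of how an 'item' value is decoded using the bitfields
 * documented above (lzx_declare_item() below performs this same decoding):
 *
 *	u32 len         = item & OPTIMUM_LEN_MASK;
 *	u32 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
 *
 *	if (len == 1)					// literal
 *		byte = offset_data;
 *	else if (offset_data < LZX_NUM_RECENT_OFFSETS)	// repeat offset match
 *		rep_index = offset_data;
 *	else						// explicit offset match
 *		offset = offset_data - LZX_OFFSET_ADJUSTMENT;
 */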
236 * Least-recently-used queue for match offsets.
238 * This is represented as a 64-bit integer for efficiency. There are three
239 * offsets of 21 bits each. Bit 64 is garbage.
241 struct lzx_lru_queue {
242 u64 R;
243 };
245 #define LZX_QUEUE64_OFFSET_SHIFT 21
246 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
248 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
249 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
250 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
252 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
253 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
254 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
257 lzx_lru_queue_init(struct lzx_lru_queue *queue)
259 queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
260 ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
261 ((u64)1 << LZX_QUEUE64_R2_SHIFT);
265 lzx_lru_queue_R0(struct lzx_lru_queue queue)
267 return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
271 lzx_lru_queue_R1(struct lzx_lru_queue queue)
273 return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
277 lzx_lru_queue_R2(struct lzx_lru_queue queue)
279 return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
282 /* Push a match offset onto the front (most recently used) end of the queue. */
283 static inline struct lzx_lru_queue
284 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
286 return (struct lzx_lru_queue) {
287 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
291 /* Pop a match offset off the front (most recently used) end of the queue. */
293 lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
295 u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
296 queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
297 return offset;
300 /* Swap a match offset to the front of the queue. */
301 static inline struct lzx_lru_queue
302 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
304 if (idx == 0)
305 return queue;
307 if (idx == 1)
308 return (struct lzx_lru_queue) {
309 .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
310 (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
311 (queue.R & LZX_QUEUE64_R2_MASK),
314 return (struct lzx_lru_queue) {
315 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
316 (queue.R & LZX_QUEUE64_R1_MASK) |
317 (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
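/*
 * Usage sketch (illustrative only): pushing two explicit offsets onto a
 * freshly initialized queue, then swapping index 1 to the front, behaves as
 * follows under the definitions above:
 *
 *	struct lzx_lru_queue q;
 *	lzx_lru_queue_init(&q);		// R0 = 1,   R1 = 1,   R2 = 1
 *	q = lzx_lru_queue_push(q, 100);	// R0 = 100, R1 = 1,   R2 = 1
 *	q = lzx_lru_queue_push(q, 200);	// R0 = 200, R1 = 100, R2 = 1
 *	q = lzx_lru_queue_swap(q, 1);	// R0 = 100, R1 = 200, R2 = 1
 */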
321 /* The main LZX compressor structure */
322 struct lzx_compressor {
324 /* The "nice" match length: if a match of this length is found, then
325 * choose it immediately without further consideration. */
326 unsigned nice_match_length;
328 /* The maximum search depth: consider at most this many potential
329 * matches at each position. */
330 unsigned max_search_depth;
332 /* The log base 2 of the LZX window size for LZ match offset encoding
333 * purposes. This will be >= LZX_MIN_WINDOW_ORDER and <=
334 * LZX_MAX_WINDOW_ORDER. */
335 unsigned window_order;
337 /* The number of symbols in the main alphabet. This depends on
338 * @window_order, since @window_order determines the maximum possible
340 unsigned num_main_syms;
342 /* Number of optimization passes per block */
343 unsigned num_optim_passes;
345 /* The preprocessed buffer of data being compressed */
346 u8 *in_buffer;
348 /* The number of bytes of data to be compressed, which is the number of
349 * bytes of data in @in_buffer that are actually valid. */
350 size_t in_nbytes;
352 /* Pointer to the compress() implementation chosen at allocation time */
353 void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
355 /* The Huffman symbol frequency counters for the current block. */
356 struct lzx_freqs freqs;
358 /* The Huffman codes for the current and previous blocks. The one with
359 * index 'codes_index' is for the current block, and the other one is
360 * for the previous block. */
361 struct lzx_codes codes[2];
362 unsigned codes_index;
364 /* The match/literal sequence the algorithm chose for the current block.
366 struct lzx_item chosen_items[LZX_DIV_BLOCK_SIZE + LZX_MAX_MATCH_LEN + 1];
368 /* Table mapping match offset => offset slot for small offsets */
369 #define LZX_NUM_FAST_OFFSETS 32768
370 u8 offset_slot_fast[LZX_NUM_FAST_OFFSETS];
373 /* Data for greedy or lazy parsing */
375 /* Hash chains matchfinder (MUST BE LAST!!!) */
376 struct hc_matchfinder hc_mf;
379 /* Data for near-optimal parsing */
381 /* The graph nodes for the current block */
382 struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE +
383 LZX_MAX_MATCH_LEN + 1];
385 /* The cost model for the current block */
386 struct lzx_costs costs;
388 /* Cached matches for the current block */
389 struct lz_match match_cache[LZX_CACHE_LEN + 1 +
390 LZX_MAX_MATCHES_PER_POS];
391 struct lz_match *cache_overflow_mark;
393 /* Hash table for finding length 2 matches */
394 pos_t hash2_tab[LZX_HASH2_LENGTH]
395 _aligned_attribute(MATCHFINDER_ALIGNMENT);
397 /* Binary trees matchfinder (MUST BE LAST!!!) */
398 struct bt_matchfinder bt_mf;
403 /* Compute a hash value for the next 2 bytes of uncompressed data. */
405 lz_hash_2_bytes(const u8 *in_next)
407 u16 next_2_bytes = load_u16_unaligned(in_next);
408 if (LZX_HASH2_ORDER == 16)
409 return next_2_bytes;
410 else
411 return lz_hash(next_2_bytes, LZX_HASH2_ORDER);
415 * Structure to keep track of the current state of sending bits to the
416 * compressed output buffer.
418 * The LZX bitstream is encoded as a sequence of 16-bit coding units.
420 struct lzx_output_bitstream {
422 /* Bits that haven't yet been written to the output buffer. */
423 u32 bitbuf;
425 /* Number of bits currently held in @bitbuf. */
426 u32 bitcount;
428 /* Pointer to the start of the output buffer. */
429 le16 *start;
431 /* Pointer to the position in the output buffer at which the next coding
432 * unit should be written. */
433 le16 *next;
435 /* Pointer past the end of the output buffer. */
436 le16 *end;
437 };
440 * Initialize the output bitstream.
442 * @os
443 * The output bitstream structure to initialize.
444 * @buffer
445 * The buffer being written to.
446 * @size
447 * Size of @buffer, in bytes.
450 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
452 os->bitbuf = 0;
453 os->bitcount = 0;
454 os->start = buffer;
455 os->next = os->start;
456 os->end = os->start + size / sizeof(le16);
460 * Write some bits to the output bitstream.
462 * The bits are given by the low-order @num_bits bits of @bits. Higher-order
463 * bits in @bits cannot be set. At most 17 bits can be written at once.
465 * @max_num_bits is a compile-time constant that specifies the maximum number of
466 * bits that can ever be written at the call site. Currently, it is used to
467 * optimize away the conditional code for writing a second 16-bit coding unit
468 * when writing fewer than 17 bits.
470 * If the output buffer space is exhausted, then the bits will be ignored, and
471 * lzx_flush_output() will return 0 when it gets called.
474 lzx_write_varbits(struct lzx_output_bitstream *os,
475 const u32 bits, const unsigned num_bits,
476 const unsigned max_num_bits)
478 /* This code is optimized for LZX, which never needs to write more than
479 * 17 bits at once. */
480 LZX_ASSERT(num_bits <= 17);
481 LZX_ASSERT(num_bits <= max_num_bits);
482 LZX_ASSERT(os->bitcount <= 15);
484 /* Add the bits to the bit buffer variable. @bitcount will be at most
485 * 15, so there will be just enough space for the maximum possible
486 * @num_bits of 17. */
487 os->bitcount += num_bits;
488 os->bitbuf = (os->bitbuf << num_bits) | bits;
490 /* Check whether any coding units need to be written. */
491 if (os->bitcount >= 16) {
493 os->bitcount -= 16;
495 /* Write a coding unit, unless it would overflow the buffer. */
496 if (os->next != os->end)
497 put_unaligned_u16_le(os->bitbuf >> os->bitcount, os->next++);
499 /* If writing 17 bits, a second coding unit might need to be
500 * written. But because 'max_num_bits' is a compile-time
501 * constant, the compiler will optimize away this code at most
502 * call sites. */
503 if (max_num_bits == 17 && os->bitcount == 16) {
504 if (os->next != os->end)
505 put_unaligned_u16_le(os->bitbuf, os->next++);
506 os->bitcount = 0;
511 /* Use when @num_bits is a compile-time constant. Otherwise use
512 * lzx_write_varbits(). */
514 lzx_write_bits(struct lzx_output_bitstream *os,
515 const u32 bits, const unsigned num_bits)
517 lzx_write_varbits(os, bits, num_bits, num_bits);
521 * Flush the last coding unit to the output buffer if needed. Return the total
522 * number of bytes written to the output buffer, or 0 if an overflow occurred.
525 lzx_flush_output(struct lzx_output_bitstream *os)
527 if (os->next == os->end)
528 return 0;
530 if (os->bitcount != 0)
531 put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next++);
533 return (const u8 *)os->next - (const u8 *)os->start;
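/*
 * Usage sketch (illustrative only; field values are arbitrary): the three
 * routines above are used together as follows, emitting 16-bit little-endian
 * coding units into the caller's buffer:
 *
 *	struct lzx_output_bitstream os;
 *	u8 buf[64];
 *
 *	lzx_init_output(&os, buf, sizeof(buf));
 *	lzx_write_bits(&os, 0x1, 3);	// e.g. a 3-bit block type field
 *	lzx_write_bits(&os, 0x0, 1);	// e.g. a 1-bit flag
 *	nbytes = lzx_flush_output(&os);	// total bytes written, or 0 on overflow
 */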
536 /* Build the main, length, and aligned offset Huffman codes used in LZX.
538 * This takes as input the frequency tables for each code and produces as output
539 * a set of tables that map symbols to codewords and codeword lengths. */
541 lzx_make_huffman_codes(struct lzx_compressor *c)
543 const struct lzx_freqs *freqs = &c->freqs;
544 struct lzx_codes *codes = &c->codes[c->codes_index];
546 make_canonical_huffman_code(c->num_main_syms,
547 LZX_MAX_MAIN_CODEWORD_LEN,
548 freqs->main,
549 codes->lens.main,
550 codes->codewords.main);
552 make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
553 LZX_MAX_LEN_CODEWORD_LEN,
554 freqs->len,
555 codes->lens.len,
556 codes->codewords.len);
558 make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
559 LZX_MAX_ALIGNED_CODEWORD_LEN,
560 freqs->aligned,
561 codes->lens.aligned,
562 codes->codewords.aligned);
565 /* Reset the symbol frequencies for the LZX Huffman codes. */
567 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
569 memset(&c->freqs, 0, sizeof(c->freqs));
573 lzx_compute_precode_items(const u8 lens[restrict],
574 const u8 prev_lens[restrict],
575 const unsigned num_lens,
576 u32 precode_freqs[restrict],
577 unsigned precode_items[restrict])
586 itemptr = precode_items;
589 /* Find the next run of codeword lengths. */
591 /* len = the length being repeated */
592 len = lens[run_start];
594 run_end = run_start + 1;
596 /* Fast case for a single length. */
597 if (likely(run_end == num_lens || len != lens[run_end])) {
598 delta = prev_lens[run_start] - len;
601 precode_freqs[delta]++;
607 /* Extend the run. */
610 } while (run_end != num_lens && len == lens[run_end]);
615 /* Symbol 18: RLE 20 to 51 zeroes at a time. */
616 while ((run_end - run_start) >= 20) {
617 extra_bits = min((run_end - run_start) - 20, 0x1f);
619 *itemptr++ = 18 | (extra_bits << 5);
620 run_start += 20 + extra_bits;
623 /* Symbol 17: RLE 4 to 19 zeroes at a time. */
624 if ((run_end - run_start) >= 4) {
625 extra_bits = min((run_end - run_start) - 4, 0xf);
627 *itemptr++ = 17 | (extra_bits << 5);
628 run_start += 4 + extra_bits;
632 /* A run of nonzero lengths. */
634 /* Symbol 19: RLE 4 to 5 of any length at a time. */
635 while ((run_end - run_start) >= 4) {
636 extra_bits = (run_end - run_start) > 4;
637 delta = prev_lens[run_start] - len;
641 precode_freqs[delta]++;
642 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
643 run_start += 4 + extra_bits;
647 /* Output any remaining lengths without RLE. */
648 while (run_start != run_end) {
649 delta = prev_lens[run_start] - len;
652 precode_freqs[delta]++;
656 } while (run_start != num_lens);
658 return itemptr - precode_items;
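/*
 * For reference, each precode item produced above packs its fields as decoded
 * by lzx_write_compressed_code() below (a sketch mirroring that code):
 *
 *	unsigned sym   = item & 0x1F;	// precode symbol (0 through 19)
 *	unsigned extra = item >> 5;	// RLE extra bits, if sym >= 17
 *	// for symbol 19, bit 5 is the extra bit and bits 6+ hold the delta
 *	unsigned delta = item >> 6;
 */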
662 * Output a Huffman code in the compressed form used in LZX.
664 * The Huffman code is represented in the output as a logical series of codeword
665 * lengths from which the Huffman code, which must be in canonical form, can be
666 * reconstructed.
668 * The codeword lengths are themselves compressed using a separate Huffman code,
669 * the "precode", which contains a symbol for each possible codeword length in
670 * the larger code as well as several special symbols to represent repeated
671 * codeword lengths (a form of run-length encoding). The precode is itself
672 * constructed in canonical form, and its codeword lengths are represented
673 * literally in 20 4-bit fields that immediately precede the compressed codeword
674 * lengths of the larger code.
676 * Furthermore, the codeword lengths of the larger code are actually represented
677 * as deltas from the codeword lengths of the corresponding code in the previous
678 * block.
680 * @os
681 * Bitstream to which to write the compressed Huffman code.
682 * @lens
683 * The codeword lengths, indexed by symbol, in the Huffman code.
684 * @prev_lens
685 * The codeword lengths, indexed by symbol, in the corresponding Huffman
686 * code in the previous block, or all zeroes if this is the first block.
687 * @num_lens
688 * The number of symbols in the Huffman code.
691 lzx_write_compressed_code(struct lzx_output_bitstream *os,
692 const u8 lens[restrict],
693 const u8 prev_lens[restrict],
694 const unsigned num_lens)
696 u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
697 u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
698 u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
699 unsigned precode_items[num_lens];
700 unsigned num_precode_items;
701 unsigned precode_item;
702 unsigned precode_sym;
703 unsigned i;
705 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
706 precode_freqs[i] = 0;
708 /* Compute the "items" (RLE / literal tokens and extra bits) with which
709 * the codeword lengths in the larger code will be output. */
710 num_precode_items = lzx_compute_precode_items(lens,
711 prev_lens,
712 num_lens,
713 precode_freqs,
714 precode_items);
716 /* Build the precode. */
717 make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
718 LZX_MAX_PRE_CODEWORD_LEN,
719 precode_freqs, precode_lens,
720 precode_codewords);
722 /* Output the lengths of the codewords in the precode. */
723 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
724 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
726 /* Output the encoded lengths of the codewords in the larger code. */
727 for (i = 0; i < num_precode_items; i++) {
728 precode_item = precode_items[i];
729 precode_sym = precode_item & 0x1F;
730 lzx_write_varbits(os, precode_codewords[precode_sym],
731 precode_lens[precode_sym],
732 LZX_MAX_PRE_CODEWORD_LEN);
733 if (precode_sym >= 17) {
734 if (precode_sym == 17) {
735 lzx_write_bits(os, precode_item >> 5, 4);
736 } else if (precode_sym == 18) {
737 lzx_write_bits(os, precode_item >> 5, 5);
738 } else {
739 lzx_write_bits(os, (precode_item >> 5) & 1, 1);
740 precode_sym = precode_item >> 6;
741 lzx_write_varbits(os, precode_codewords[precode_sym],
742 precode_lens[precode_sym],
743 LZX_MAX_PRE_CODEWORD_LEN);
749 /* Output a match or literal. */
751 lzx_write_item(struct lzx_output_bitstream *os, struct lzx_item item,
752 unsigned ones_if_aligned, const struct lzx_codes *codes)
754 u64 data = item.data;
755 unsigned main_symbol;
756 unsigned len_symbol;
757 unsigned num_extra_bits;
758 u32 extra_bits;
760 main_symbol = data & 0x3FF;
762 lzx_write_varbits(os, codes->codewords.main[main_symbol],
763 codes->lens.main[main_symbol],
764 LZX_MAX_MAIN_CODEWORD_LEN);
766 if (main_symbol < LZX_NUM_CHARS) /* Literal? */
767 return;
769 len_symbol = (data >> 10) & 0xFF;
771 if (len_symbol != LZX_LENCODE_NUM_SYMBOLS) {
772 lzx_write_varbits(os, codes->codewords.len[len_symbol],
773 codes->lens.len[len_symbol],
774 LZX_MAX_LEN_CODEWORD_LEN);
777 num_extra_bits = (data >> 18) & 0x1F;
778 if (num_extra_bits == 0) /* Small offset or repeat offset match? */
779 return;
781 extra_bits = data >> 23;
783 if ((num_extra_bits & ones_if_aligned) >= LZX_NUM_ALIGNED_OFFSET_BITS) {
785 /* Aligned offset blocks: The low 3 bits of the extra offset
786 * bits are Huffman-encoded using the aligned offset code. The
787 * remaining bits are output literally. */
789 lzx_write_varbits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
790 num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS,
791 17 - LZX_NUM_ALIGNED_OFFSET_BITS);
793 lzx_write_varbits(os,
794 codes->codewords.aligned[extra_bits & LZX_ALIGNED_OFFSET_BITMASK],
795 codes->lens.aligned[extra_bits & LZX_ALIGNED_OFFSET_BITMASK],
796 LZX_MAX_ALIGNED_CODEWORD_LEN);
798 /* Verbatim blocks, or fewer than 3 extra bits: All extra
799 * offset bits are output literally. */
800 lzx_write_varbits(os, extra_bits, num_extra_bits, 17);
805 * Write all matches and literal bytes (which were precomputed) in an LZX
806 * compressed block to the output bitstream in the final compressed
809 * @os
810 * The output bitstream.
811 * @block_type
812 * The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
813 * LZX_BLOCKTYPE_VERBATIM).
814 * @items
815 * The array of matches/literals to output.
816 * @num_items
817 * Number of matches/literals to output (length of @items).
818 * @codes
819 * The main, length, and aligned offset Huffman codes for the current
820 * LZX compressed block.
823 lzx_write_items(struct lzx_output_bitstream *os, int block_type,
824 const struct lzx_item items[], u32 num_items,
825 const struct lzx_codes *codes)
827 unsigned ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
829 for (u32 i = 0; i < num_items; i++)
830 lzx_write_item(os, items[i], ones_if_aligned, codes);
834 lzx_write_compressed_block(int block_type,
835 u32 block_size,
836 unsigned window_order,
837 unsigned num_main_syms,
838 const struct lzx_item chosen_items[],
839 u32 num_chosen_items,
840 const struct lzx_codes * codes,
841 const struct lzx_lens * prev_lens,
842 struct lzx_output_bitstream * os)
844 LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
845 block_type == LZX_BLOCKTYPE_VERBATIM);
847 /* The first three bits indicate the type of block and are one of the
848 * LZX_BLOCKTYPE_* constants. */
849 lzx_write_bits(os, block_type, 3);
851 /* Output the block size.
853 * The original LZX format seemed to always encode the block size in 3
854 * bytes. However, the implementation in WIMGAPI, as used in WIM files,
855 * uses the first bit to indicate whether the block is the default size
856 * (32768) or a different size given explicitly by the next 16 bits.
858 * By default, this compressor uses a window size of 32768 and therefore
859 * follows the WIMGAPI behavior. However, this compressor also supports
860 * window sizes greater than 32768 bytes, which do not appear to be
861 * supported by WIMGAPI. In such cases, we retain the default size bit
862 * to mean a size of 32768 bytes but output non-default block size in 24
863 * bits rather than 16. The compatibility of this behavior is unknown
864 * because WIMs created with chunk size greater than 32768 can seemingly
865 * only be opened by wimlib anyway. */
866 if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
867 lzx_write_bits(os, 1, 1);
869 lzx_write_bits(os, 0, 1);
871 if (window_order >= 16)
872 lzx_write_bits(os, block_size >> 16, 8);
874 lzx_write_bits(os, block_size & 0xFFFF, 16);
877 /* If it's an aligned offset block, output the aligned offset code. */
878 if (block_type == LZX_BLOCKTYPE_ALIGNED) {
879 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
880 lzx_write_bits(os, codes->lens.aligned[i],
881 LZX_ALIGNEDCODE_ELEMENT_SIZE);
885 /* Output the main code (two parts). */
886 lzx_write_compressed_code(os, codes->lens.main,
887 prev_lens->main,
888 LZX_NUM_CHARS);
889 lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
890 prev_lens->main + LZX_NUM_CHARS,
891 num_main_syms - LZX_NUM_CHARS);
893 /* Output the length code. */
894 lzx_write_compressed_code(os, codes->lens.len,
895 prev_lens->len,
896 LZX_LENCODE_NUM_SYMBOLS);
898 /* Output the compressed matches and literals. */
899 lzx_write_items(os, block_type, chosen_items, num_chosen_items, codes);
902 /* Given the frequencies of symbols in an LZX-compressed block and the
903 * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
904 * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
905 * will take fewer bits to output. */
907 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
908 const struct lzx_codes * codes)
910 u32 aligned_cost = 0;
911 u32 verbatim_cost = 0;
913 /* A verbatim block requires 3 bits in each place that an aligned symbol
914 * would be used in an aligned offset block. */
915 for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
916 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
917 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
920 /* Account for output of the aligned offset code. */
921 aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
923 if (aligned_cost < verbatim_cost)
924 return LZX_BLOCKTYPE_ALIGNED;
926 return LZX_BLOCKTYPE_VERBATIM;
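/*
 * Tiny numeric illustration (not from real data, and assuming the usual 8
 * aligned symbols with 3-bit length fields): if the aligned symbols occur
 * 1000 times in total, a verbatim block spends 3 * 1000 = 3000 bits on them,
 * while an aligned offset block spends sum(lens.aligned[i] * freqs.aligned[i])
 * bits plus the 8 * 3 = 24-bit header for the aligned code lengths; whichever
 * total is smaller determines the block type returned above.
 */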
930 * Finish an LZX block:
932 * - build the Huffman codes
933 * - decide whether to output the block as VERBATIM or ALIGNED
934 * - output the block
935 * - swap the indices of the current and previous Huffman codes
938 lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
939 u32 block_size, u32 num_chosen_items)
941 int block_type;
943 lzx_make_huffman_codes(c);
945 block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
946 &c->codes[c->codes_index]);
947 lzx_write_compressed_block(block_type,
948 block_size,
949 c->window_order,
950 c->num_main_syms,
951 c->chosen_items,
952 num_chosen_items,
953 &c->codes[c->codes_index],
954 &c->codes[c->codes_index ^ 1].lens,
955 os);
957 c->codes_index ^= 1;
959 /* Return the offset slot for the specified offset, which must be
960 * less than LZX_NUM_FAST_OFFSETS. */
961 static inline unsigned
962 lzx_get_offset_slot_fast(struct lzx_compressor *c, u32 offset)
964 LZX_ASSERT(offset < LZX_NUM_FAST_OFFSETS);
965 return c->offset_slot_fast[offset];
968 /* Tally, and optionally record, the specified literal byte. */
970 lzx_declare_literal(struct lzx_compressor *c, unsigned literal,
971 struct lzx_item **next_chosen_item)
973 unsigned main_symbol = lzx_main_symbol_for_literal(literal);
975 c->freqs.main[main_symbol]++;
977 if (next_chosen_item) {
978 *(*next_chosen_item)++ = (struct lzx_item) {
979 .data = main_symbol,
980 };
984 /* Tally, and optionally record, the specified repeat offset match. */
986 lzx_declare_repeat_offset_match(struct lzx_compressor *c,
987 unsigned len, unsigned rep_index,
988 struct lzx_item **next_chosen_item)
990 unsigned len_header;
991 unsigned len_symbol;
992 unsigned main_symbol;
994 if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
995 len_header = len - LZX_MIN_MATCH_LEN;
996 len_symbol = LZX_LENCODE_NUM_SYMBOLS;
997 } else {
998 len_header = LZX_NUM_PRIMARY_LENS;
999 len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
1000 c->freqs.len[len_symbol]++;
1003 main_symbol = lzx_main_symbol_for_match(rep_index, len_header);
1005 c->freqs.main[main_symbol]++;
1007 if (next_chosen_item) {
1008 *(*next_chosen_item)++ = (struct lzx_item) {
1009 .data = (u64)main_symbol | ((u64)len_symbol << 10),
1014 /* Tally, and optionally record, the specified explicit offset match. */
1016 lzx_declare_explicit_offset_match(struct lzx_compressor *c, unsigned len, u32 offset,
1017 struct lzx_item **next_chosen_item)
1019 unsigned len_header;
1020 unsigned len_symbol;
1021 unsigned main_symbol;
1022 unsigned offset_slot;
1023 unsigned num_extra_bits;
1024 u32 extra_bits;
1026 if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
1027 len_header = len - LZX_MIN_MATCH_LEN;
1028 len_symbol = LZX_LENCODE_NUM_SYMBOLS;
1029 } else {
1030 len_header = LZX_NUM_PRIMARY_LENS;
1031 len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
1032 c->freqs.len[len_symbol]++;
1035 offset_slot = (offset < LZX_NUM_FAST_OFFSETS) ?
1036 lzx_get_offset_slot_fast(c, offset) :
1037 lzx_get_offset_slot(offset);
1039 main_symbol = lzx_main_symbol_for_match(offset_slot, len_header);
1041 c->freqs.main[main_symbol]++;
1043 num_extra_bits = lzx_extra_offset_bits[offset_slot];
1045 if (num_extra_bits >= LZX_NUM_ALIGNED_OFFSET_BITS)
1046 c->freqs.aligned[(offset + LZX_OFFSET_ADJUSTMENT) &
1047 LZX_ALIGNED_OFFSET_BITMASK]++;
1049 if (next_chosen_item) {
1051 extra_bits = (offset + LZX_OFFSET_ADJUSTMENT) -
1052 lzx_offset_slot_base[offset_slot];
1054 BUILD_BUG_ON(LZX_MAINCODE_MAX_NUM_SYMBOLS > (1 << 10));
1055 BUILD_BUG_ON(LZX_LENCODE_NUM_SYMBOLS > (1 << 8));
1056 *(*next_chosen_item)++ = (struct lzx_item) {
1057 .data = (u64)main_symbol |
1058 ((u64)len_symbol << 10) |
1059 ((u64)num_extra_bits << 18) |
1060 ((u64)extra_bits << 23),
1066 /* Tally, and optionally record, the specified match or literal. */
1068 lzx_declare_item(struct lzx_compressor *c, u32 item,
1069 struct lzx_item **next_chosen_item)
1071 u32 len = item & OPTIMUM_LEN_MASK;
1072 u32 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1074 if (len == 1)
1075 lzx_declare_literal(c, offset_data, next_chosen_item);
1076 else if (offset_data < LZX_NUM_RECENT_OFFSETS)
1077 lzx_declare_repeat_offset_match(c, len, offset_data,
1078 next_chosen_item);
1079 else
1080 lzx_declare_explicit_offset_match(c, len,
1081 offset_data - LZX_OFFSET_ADJUSTMENT,
1082 next_chosen_item);
1086 lzx_record_item_list(struct lzx_compressor *c,
1087 struct lzx_optimum_node *cur_node,
1088 struct lzx_item **next_chosen_item)
1090 struct lzx_optimum_node *end_node;
1091 u32 saved_item;
1092 u32 item;
1094 /* The list is currently in reverse order (last item to first item).
1095 * Reverse it. */
1096 end_node = cur_node;
1097 saved_item = cur_node->item;
1098 do {
1099 item = saved_item;
1100 cur_node -= item & OPTIMUM_LEN_MASK;
1101 saved_item = cur_node->item;
1102 cur_node->item = item;
1103 } while (cur_node != c->optimum_nodes);
1105 /* Walk the list of items from beginning to end, tallying and recording
1106 * each one. */
1107 do {
1108 lzx_declare_item(c, cur_node->item, next_chosen_item);
1109 cur_node += (cur_node->item) & OPTIMUM_LEN_MASK;
1110 } while (cur_node != end_node);
1114 lzx_tally_item_list(struct lzx_compressor *c, struct lzx_optimum_node *cur_node)
1116 /* Since we're just tallying the items, we don't need to reverse the
1117 * list. Processing the items in reverse order is fine. */
1118 do {
1119 lzx_declare_item(c, cur_node->item, NULL);
1120 cur_node -= (cur_node->item & OPTIMUM_LEN_MASK);
1121 } while (cur_node != c->optimum_nodes);
1125 * Find an inexpensive path through the graph of possible match/literal choices
1126 * for the current block. The nodes of the graph are
1127 * c->optimum_nodes[0...block_size]. They correspond directly to the bytes in
1128 * the current block, plus one extra node for end-of-block. The edges of the
1129 * graph are matches and literals. The goal is to find the minimum cost path
1130 * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
1132 * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1133 * proceeding forwards one node at a time. At each node, a selection of matches
1134 * (len >= 2), as well as the literal byte (len = 1), is considered. An item of
1135 * length 'len' provides a new path to reach the node 'len' bytes later. If
1136 * such a path is the lowest cost found so far to reach that later node, then
1137 * that later node is updated with the new path.
1139 * Note that although this algorithm is based on minimum cost path search, due
1140 * to various simplifying assumptions the result is not guaranteed to be the
1141 * true minimum cost, or "optimal", path over the graph of all valid LZX
1142 * representations of this block.
1144 * Also, note that because of the presence of the recent offsets queue (which is
1145 * a type of adaptive state), the algorithm cannot work backwards and compute
1146 * "cost to end" instead of "cost to beginning". Furthermore, the way the
1147 * algorithm handles this adaptive state in the "minimum-cost" parse is actually
1148 * only an approximation. It's possible for the globally optimal, minimum cost
1149 * path to contain a prefix, ending at a position, where that path prefix is
1150 * *not* the minimum cost path to that position. This can happen if such a path
1151 * prefix results in a different adaptive state which results in lower costs
1152 * later. The algorithm does not solve this problem; it only considers the
1153 * lowest cost to reach each individual position. */
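/*
 * A minimal sketch of the forward relaxation step described above (toy code,
 * not the implementation itself): for each edge of length 'len' and cost
 * 'edge_cost' leaving the current node, the node 'len' bytes later is updated
 * if the new path through the current node is cheaper:
 *
 *	u32 cost = cur_node->cost + edge_cost;
 *	if (cost < (cur_node + len)->cost) {
 *		(cur_node + len)->cost = cost;
 *		(cur_node + len)->item = (offset_data << OPTIMUM_OFFSET_SHIFT) | len;
 *	}
 *
 * The loops in lzx_find_min_cost_path() below instantiate this pattern for
 * literals, repeat offset matches, and explicit offset matches.
 */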
1155 static struct lzx_lru_queue
1156 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1157 const u8 * const restrict block_begin,
1158 const u32 block_size,
1159 const struct lzx_lru_queue initial_queue)
1161 struct lzx_optimum_node *cur_node = c->optimum_nodes;
1162 struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
1163 struct lz_match *cache_ptr = c->match_cache;
1164 const u8 *in_next = block_begin;
1165 const u8 * const block_end = block_begin + block_size;
1167 /* Instead of storing the match offset LRU queues in the
1168 * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1169 * storing them in a smaller array. This works because the algorithm
1170 * only requires a limited history of the adaptive state. Once a given
1171 * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1172 * it is no longer needed. */
1173 struct lzx_lru_queue queues[512];
1175 BUILD_BUG_ON(ARRAY_LEN(queues) < LZX_MAX_MATCH_LEN + 1);
1176 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
1178 /* Initially, the cost to reach each node is "infinity". */
1179 memset(c->optimum_nodes, 0xFF,
1180 (block_size + 1) * sizeof(c->optimum_nodes[0]));
1182 QUEUE(block_begin) = initial_queue;
1184 /* The following loop runs 'block_size' iterations, one per node. */
1185 do {
1186 unsigned num_matches;
1187 unsigned literal;
1188 u32 cost;
1191 * A selection of matches for the block was already saved in
1192 * memory so that we don't have to run the uncompressed data
1193 * through the matchfinder on every optimization pass. However,
1194 * we still search for repeat offset matches during each
1195 * optimization pass because we cannot predict the state of the
1196 * recent offsets queue. But as a heuristic, we don't bother
1197 * searching for repeat offset matches if the general-purpose
1198 * matchfinder failed to find any matches.
1200 * Note that a match of length n at some offset implies there is
1201 * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1202 * that same offset. In other words, we don't necessarily need
1203 * to use the full length of a match. The key heuristic that
1204 * saves a significant amount of time is that for each
1205 * distinct length, we only consider the smallest offset for
1206 * which that length is available. This heuristic also applies
1207 * to repeat offsets, which we order specially: R0 < R1 < R2 <
1208 * any explicit offset. Of course, this heuristic may
1209 * produce suboptimal results because offset slots in LZX are
1210 * subject to entropy encoding, but in practice this is a useful
1211 * heuristic. */
1214 num_matches = cache_ptr->length;
1215 cache_ptr++;
1217 if (num_matches) {
1218 struct lz_match *end_matches = cache_ptr + num_matches;
1219 unsigned next_len = LZX_MIN_MATCH_LEN;
1220 unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1221 const u8 *matchptr;
1223 /* Consider R0 match */
1224 matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1225 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1226 goto R0_done;
1227 BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2);
1228 do {
1229 u32 cost = cur_node->cost +
1230 c->costs.match_cost[0][
1231 next_len - LZX_MIN_MATCH_LEN];
1232 if (cost <= (cur_node + next_len)->cost) {
1233 (cur_node + next_len)->cost = cost;
1234 (cur_node + next_len)->item =
1235 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1237 if (unlikely(++next_len > max_len)) {
1238 cache_ptr = end_matches;
1239 goto done_matches;
1241 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1243 R0_done:
1245 /* Consider R1 match */
1246 matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1247 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1248 goto R1_done;
1249 if (matchptr[next_len - 1] != in_next[next_len - 1])
1250 goto R1_done;
1251 for (unsigned len = 2; len < next_len - 1; len++)
1252 if (matchptr[len] != in_next[len])
1253 goto R1_done;
1254 do {
1255 u32 cost = cur_node->cost +
1256 c->costs.match_cost[1][
1257 next_len - LZX_MIN_MATCH_LEN];
1258 if (cost <= (cur_node + next_len)->cost) {
1259 (cur_node + next_len)->cost = cost;
1260 (cur_node + next_len)->item =
1261 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1263 if (unlikely(++next_len > max_len)) {
1264 cache_ptr = end_matches;
1265 goto done_matches;
1267 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1269 R1_done:
1271 /* Consider R2 match */
1272 matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1273 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1274 goto R2_done;
1275 if (matchptr[next_len - 1] != in_next[next_len - 1])
1276 goto R2_done;
1277 for (unsigned len = 2; len < next_len - 1; len++)
1278 if (matchptr[len] != in_next[len])
1279 goto R2_done;
1280 do {
1281 u32 cost = cur_node->cost +
1282 c->costs.match_cost[2][
1283 next_len - LZX_MIN_MATCH_LEN];
1284 if (cost <= (cur_node + next_len)->cost) {
1285 (cur_node + next_len)->cost = cost;
1286 (cur_node + next_len)->item =
1287 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1289 if (unlikely(++next_len > max_len)) {
1290 cache_ptr = end_matches;
1291 goto done_matches;
1293 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1295 R2_done:
1297 while (next_len > cache_ptr->length)
1298 if (++cache_ptr == end_matches)
1299 goto done_matches;
1301 /* Consider explicit offset matches */
1302 do {
1303 u32 offset = cache_ptr->offset;
1304 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1305 unsigned offset_slot = (offset < LZX_NUM_FAST_OFFSETS) ?
1306 lzx_get_offset_slot_fast(c, offset) :
1307 lzx_get_offset_slot(offset);
1308 do {
1309 u32 cost = cur_node->cost +
1310 c->costs.match_cost[offset_slot][
1311 next_len - LZX_MIN_MATCH_LEN];
1312 #if LZX_CONSIDER_ALIGNED_COSTS
1313 if (lzx_extra_offset_bits[offset_slot] >=
1314 LZX_NUM_ALIGNED_OFFSET_BITS)
1315 cost += c->costs.aligned[offset_data &
1316 LZX_ALIGNED_OFFSET_BITMASK];
1317 #endif
1318 if (cost < (cur_node + next_len)->cost) {
1319 (cur_node + next_len)->cost = cost;
1320 (cur_node + next_len)->item =
1321 (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1323 } while (++next_len <= cache_ptr->length);
1324 } while (++cache_ptr != end_matches);
1325 }
1326 done_matches:
1329 /* Consider coding a literal.
1331 * To avoid an extra branch, actually checking the preferability
1332 * of coding the literal is integrated into the queue update
1334 literal = *in_next++;
1335 cost = cur_node->cost +
1336 c->costs.main[lzx_main_symbol_for_literal(literal)];
1338 /* Advance to the next position. */
1339 cur_node++;
1341 /* The lowest-cost path to the current position is now known.
1342 * Finalize the recent offsets queue that results from taking
1343 * this lowest-cost path. */
1345 if (cost <= cur_node->cost) {
1346 /* Literal: queue remains unchanged. */
1347 cur_node->cost = cost;
1348 cur_node->item = (literal << OPTIMUM_OFFSET_SHIFT) | 1;
1349 QUEUE(in_next) = QUEUE(in_next - 1);
1350 } else {
1351 /* Match: queue update is needed. */
1352 unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1353 u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1354 if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1355 /* Explicit offset match: insert offset at front */
1356 QUEUE(in_next) =
1357 lzx_lru_queue_push(QUEUE(in_next - len),
1358 offset_data - LZX_OFFSET_ADJUSTMENT);
1359 } else {
1360 /* Repeat offset match: swap offset to front */
1361 QUEUE(in_next) =
1362 lzx_lru_queue_swap(QUEUE(in_next - len),
1363 offset_data);
1366 } while (cur_node != end_node);
1368 /* Return the match offset queue at the end of the minimum-cost path. */
1369 return QUEUE(block_end);
1372 /* Given the costs for the main and length codewords, compute 'match_costs'. */
1374 lzx_compute_match_costs(struct lzx_compressor *c)
1376 unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
1377 struct lzx_costs *costs = &c->costs;
1379 for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1381 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1382 unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
1383 unsigned i;
1385 #if LZX_CONSIDER_ALIGNED_COSTS
1386 if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
1387 extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1388 #endif
1390 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1391 costs->match_cost[offset_slot][i] =
1392 costs->main[main_symbol++] + extra_cost;
1394 extra_cost += costs->main[main_symbol];
1396 for (; i < LZX_NUM_LENS; i++)
1397 costs->match_cost[offset_slot][i] =
1398 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
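/*
 * Worked example (illustrative): with the table built above, the cost of a
 * length 'len' match whose offset falls in 'offset_slot' is a single lookup:
 *
 *	u32 cost = c->costs.match_cost[offset_slot][len - LZX_MIN_MATCH_LEN];
 *
 * which already folds in the main symbol cost, the length symbol cost (for
 * len >= LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS), and the extra offset bits
 * scaled by LZX_BIT_COST.
 */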
1402 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
1403 * algorithm. */
1405 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1407 unsigned i;
1408 bool have_byte[256];
1409 unsigned num_used_bytes;
1411 /* The costs below are hard coded to use a scaling factor of 16. */
1412 BUILD_BUG_ON(LZX_BIT_COST != 16);
1417 * - Use smaller initial costs for literal symbols when the input buffer
1418 * contains fewer distinct bytes.
1420 * - Assume that match symbols are more costly than literal symbols.
1422 * - Assume that length symbols for shorter lengths are less costly than
1423 * length symbols for longer lengths.
1426 for (i = 0; i < 256; i++)
1427 have_byte[i] = false;
1429 for (i = 0; i < block_size; i++)
1430 have_byte[block[i]] = true;
1432 num_used_bytes = 0;
1433 for (i = 0; i < 256; i++)
1434 num_used_bytes += have_byte[i];
1436 for (i = 0; i < 256; i++)
1437 c->costs.main[i] = 140 - (256 - num_used_bytes) / 4;
1439 for (; i < c->num_main_syms; i++)
1440 c->costs.main[i] = 170;
1442 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1443 c->costs.len[i] = 103 + (i / 4);
1445 #if LZX_CONSIDER_ALIGNED_COSTS
1446 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1447 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1448 #endif
1450 lzx_compute_match_costs(c);
1453 /* Update the current cost model to reflect the computed Huffman codes. */
1455 lzx_update_costs(struct lzx_compressor *c)
1457 unsigned i;
1458 const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1460 for (i = 0; i < c->num_main_syms; i++)
1461 c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
1463 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1464 c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
1466 #if LZX_CONSIDER_ALIGNED_COSTS
1467 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1468 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
1469 #endif
1471 lzx_compute_match_costs(c);
1474 static struct lzx_lru_queue
1475 lzx_optimize_and_write_block(struct lzx_compressor *c,
1476 struct lzx_output_bitstream *os,
1477 const u8 *block_begin, const u32 block_size,
1478 const struct lzx_lru_queue initial_queue)
1480 unsigned num_passes_remaining = c->num_optim_passes;
1481 struct lzx_item *next_chosen_item;
1482 struct lzx_lru_queue new_queue;
1484 /* The first optimization pass uses a default cost model. Each
1485 * additional optimization pass uses a cost model derived from the
1486 * Huffman code computed in the previous pass. */
1488 lzx_set_default_costs(c, block_begin, block_size);
1489 lzx_reset_symbol_frequencies(c);
1490 do {
1491 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1492 initial_queue);
1493 if (num_passes_remaining > 1) {
1494 lzx_tally_item_list(c, c->optimum_nodes + block_size);
1495 lzx_make_huffman_codes(c);
1496 lzx_update_costs(c);
1497 lzx_reset_symbol_frequencies(c);
1499 } while (--num_passes_remaining);
1501 next_chosen_item = c->chosen_items;
1502 lzx_record_item_list(c, c->optimum_nodes + block_size, &next_chosen_item);
1503 lzx_finish_block(c, os, block_size, next_chosen_item - c->chosen_items);
1504 return new_queue;
1508 * This is the "near-optimal" LZX compressor.
1510 * For each block, it performs a relatively thorough graph search to find an
1511 * inexpensive (in terms of compressed size) way to output that block.
1513 * Note: there are actually many things this algorithm leaves on the table in
1514 * terms of compression ratio. So although it may be "near-optimal", it is
1515 * certainly not "optimal". The goal is not to produce the optimal compression
1516 * ratio, which for LZX is probably impossible within any practical amount of
1517 * time, but rather to produce a compression ratio significantly better than a
1518 * simpler "greedy" or "lazy" parse while still being relatively fast.
1521 lzx_compress_near_optimal(struct lzx_compressor *c,
1522 struct lzx_output_bitstream *os)
1524 const u8 * const in_begin = c->in_buffer;
1525 const u8 * in_next = in_begin;
1526 const u8 * const in_end = in_begin + c->in_nbytes;
1527 unsigned max_len = LZX_MAX_MATCH_LEN;
1528 unsigned nice_len = min(c->nice_match_length, max_len);
1529 u32 next_hash;
1530 struct lzx_lru_queue queue;
1532 bt_matchfinder_init(&c->bt_mf);
1533 matchfinder_init(c->hash2_tab, LZX_HASH2_LENGTH);
1534 next_hash = bt_matchfinder_hash_3_bytes(in_next);
1535 lzx_lru_queue_init(&queue);
1537 do {
1538 /* Starting a new block */
1539 const u8 * const in_block_begin = in_next;
1540 const u8 * const in_block_end =
1541 in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1543 /* Run the block through the matchfinder and cache the matches. */
1544 struct lz_match *cache_ptr = c->match_cache;
1545 do {
1546 struct lz_match *lz_matchptr;
1547 u32 hash2;
1548 pos_t cur_match;
1549 unsigned best_len;
1551 /* If approaching the end of the input buffer, adjust
1552 * 'max_len' and 'nice_len' accordingly. */
1553 if (unlikely(max_len > in_end - in_next)) {
1554 max_len = in_end - in_next;
1555 nice_len = min(max_len, nice_len);
1557 /* This extra check is needed to ensure that
1558 * reading the next 3 bytes when looking for a
1559 * length 2 match is valid. In addition, we
1560 * cannot allow ourselves to find a length 2
1561 * match of the very last two bytes with the
1562 * very first two bytes, since such a match has
1563 * an offset too large to be represented. */
1564 if (unlikely(max_len <
1565 max(LZ_HASH_REQUIRED_NBYTES, 3)))
1568 cache_ptr->length = 0;
1569 cache_ptr++;
1570 in_next++;
1571 continue;
1574 lz_matchptr = cache_ptr + 1;
1576 /* Check for a length 2 match. */
1577 hash2 = lz_hash_2_bytes(in_next);
1578 cur_match = c->hash2_tab[hash2];
1579 c->hash2_tab[hash2] = in_next - in_begin;
1580 if (matchfinder_node_valid(cur_match) &&
1581 (LZX_HASH2_ORDER == 16 ||
1582 load_u16_unaligned(&in_begin[cur_match]) ==
1583 load_u16_unaligned(in_next)) &&
1584 in_begin[cur_match + 2] != in_next[2])
1586 lz_matchptr->length = 2;
1587 lz_matchptr->offset = in_next - &in_begin[cur_match];
1588 lz_matchptr++;
1591 /* Check for matches of length >= 3. */
1592 lz_matchptr = bt_matchfinder_get_matches(&c->bt_mf,
1598 c->max_search_depth,
1603 cache_ptr->length = lz_matchptr - (cache_ptr + 1);
1604 cache_ptr = lz_matchptr;
1607 * If there was a very long match found, then don't
1608 * cache any matches for the bytes covered by that
1609 * match. This avoids degenerate behavior when
1610 * compressing highly redundant data, where the number
1611 * of matches can be very large.
1613 * This heuristic doesn't actually hurt the compression
1614 * ratio very much. If there's a long match, then the
1615 * data must be highly compressible, so it doesn't
1616 * matter as much what we do.
1618 if (best_len >= nice_len) {
1619 --best_len;
1620 do {
1621 if (unlikely(max_len > in_end - in_next)) {
1622 max_len = in_end - in_next;
1623 nice_len = min(max_len, nice_len);
1624 if (unlikely(max_len <
1625 max(LZ_HASH_REQUIRED_NBYTES, 3)))
1627 in_next++;
1628 cache_ptr->length = 0;
1629 cache_ptr++;
1630 continue;
1633 c->hash2_tab[lz_hash_2_bytes(in_next)] =
1634 in_next - in_begin;
1635 bt_matchfinder_skip_position(&c->bt_mf,
1640 c->max_search_depth,
1642 in_next++;
1643 cache_ptr->length = 0;
1644 cache_ptr++;
1645 } while (--best_len);
1647 } while (in_next < in_block_end &&
1648 likely(cache_ptr < c->cache_overflow_mark));
1650 /* We've finished running the block through the matchfinder.
1651 * Now choose a match/literal sequence and write the block. */
1653 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
1654 in_next - in_block_begin,
1655 queue);
1656 } while (in_next != in_end);
1660 * Given a pointer to the current byte sequence and the current list of recent
1661 * match offsets, find the longest repeat offset match.
1663 * If no match of at least 2 bytes is found, then return 0.
1665 * If a match of at least 2 bytes is found, then return its length and set
1666 * *rep_max_idx_ret to the index of its offset in @queue.
1669 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
1670 const u32 bytes_remaining,
1671 struct lzx_lru_queue queue,
1672 unsigned *rep_max_idx_ret)
1674 BUILD_BUG_ON(LZX_NUM_RECENT_OFFSETS != 3);
1675 LZX_ASSERT(bytes_remaining >= 2);
1677 const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
1678 const u16 next_2_bytes = load_u16_unaligned(in_next);
1680 unsigned rep_max_len;
1681 unsigned rep_max_idx;
1684 matchptr = in_next - lzx_lru_queue_pop(&queue);
1685 if (load_u16_unaligned(matchptr) == next_2_bytes)
1686 rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
1691 matchptr = in_next - lzx_lru_queue_pop(&queue);
1692 if (load_u16_unaligned(matchptr) == next_2_bytes) {
1693 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1694 if (rep_len > rep_max_len) {
1695 rep_max_len = rep_len;
1700 matchptr = in_next - lzx_lru_queue_pop(&queue);
1701 if (load_u16_unaligned(matchptr) == next_2_bytes) {
1702 rep_len = lz_extend(in_next, matchptr, 2, max_len);
1703 if (rep_len > rep_max_len) {
1704 rep_max_len = rep_len;
1709 *rep_max_idx_ret = rep_max_idx;
1713 /* Fast heuristic scoring for lazy parsing: how "good" is this match? */
1714 static inline unsigned
1715 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
1717 unsigned score = len;
1719 if (adjusted_offset < 4096)
1720 score++;
1722 if (adjusted_offset < 256)
1723 score++;
1725 return score;
1728 static inline unsigned
1729 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
1734 /* This is the "lazy" LZX compressor. */
1736 lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
1738 const u8 * const in_begin = c->in_buffer;
1739 const u8 * in_next = in_begin;
1740 const u8 * const in_end = in_begin + c->in_nbytes;
1741 unsigned max_len = LZX_MAX_MATCH_LEN;
1742 unsigned nice_len = min(c->nice_match_length, max_len);
1743 struct lzx_lru_queue queue;
1745 hc_matchfinder_init(&c->hc_mf);
1746 lzx_lru_queue_init(&queue);
1748 do {
1749 /* Starting a new block */
1751 const u8 * const in_block_begin = in_next;
1752 const u8 * const in_block_end =
1753 in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1754 struct lzx_item *next_chosen_item = c->chosen_items;
1755 unsigned cur_len;
1756 u32 cur_offset;
1757 u32 cur_offset_data;
1758 unsigned cur_score;
1759 unsigned next_len;
1760 u32 next_offset;
1761 u32 next_offset_data;
1762 unsigned next_score;
1763 unsigned rep_max_len;
1764 unsigned rep_max_idx;
1765 unsigned rep_score;
1766 unsigned skip_len;
1768 lzx_reset_symbol_frequencies(c);
1770 do {
1771 if (unlikely(max_len > in_end - in_next)) {
1772 max_len = in_end - in_next;
1773 nice_len = min(max_len, nice_len);
1776 /* Find the longest match at the current position. */
1778 cur_len = hc_matchfinder_longest_match(&c->hc_mf,
1784 c->max_search_depth,
1785 &cur_offset);
1786 if (cur_len < 3 ||
1787 (cur_len == 3 &&
1788 cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
1789 cur_offset != lzx_lru_queue_R0(queue) &&
1790 cur_offset != lzx_lru_queue_R1(queue) &&
1791 cur_offset != lzx_lru_queue_R2(queue)))
1793 /* There was no match found, or the only match found
1794 * was a distant length 3 match. Output a literal. */
1795 lzx_declare_literal(c, *in_next++,
1796 &next_chosen_item);
1797 continue;
1798 }
1800 if (cur_offset == lzx_lru_queue_R0(queue)) {
1802 cur_offset_data = 0;
1803 skip_len = cur_len - 1;
1804 goto choose_cur_match;
1807 cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
1808 cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
1810 /* Consider a repeat offset match */
1811 rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
1812 in_end - in_next,
1813 queue,
1814 &rep_max_idx);
1817 if (rep_max_len >= 3 &&
1818 (rep_score = lzx_repeat_offset_match_score(rep_max_len,
1819 rep_max_idx)) >= cur_score)
1821 cur_len = rep_max_len;
1822 cur_offset_data = rep_max_idx;
1823 skip_len = rep_max_len - 1;
1824 goto choose_cur_match;
1825 }
1827 have_cur_match:
1829 /* We have a match at the current position. */
1831 /* If we have a very long match, choose it immediately. */
1832 if (cur_len >= nice_len) {
1833 skip_len = cur_len - 1;
1834 goto choose_cur_match;
1837 /* See if there's a better match at the next position. */
1839 if (unlikely(max_len > in_end - in_next)) {
1840 max_len = in_end - in_next;
1841 nice_len = min(max_len, nice_len);
1844 next_len = hc_matchfinder_longest_match(&c->hc_mf,
1850 c->max_search_depth / 2,
1851 &next_offset);
1853 if (next_len <= cur_len - 2) {
1855 skip_len = cur_len - 2;
1856 goto choose_cur_match;
1859 next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
1860 next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
1862 rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
1863 in_end - in_next,
1864 queue,
1865 &rep_max_idx);
1868 if (rep_max_len >= 3 &&
1869 (rep_score = lzx_repeat_offset_match_score(rep_max_len,
1870 rep_max_idx)) >= next_score)
1873 if (rep_score > cur_score) {
1874 /* The next match is better, and it's a
1875 * repeat offset match. */
1876 lzx_declare_literal(c, *(in_next - 2),
1877 &next_chosen_item);
1878 cur_len = rep_max_len;
1879 cur_offset_data = rep_max_idx;
1880 skip_len = cur_len - 1;
1881 goto choose_cur_match;
1884 if (next_score > cur_score) {
1885 /* The next match is better, and it's an
1886 * explicit offset match. */
1887 lzx_declare_literal(c, *(in_next - 2),
1888 &next_chosen_item);
1889 cur_len = next_len;
1890 cur_offset_data = next_offset_data;
1891 cur_score = next_score;
1892 goto have_cur_match;
1896 /* The original match was better. */
1897 skip_len = cur_len - 2;
1899 choose_cur_match:
1900 if (cur_offset_data < LZX_NUM_RECENT_OFFSETS) {
1901 lzx_declare_repeat_offset_match(c, cur_len,
1902 cur_offset_data,
1903 &next_chosen_item);
1904 queue = lzx_lru_queue_swap(queue, cur_offset_data);
1906 lzx_declare_explicit_offset_match(c, cur_len,
1907 cur_offset_data - LZX_OFFSET_ADJUSTMENT,
1908 &next_chosen_item);
1909 queue = lzx_lru_queue_push(queue, cur_offset_data - LZX_OFFSET_ADJUSTMENT);
1912 hc_matchfinder_skip_positions(&c->hc_mf,
1913 in_begin,
1914 in_next,
1915 in_end,
1916 skip_len);
1917 in_next += skip_len;
1918 } while (in_next < in_block_end);
1920 lzx_finish_block(c, os, in_next - in_block_begin,
1921 next_chosen_item - c->chosen_items);
1922 } while (in_next != in_end);
1926 lzx_init_offset_slot_fast(struct lzx_compressor *c)
1928 unsigned slot = 0;
1930 for (u32 offset = 0; offset < LZX_NUM_FAST_OFFSETS; offset++) {
1932 while (offset + LZX_OFFSET_ADJUSTMENT >= lzx_offset_slot_base[slot + 1])
1933 slot++;
1935 c->offset_slot_fast[offset] = slot;
1940 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
1942 if (compression_level <= LZX_MAX_FAST_LEVEL) {
1943 return offsetof(struct lzx_compressor, hc_mf) +
1944 hc_matchfinder_size(max_bufsize);
1946 return offsetof(struct lzx_compressor, bt_mf) +
1947 bt_matchfinder_size(max_bufsize);
1952 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level)
1954 u64 size = 0;
1956 if (max_bufsize > LZX_MAX_WINDOW_SIZE)
1957 return 0;
1959 size += lzx_get_compressor_size(max_bufsize, compression_level);
1960 size += max_bufsize; /* in_buffer */
1961 return size;
1965 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
1966 void **c_ret)
1968 unsigned window_order;
1969 struct lzx_compressor *c;
1971 window_order = lzx_get_window_order(max_bufsize);
1972 if (window_order == 0)
1973 return WIMLIB_ERR_INVALID_PARAM;
1975 c = ALIGNED_MALLOC(lzx_get_compressor_size(max_bufsize,
1976 compression_level),
1977 MATCHFINDER_ALIGNMENT);
1981 c->num_main_syms = lzx_get_num_main_syms(window_order);
1982 c->window_order = window_order;
1984 c->in_buffer = MALLOC(max_bufsize);
1988 if (compression_level <= LZX_MAX_FAST_LEVEL) {
1990 /* Fast compression: Use lazy parsing. */
1992 c->impl = lzx_compress_lazy;
1993 c->max_search_depth = (36 * compression_level) / 20;
1994 c->nice_match_length = min((72 * compression_level) / 20,
1995 LZX_MAX_MATCH_LEN);
1999 /* Normal / high compression: Use near-optimal parsing. */
2001 c->impl = lzx_compress_near_optimal;
2003 /* Scale nice_match_length and max_search_depth with the
2004 * compression level. */
2005 c->max_search_depth = (24 * compression_level) / 50;
2006 c->nice_match_length = min((32 * compression_level) / 50,
2007 LZX_MAX_MATCH_LEN);
2009 /* Set a number of optimization passes appropriate for the
2010 * compression level. */
2012 c->num_optim_passes = 1;
2014 if (compression_level >= 45)
2015 c->num_optim_passes++;
2017 /* Use more optimization passes for higher compression levels.
2018 * But the more passes there are, the less they help --- so
2019 * don't add them linearly. */
2020 if (compression_level >= 70) {
2021 c->num_optim_passes++;
2022 if (compression_level >= 100)
2023 c->num_optim_passes++;
2024 if (compression_level >= 150)
2025 c->num_optim_passes++;
2026 if (compression_level >= 200)
2027 c->num_optim_passes++;
2028 if (compression_level >= 300)
2029 c->num_optim_passes++;
2032 c->cache_overflow_mark = &c->match_cache[LZX_CACHE_LEN];
2035 lzx_init_offset_slot_fast(c);
2036 *c_ret = c;
2037 return 0;
2042 return WIMLIB_ERR_NOMEM;
2046 lzx_compress(const void *in, size_t in_nbytes,
2047 void *out, size_t out_nbytes_avail, void *_c)
2049 struct lzx_compressor *c = _c;
2050 struct lzx_output_bitstream os;
2052 /* Don't bother trying to compress very small inputs. */
2053 if (in_nbytes < 100)
2054 return 0;
2056 /* Copy the input data into the internal buffer and preprocess it. */
2057 memcpy(c->in_buffer, in, in_nbytes);
2058 c->in_nbytes = in_nbytes;
2059 lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
2061 /* Initially, the previous Huffman codeword lengths are all zeroes. */
2062 c->codes_index = 0;
2063 memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2065 /* Initialize the output bitstream. */
2066 lzx_init_output(&os, out, out_nbytes_avail);
2068 /* Call the compression level-specific compress() function. */
2069 (*c->impl)(c, &os);
2071 /* Flush the output bitstream and return the compressed size or 0. */
2072 return lzx_flush_output(&os);
2076 lzx_free_compressor(void *_c)
2078 struct lzx_compressor *c = _c;
2080 FREE(c->in_buffer);
2081 ALIGNED_FREE(c);
2084 const struct compressor_ops lzx_compressor_ops = {
2085 .get_needed_memory = lzx_get_needed_memory,
2086 .create_compressor = lzx_create_compressor,
2087 .compress = lzx_compress,
2088 .free_compressor = lzx_free_compressor,
2089 };
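/*
 * Usage sketch (illustrative only; error handling elided, and it assumes the
 * signatures used above: create_compressor takes (max_bufsize, level, &ctx)
 * and compress returns the compressed size or 0). A caller typically drives
 * these ops as follows:
 *
 *	void *ctx;
 *	if (lzx_compressor_ops.create_compressor(32768, 50, &ctx) == 0) {
 *		u8 out[32768];
 *		size_t csize = lzx_compressor_ops.compress(in, in_size,
 *							   out, sizeof(out), ctx);
 *		// csize == 0 means the data did not compress
 *		lzx_compressor_ops.free_compressor(ctx);
 *	}
 */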