4 * A compressor for the LZX compression format, as used in WIM files.
8 * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
10 * This file is free software; you can redistribute it and/or modify it under
11 * the terms of the GNU Lesser General Public License as published by the Free
12 * Software Foundation; either version 3 of the License, or (at your option) any
15 * This file is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this file; if not, see http://www.gnu.org/licenses/.
26 * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27 * compression format, as used in the WIM (Windows IMaging) file format.
29 * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30 * "Near-optimal" is significantly slower than "lazy", but results in a better
31 * compression ratio. The "near-optimal" algorithm is used at the default compression level.
34 * This file may need some slight modifications to be used outside of the WIM
35 * format. In particular, in other situations the LZX block header might be
36 * slightly different, and sliding window support might be required.
38 * Note: LZX is a compression format derived from DEFLATE, the format used by
39 * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40 * Certain details are quite similar, such as the method for storing Huffman
41 * codes. However, the main differences are:
43 * - LZX preprocesses the data to attempt to make x86 machine code slightly more
44 * compressible before attempting to compress it further.
46 * - LZX uses a "main" alphabet which combines literals and matches, with the
47 * match symbols containing a "length header" (giving all or part of the match
48 * length) and an "offset slot" (giving, roughly speaking, the order of
49 * magnitude of the match offset). A worked example of this mapping follows this list.
51 * - LZX does not have static Huffman blocks (that is, the kind with preset
52 * Huffman codes); however it does have two types of dynamic Huffman blocks
53 * ("verbatim" and "aligned").
55 * - LZX has a minimum match length of 2 rather than 3. Length 2 matches can be
56 * useful, but generally only if the parser is smart about choosing them.
58 * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59 * of match offsets. This is very useful for certain types of files, such as
60 * binary files that have repeating records.
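*
* To make the "main" alphabet concrete, here is an illustrative sketch
* (mirroring the logic in lzx_record_match() later in this file) of how a
* match maps to a main symbol; the constants come from lzx_constants.h, and
* 'main_symbol_for_match' is a hypothetical name used for exposition only:
*
*	unsigned
*	main_symbol_for_match(unsigned length, unsigned offset_slot)
*	{
*		unsigned len_header = min(length - LZX_MIN_MATCH_LEN,
*					  LZX_NUM_PRIMARY_LENS);
*		return LZX_NUM_CHARS + (offset_slot * LZX_NUM_LEN_HEADERS) +
*		       len_header;
*	}
*
* For example, a length 5 match (len_header = 5 - 2 = 3) whose offset falls
* in slot 4 is coded as main symbol 256 + 4*8 + 3 = 291; symbols 0-255 are
* the literals.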
68 * Start a new LZX block (with new Huffman codes) after this many bytes.
70 * Note: actual block sizes may slightly exceed this value.
72 * TODO: recursive splitting and cost evaluation might be good for an extremely
73 * high compression mode, but otherwise it is almost always far too slow for how
74 * much it helps. Perhaps some sort of heuristic would be useful?
76 #define LZX_DIV_BLOCK_SIZE 32768
79 * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the
80 * match cache for each byte position. This value should be high enough so that
81 * nearly all the time, all matches found in a given block can fit in the match
82 * cache. However, fallback behavior (immediately terminating the block) on
83 * cache overflow is still required.
85 #define LZX_CACHE_PER_POS 7
88 * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
89 * excluding the extra "overflow" entries. The per-position multiplier is '1 +
90 * LZX_CACHE_PER_POS' instead of 'LZX_CACHE_PER_POS' because there is an
91 * overhead of one lz_match per position, used to hold the match count at that position. */
94 #define LZX_CACHE_LENGTH (LZX_DIV_BLOCK_SIZE * (1 + LZX_CACHE_PER_POS))
97 * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
98 * ever be saved in the match cache for a single position. Since each match we
99 * save for a single position has a distinct length, we can use the number of
100 * possible match lengths in LZX as this bound. This bound is guaranteed to be
101 * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
102 * it will never actually be reached.
104 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
107 * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
108 * This makes it possible to consider fractional bit costs.
110 * Note: this is only useful as a statistical trick for when the true costs are
111 * unknown. In reality, each token in LZX requires a whole number of bits to be output. */
114 #define LZX_BIT_COST 16
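/*
 * Worked example of this scaling: a symbol whose codeword is 5 bits long has
 * cost 5 * LZX_BIT_COST = 80, and a statistical estimate of "5.25 bits" can
 * be expressed as 84. Since costs are only ever compared against other
 * costs, the factor of 16 cancels out.
 */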
117 * Consideration of aligned offset costs is disabled for now, due to
118 * insufficient benefit gained from the time spent.
120 #define LZX_CONSIDER_ALIGNED_COSTS 0
123 * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the faster algorithm (lazy parsing). */
126 #define LZX_MAX_FAST_LEVEL 34
129 * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
130 * for finding length 2 matches. This can be as high as 16 (in which case the
131 * hash function is trivial), but using a smaller hash table speeds up
132 * compression due to reduced cache pressure.
134 #define LZX_HASH2_ORDER 12
135 #define LZX_HASH2_LENGTH (1UL << LZX_HASH2_ORDER)
138 * These are the compressor-side limits on the codeword lengths for each Huffman
139 * code. To make outputting bits slightly faster, some of these limits are
140 * lower than the limits defined by the LZX format. This does not significantly
141 * affect the compression ratio, at least for the block sizes we use.
143 #define MAIN_CODEWORD_LIMIT 12 /* 64-bit: can buffer 4 main symbols */
144 #define LENGTH_CODEWORD_LIMIT 12
145 #define ALIGNED_CODEWORD_LIMIT 7
146 #define PRE_CODEWORD_LIMIT 7
148 #include "wimlib/lzx_common.h"
151 * The maximum allowed window order for the matchfinder.
153 #define MATCHFINDER_MAX_WINDOW_ORDER LZX_MAX_WINDOW_ORDER
157 #include "wimlib/bt_matchfinder.h"
158 #include "wimlib/compress_common.h"
159 #include "wimlib/compressor_ops.h"
160 #include "wimlib/error.h"
161 #include "wimlib/hc_matchfinder.h"
162 #include "wimlib/lz_extend.h"
163 #include "wimlib/unaligned.h"
164 #include "wimlib/util.h"
166 struct lzx_output_bitstream;
168 /* Codewords for the LZX Huffman codes. */
169 struct lzx_codewords {
170 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
171 u32 len[LZX_LENCODE_NUM_SYMBOLS];
172 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
175 /* Codeword lengths (in bits) for the LZX Huffman codes.
176 * A zero length means the corresponding codeword has zero frequency. */
178 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
179 u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
180 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
183 /* Cost model for near-optimal parsing */
186 /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
187 * length 'len' match that has an offset belonging to 'offset_slot'. */
188 u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
190 /* Cost for each symbol in the main code */
191 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
193 /* Cost for each symbol in the length code */
194 u32 len[LZX_LENCODE_NUM_SYMBOLS];
196 #if LZX_CONSIDER_ALIGNED_COSTS
197 /* Cost for each symbol in the aligned code */
198 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
202 /* Codewords and lengths for the LZX Huffman codes. */
204 struct lzx_codewords codewords;
205 struct lzx_lens lens;
208 /* Symbol frequency counters for the LZX Huffman codes. */
210 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
211 u32 len[LZX_LENCODE_NUM_SYMBOLS];
212 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
216 * Represents a run of literals followed by a match or end-of-block. This
217 * struct is needed to temporarily store items chosen by the parser, since items
218 * cannot be written until all items for the block have been chosen and the
219 * block's Huffman codes have been computed.
221 struct lzx_sequence {
223 /* The number of literals in the run. This may be 0. The literals are
224 * not stored explicitly in this structure; instead, they are read
225 * directly from the uncompressed data. */
228 /* If the next field doesn't indicate end-of-block, then this is the
229 * match length minus LZX_MIN_MATCH_LEN. */
232 /* If bit 31 is clear, then this field contains the match header in bits
233 * 0-8 and the adjusted offset (the match offset plus LZX_OFFSET_ADJUSTMENT) in bits 9-30.
234 * Otherwise, this sequence's literal run was the last literal run in
235 * the block, so there is no match that follows it. */
236 u32 adjusted_offset_and_match_hdr;
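/*
 * An illustrative sketch of unpacking a non-final lzx_sequence, mirroring
 * the bit layout documented above (for exposition only; the compressor does
 * not use such a helper):
 *
 *	unsigned match_hdr  = seq->adjusted_offset_and_match_hdr & 0x1FF;
 *	u32 adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
 *
 * A value with bit 31 set (0x80000000) instead marks the final, match-less
 * sequence of the block.
 */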
240 * This structure represents a byte position in the input buffer and a node in
241 * the graph of possible match/literal choices.
243 * Logically, each incoming edge to this node is labeled with a literal or a
244 * match that can be taken to reach this position from an earlier position; and
245 * each outgoing edge from this node is labeled with a literal or a match that
246 * can be taken to advance from this position to a later position.
248 struct lzx_optimum_node {
250 /* The cost, in bits, of the lowest-cost path that has been found to
251 * reach this position. This can change as progressively lower cost
252 * paths are found to reach this position. */
256 * The match or literal that was taken to reach this position. This can
257 * change as progressively lower cost paths are found to reach this position.
260 * This variable is divided into two bitfields.
262 * Literals:
263 * Low bits are 0, high bits are the literal.
265 * Explicit offset matches:
266 * Low bits are the match length, high bits are the offset plus 2.
268 * Repeat offset matches:
269 * Low bits are the match length, high bits are the queue index.
272 #define OPTIMUM_OFFSET_SHIFT 9
273 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
274 } _aligned_attribute(8);
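/*
 * Illustrative decoding of the 'item' bitfield, using the constants above
 * (for exposition only):
 *
 *	unsigned len    = item & OPTIMUM_LEN_MASK;
 *	u32 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
 *
 * len == 0 means a literal, with offset_data holding the byte value.
 * Otherwise, offset_data < LZX_NUM_RECENT_OFFSETS selects a recent offset by
 * queue index, and any larger value is the match offset plus
 * LZX_OFFSET_ADJUSTMENT.
 */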
277 * Least-recently-used queue for match offsets.
279 * This is represented as a 64-bit integer for efficiency. There are three
280 * offsets of 21 bits each, and the remaining top bit is garbage.
282 struct lzx_lru_queue {
286 #define LZX_QUEUE64_OFFSET_SHIFT 21
287 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
289 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
290 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
291 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
293 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
294 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
295 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
298 lzx_lru_queue_init(struct lzx_lru_queue *queue)
300 queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
301 ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
302 ((u64)1 << LZX_QUEUE64_R2_SHIFT);
306 lzx_lru_queue_R0(struct lzx_lru_queue queue)
308 return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
312 lzx_lru_queue_R1(struct lzx_lru_queue queue)
314 return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
318 lzx_lru_queue_R2(struct lzx_lru_queue queue)
320 return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
323 /* Push a match offset onto the front (most recently used) end of the queue. */
324 static inline struct lzx_lru_queue
325 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
327 return (struct lzx_lru_queue) {
328 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
332 /* Pop a match offset off the front (most recently used) end of the queue. */
334 lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
336 u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
337 queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
341 /* Swap a match offset to the front of the queue. */
342 static inline struct lzx_lru_queue
343 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
349 return (struct lzx_lru_queue) {
350 .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
351 (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
352 (queue.R & LZX_QUEUE64_R2_MASK),
355 return (struct lzx_lru_queue) {
356 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
357 (queue.R & LZX_QUEUE64_R1_MASK) |
358 (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
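/*
 * Illustrative use of the queue helpers above (hypothetical offset values):
 *
 *	struct lzx_lru_queue q;
 *
 *	lzx_lru_queue_init(&q);           R0 = 1,   R1 = 1,   R2 = 1
 *	q = lzx_lru_queue_push(q, 300);   R0 = 300, R1 = 1,   R2 = 1
 *	q = lzx_lru_queue_push(q, 64);    R0 = 64,  R1 = 300, R2 = 1
 *	q = lzx_lru_queue_swap(q, 1);     R0 = 300, R1 = 64,  R2 = 1
 *
 * Each operation is just shifts and masks on one 64-bit word, which is why
 * the queue can be cheaply passed around by value.
 */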
362 /* The main LZX compressor structure */
363 struct lzx_compressor {
365 /* The "nice" match length: if a match of this length is found, then
366 * choose it immediately without further consideration. */
367 unsigned nice_match_length;
369 /* The maximum search depth: consider at most this many potential
370 * matches at each position. */
371 unsigned max_search_depth;
373 /* The log base 2 of the LZX window size for LZ match offset encoding
374 * purposes. This will be >= LZX_MIN_WINDOW_ORDER and <=
375 * LZX_MAX_WINDOW_ORDER. */
376 unsigned window_order;
378 /* The number of symbols in the main alphabet. This depends on
379 * @window_order, since @window_order determines the maximum possible match offset. */
381 unsigned num_main_syms;
383 /* Number of optimization passes per block */
384 unsigned num_optim_passes;
386 /* The preprocessed buffer of data being compressed */
389 /* The number of bytes of data to be compressed, which is the number of
390 * bytes of data in @in_buffer that are actually valid. */
393 /* Pointer to the compress() implementation chosen at allocation time */
394 void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
396 /* If true, the compressor need not preserve the input buffer if it
397 * compresses the data successfully. */
400 /* The Huffman symbol frequency counters for the current block. */
401 struct lzx_freqs freqs;
403 /* The Huffman codes for the current and previous blocks. The one with
404 * index 'codes_index' is for the current block, and the other one is
405 * for the previous block. */
406 struct lzx_codes codes[2];
407 unsigned codes_index;
409 /* The matches and literals that the parser has chosen for the current
410 * block. The required length of this array is limited by the maximum
411 * number of matches that can ever be chosen for a single block. */
412 struct lzx_sequence chosen_sequences[DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN)];
414 /* Tables for mapping adjusted offsets to offset slots */
416 /* offset slots [0, 29] */
417 u8 offset_slot_tab_1[32768];
419 /* offset slots [30, 49] */
420 u8 offset_slot_tab_2[128];
423 /* Data for greedy or lazy parsing */
425 /* Hash chains matchfinder (MUST BE LAST!!!) */
426 struct hc_matchfinder hc_mf;
429 /* Data for near-optimal parsing */
432 * The graph nodes for the current block.
434 * We need at least 'LZX_DIV_BLOCK_SIZE +
435 * LZX_MAX_MATCH_LEN - 1' nodes because that is the
436 * maximum block size that may be used. Add 1 because
437 * we need a node to represent end-of-block.
439 * It is possible that nodes past end-of-block are
440 * accessed during match consideration, but this can
441 * only occur if the block was truncated at
442 * LZX_DIV_BLOCK_SIZE. So the same bound still applies.
443 * Note that since nodes past the end of the block will
444 * never actually have an effect on the items that are
445 * chosen for the block, it makes no difference what
446 * their costs are initialized to (if anything).
448 struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE +
449 LZX_MAX_MATCH_LEN - 1 + 1];
451 /* The cost model for the current block */
452 struct lzx_costs costs;
455 * Cached matches for the current block. This array
456 * contains the matches that were found at each position
457 * in the block. Specifically, for each position, there
458 * is a special 'struct lz_match' whose 'length' field
459 * contains the number of matches that were found at
460 * that position; this is followed by the matches
461 * themselves, if any, sorted by strictly increasing length.
464 * Note: in rare cases, there will be a very high number
465 * of matches in the block and this array will overflow.
466 * If this happens, we force the end of the current
467 * block. LZX_CACHE_LENGTH is the length at which we
468 * actually check for overflow. The extra slots beyond
469 * this are enough to absorb the worst case overflow,
470 * which occurs if starting at
471 * &match_cache[LZX_CACHE_LENGTH - 1], we write the
472 * match count header, then write
473 * LZX_MAX_MATCHES_PER_POS matches, then skip searching
474 * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
475 * write the match count header for each.
477 struct lz_match match_cache[LZX_CACHE_LENGTH +
478 LZX_MAX_MATCHES_PER_POS +
479 LZX_MAX_MATCH_LEN - 1];
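/*
 * A worked bound for the overflow slack above: if the fill check at
 * LZX_CACHE_LENGTH has just barely passed, the worst case writes one
 * match count header plus LZX_MAX_MATCHES_PER_POS matches for the
 * current position, followed by one count header for each of the next
 * 'LZX_MAX_MATCH_LEN - 1' skipped positions. Starting from index
 * LZX_CACHE_LENGTH - 1, the last write lands at index
 * LZX_CACHE_LENGTH + LZX_MAX_MATCHES_PER_POS + LZX_MAX_MATCH_LEN - 2,
 * exactly the final element of this array.
 */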
481 /* Hash table for finding length 2 matches */
482 pos_t hash2_tab[LZX_HASH2_LENGTH];
484 /* Binary trees matchfinder (MUST BE LAST!!!) */
485 struct bt_matchfinder bt_mf;
491 * Structure to keep track of the current state of sending bits to the
492 * compressed output buffer.
494 * The LZX bitstream is encoded as a sequence of 16-bit coding units.
496 struct lzx_output_bitstream {
498 /* Bits that haven't yet been written to the output buffer. */
499 machine_word_t bitbuf;
501 /* Number of bits currently held in @bitbuf. */
504 /* Pointer to the start of the output buffer. */
507 /* Pointer to the position in the output buffer at which the next coding
508 * unit should be written. */
511 /* Pointer just past the end of the output buffer, rounded down to a
512 * 2-byte boundary. */
516 /* Can the specified number of bits always be added to 'bitbuf' after any
517 * pending 16-bit coding units have been flushed? */
518 #define CAN_BUFFER(n) ((n) <= (8 * sizeof(machine_word_t)) - 16)
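/* For example, with a 64-bit machine_word_t, CAN_BUFFER(n) is n <= 48: after
 * a flush, up to 48 newly added bits fit in the buffer, which is why four
 * 12-bit main codewords (4 * MAIN_CODEWORD_LIMIT = 48) can be added between
 * flushes on 64-bit builds. On 32-bit builds the bound is 16 bits. */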
521 * Initialize the output bitstream.
524 * The output bitstream structure to initialize.
526 * The buffer being written to.
528 * Size of @buffer, in bytes.
531 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
536 os->next = os->start;
537 os->end = os->start + (size & ~1);
540 /* Add some bits to the bitbuffer variable of the output bitstream. The caller
541 * must make sure there is enough room. */
543 lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
545 os->bitbuf = (os->bitbuf << num_bits) | bits;
546 os->bitcount += num_bits;
549 /* Flush bits from the bitbuffer variable to the output buffer. 'max_num_bits'
550 * specifies the maximum number of bits that may have been added since the last flush. */
553 lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
555 if (os->end - os->next < 6)
557 put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 16), os->next + 0);
558 if (max_num_bits > 16)
559 put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 32), os->next + 2);
560 if (max_num_bits > 32)
561 put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 48), os->next + 4);
562 os->next += (os->bitcount >> 4) << 1;
566 /* Add at most 16 bits to the bitbuffer and flush it. */
568 lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
570 lzx_add_bits(os, bits, num_bits);
571 lzx_flush_bits(os, 16);
575 * Flush the last coding unit to the output buffer if needed. Return the total
576 * number of bytes written to the output buffer, or 0 if an overflow occurred.
579 lzx_flush_output(struct lzx_output_bitstream *os)
581 if (os->end - os->next < 6)
584 if (os->bitcount != 0) {
585 put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next);
589 return os->next - os->start;
592 /* Build the main, length, and aligned offset Huffman codes used in LZX.
594 * This takes as input the frequency tables for each code and produces as output
595 * a set of tables that map symbols to codewords and codeword lengths. */
597 lzx_make_huffman_codes(struct lzx_compressor *c)
599 const struct lzx_freqs *freqs = &c->freqs;
600 struct lzx_codes *codes = &c->codes[c->codes_index];
602 STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
603 MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
604 STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 9 &&
605 LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
606 STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
607 ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
609 make_canonical_huffman_code(c->num_main_syms,
613 codes->codewords.main);
615 make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
616 LENGTH_CODEWORD_LIMIT,
619 codes->codewords.len);
621 make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
622 ALIGNED_CODEWORD_LIMIT,
625 codes->codewords.aligned);
628 /* Reset the symbol frequencies for the LZX Huffman codes. */
630 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
632 memset(&c->freqs, 0, sizeof(c->freqs));
636 lzx_compute_precode_items(const u8 lens[restrict],
637 const u8 prev_lens[restrict],
638 u32 precode_freqs[restrict],
639 unsigned precode_items[restrict])
648 itemptr = precode_items;
651 while (!((len = lens[run_start]) & 0x80)) {
653 /* len = the length being repeated */
655 /* Find the next run of codeword lengths. */
657 run_end = run_start + 1;
659 /* Fast case for a single length. */
660 if (likely(len != lens[run_end])) {
661 delta = prev_lens[run_start] - len;
664 precode_freqs[delta]++;
670 /* Extend the run. */
673 } while (len == lens[run_end]);
678 /* Symbol 18: RLE 20 to 51 zeroes at a time. */
679 while ((run_end - run_start) >= 20) {
680 extra_bits = min((run_end - run_start) - 20, 0x1f);
682 *itemptr++ = 18 | (extra_bits << 5);
683 run_start += 20 + extra_bits;
686 /* Symbol 17: RLE 4 to 19 zeroes at a time. */
687 if ((run_end - run_start) >= 4) {
688 extra_bits = min((run_end - run_start) - 4, 0xf);
690 *itemptr++ = 17 | (extra_bits << 5);
691 run_start += 4 + extra_bits;
695 /* A run of nonzero lengths. */
697 /* Symbol 19: RLE 4 to 5 of any length at a time. */
698 while ((run_end - run_start) >= 4) {
699 extra_bits = (run_end - run_start) > 4;
700 delta = prev_lens[run_start] - len;
704 precode_freqs[delta]++;
705 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
706 run_start += 4 + extra_bits;
710 /* Output any remaining lengths without RLE. */
711 while (run_start != run_end) {
712 delta = prev_lens[run_start] - len;
715 precode_freqs[delta]++;
721 return itemptr - precode_items;
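/*
 * A worked example of the item encoding above, assuming a hypothetical run
 * of 25 zero lengths: 25 >= 20, so one symbol 18 item is emitted with
 * extra_bits = min(25 - 20, 0x1f) = 5, i.e. '18 | (5 << 5)', covering all
 * 20 + 5 = 25 positions. A shorter run of 6 zeroes would instead use symbol
 * 17 with extra_bits = 2, and leftover runs under 4 fall through to one
 * plain item per length, each holding the delta from the previous block's
 * codeword length.
 */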
725 * Output a Huffman code in the compressed form used in LZX.
727 * The Huffman code is represented in the output as a logical series of codeword
728 * lengths from which the Huffman code, which must be in canonical form, can be reconstructed.
731 * The codeword lengths are themselves compressed using a separate Huffman code,
732 * the "precode", which contains a symbol for each possible codeword length in
733 * the larger code as well as several special symbols to represent repeated
734 * codeword lengths (a form of run-length encoding). The precode is itself
735 * constructed in canonical form, and its codeword lengths are represented
736 * literally in 20 4-bit fields that immediately precede the compressed codeword
737 * lengths of the larger code.
739 * Furthermore, the codeword lengths of the larger code are actually represented
740 * as deltas from the codeword lengths of the corresponding code in the previous block.
744 * Bitstream to which to write the compressed Huffman code.
746 * The codeword lengths, indexed by symbol, in the Huffman code.
748 * The codeword lengths, indexed by symbol, in the corresponding Huffman
749 * code in the previous block, or all zeroes if this is the first block.
751 * The number of symbols in the Huffman code.
754 lzx_write_compressed_code(struct lzx_output_bitstream *os,
755 const u8 lens[restrict],
756 const u8 prev_lens[restrict],
759 u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
760 u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
761 u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
762 unsigned precode_items[num_lens];
763 unsigned num_precode_items;
764 unsigned precode_item;
765 unsigned precode_sym;
767 u8 saved = lens[num_lens];
768 *(u8 *)(lens + num_lens) = 0x80;
770 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
771 precode_freqs[i] = 0;
773 /* Compute the "items" (RLE / literal tokens and extra bits) with which
774 * the codeword lengths in the larger code will be output. */
775 num_precode_items = lzx_compute_precode_items(lens,
780 /* Build the precode. */
781 STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 &&
782 PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN);
783 make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
785 precode_freqs, precode_lens,
788 /* Output the lengths of the codewords in the precode. */
789 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
790 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
792 /* Output the encoded lengths of the codewords in the larger code. */
793 for (i = 0; i < num_precode_items; i++) {
794 precode_item = precode_items[i];
795 precode_sym = precode_item & 0x1F;
796 lzx_add_bits(os, precode_codewords[precode_sym],
797 precode_lens[precode_sym]);
798 if (precode_sym >= 17) {
799 if (precode_sym == 17) {
800 lzx_add_bits(os, precode_item >> 5, 4);
801 } else if (precode_sym == 18) {
802 lzx_add_bits(os, precode_item >> 5, 5);
804 lzx_add_bits(os, (precode_item >> 5) & 1, 1);
805 precode_sym = precode_item >> 6;
806 lzx_add_bits(os, precode_codewords[precode_sym],
807 precode_lens[precode_sym]);
810 STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1));
811 lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1);
814 *(u8 *)(lens + num_lens) = saved;
818 * Write all matches and literal bytes (which were precomputed) in an LZX
819 * compressed block to the output bitstream in the final compressed form.
823 * The output bitstream.
825 * The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
826 * LZX_BLOCKTYPE_VERBATIM).
828 * The uncompressed data of the block.
830 * The matches and literals to output, given as a series of sequences.
832 * The main, length, and aligned offset Huffman codes for the current
833 * LZX compressed block.
836 lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
837 const u8 *block_data, const struct lzx_sequence sequences[],
838 const struct lzx_codes *codes)
840 const struct lzx_sequence *seq = sequences;
841 u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED);
844 /* Output the next sequence. */
846 unsigned litrunlen = seq->litrunlen;
848 unsigned main_symbol;
849 unsigned adjusted_length;
851 unsigned offset_slot;
852 unsigned num_extra_bits;
855 /* Output the literal run of the sequence. */
857 if (litrunlen) { /* Is the literal run nonempty? */
859 /* Verify optimization is enabled on 64-bit */
860 STATIC_ASSERT(sizeof(machine_word_t) < 8 ||
861 CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT));
863 if (CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT)) {
865 /* 64-bit: write 4 literals at a time. */
866 while (litrunlen >= 4) {
867 unsigned lit0 = block_data[0];
868 unsigned lit1 = block_data[1];
869 unsigned lit2 = block_data[2];
870 unsigned lit3 = block_data[3];
871 lzx_add_bits(os, codes->codewords.main[lit0], codes->lens.main[lit0]);
872 lzx_add_bits(os, codes->codewords.main[lit1], codes->lens.main[lit1]);
873 lzx_add_bits(os, codes->codewords.main[lit2], codes->lens.main[lit2]);
874 lzx_add_bits(os, codes->codewords.main[lit3], codes->lens.main[lit3]);
875 lzx_flush_bits(os, 4 * MAIN_CODEWORD_LIMIT);
880 unsigned lit = *block_data++;
881 lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
883 unsigned lit = *block_data++;
884 lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
886 unsigned lit = *block_data++;
887 lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
888 lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
890 lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
893 lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT);
897 /* 32-bit: write 1 literal at a time. */
899 unsigned lit = *block_data++;
900 lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
901 lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
902 } while (--litrunlen);
906 /* Was this the last literal run? */
907 if (seq->adjusted_offset_and_match_hdr & 0x80000000)
910 /* Nope; output the match. */
912 match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
913 main_symbol = LZX_NUM_CHARS + match_hdr;
914 adjusted_length = seq->adjusted_length;
916 block_data += adjusted_length + LZX_MIN_MATCH_LEN;
918 offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
919 adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
921 num_extra_bits = lzx_extra_offset_bits[offset_slot];
922 extra_bits = adjusted_offset - lzx_offset_slot_base[offset_slot];
924 #define MAX_MATCH_BITS (MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
925 14 + ALIGNED_CODEWORD_LIMIT)
927 /* Verify optimization is enabled on 64-bit */
928 STATIC_ASSERT(sizeof(machine_word_t) < 8 || CAN_BUFFER(MAX_MATCH_BITS));
930 /* Output the main symbol for the match. */
932 lzx_add_bits(os, codes->codewords.main[main_symbol],
933 codes->lens.main[main_symbol]);
934 if (!CAN_BUFFER(MAX_MATCH_BITS))
935 lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
937 /* If needed, output the length symbol for the match. */
939 if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
940 lzx_add_bits(os, codes->codewords.len[adjusted_length - LZX_NUM_PRIMARY_LENS],
941 codes->lens.len[adjusted_length - LZX_NUM_PRIMARY_LENS]);
942 if (!CAN_BUFFER(MAX_MATCH_BITS))
943 lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
946 /* Output the extra offset bits for the match. In aligned
947 * offset blocks, the lowest 3 bits of the adjusted offset are
948 * Huffman-encoded using the aligned offset code, provided that
949 * at least 3 extra offset bits are required. All other
950 * extra offset bits are output verbatim. */
952 if ((adjusted_offset & ones_if_aligned) >= 16) {
954 lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
955 num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
956 if (!CAN_BUFFER(MAX_MATCH_BITS))
957 lzx_flush_bits(os, 14);
959 lzx_add_bits(os, codes->codewords.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK],
960 codes->lens.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]);
961 if (!CAN_BUFFER(MAX_MATCH_BITS))
962 lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
964 lzx_add_bits(os, extra_bits, num_extra_bits);
965 if (!CAN_BUFFER(MAX_MATCH_BITS))
966 lzx_flush_bits(os, 17);
969 if (CAN_BUFFER(MAX_MATCH_BITS))
970 lzx_flush_bits(os, MAX_MATCH_BITS);
972 /* Advance to the next sequence. */
978 lzx_write_compressed_block(const u8 *block_begin,
981 unsigned window_order,
982 unsigned num_main_syms,
983 const struct lzx_sequence sequences[],
984 const struct lzx_codes * codes,
985 const struct lzx_lens * prev_lens,
986 struct lzx_output_bitstream * os)
988 LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
989 block_type == LZX_BLOCKTYPE_VERBATIM);
991 /* The first three bits indicate the type of block and are one of the
992 * LZX_BLOCKTYPE_* constants. */
993 lzx_write_bits(os, block_type, 3);
995 /* Output the block size.
997 * The original LZX format seemed to always encode the block size in 3
998 * bytes. However, the implementation in WIMGAPI, as used in WIM files,
999 * uses the first bit to indicate whether the block is the default size
1000 * (32768) or a different size given explicitly by the next 16 bits.
1002 * By default, this compressor uses a window size of 32768 and therefore
1003 * follows the WIMGAPI behavior. However, this compressor also supports
1004 * window sizes greater than 32768 bytes, which do not appear to be
1005 * supported by WIMGAPI. In such cases, we retain the default size bit
1006 * to mean a size of 32768 bytes but output non-default block size in 24
1007 * bits rather than 16. The compatibility of this behavior is unknown
1008 * because WIMs created with chunk size greater than 32768 can seemingly
1009 * only be opened by wimlib anyway. */
1010 if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
1011 lzx_write_bits(os, 1, 1);
1013 lzx_write_bits(os, 0, 1);
1015 if (window_order >= 16)
1016 lzx_write_bits(os, block_size >> 16, 8);
1018 lzx_write_bits(os, block_size & 0xFFFF, 16);
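/*
 * Worked example of the size field: a default-size block emits the single
 * bit '1'. With window_order = 17 and block_size = 50000 (hypothetical
 * values), the output is the bit '0', then 50000 >> 16 = 0 in 8 bits, then
 * 50000 & 0xFFFF = 50000 in 16 bits: the 24-bit non-default encoding
 * described above.
 */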
1021 /* If it's an aligned offset block, output the aligned offset code. */
1022 if (block_type == LZX_BLOCKTYPE_ALIGNED) {
1023 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1024 lzx_write_bits(os, codes->lens.aligned[i],
1025 LZX_ALIGNEDCODE_ELEMENT_SIZE);
1029 /* Output the main code (two parts). */
1030 lzx_write_compressed_code(os, codes->lens.main,
1033 lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
1034 prev_lens->main + LZX_NUM_CHARS,
1035 num_main_syms - LZX_NUM_CHARS);
1037 /* Output the length code. */
1038 lzx_write_compressed_code(os, codes->lens.len,
1040 LZX_LENCODE_NUM_SYMBOLS);
1042 /* Output the compressed matches and literals. */
1043 lzx_write_sequences(os, block_type, block_begin, sequences, codes);
1046 /* Given the frequencies of symbols in an LZX-compressed block and the
1047 * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
1048 * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
1049 * will take fewer bits to output. */
1051 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1052 const struct lzx_codes * codes)
1054 u32 aligned_cost = 0;
1055 u32 verbatim_cost = 0;
1057 /* A verbatim block requires 3 bits in each place that an aligned symbol
1058 * would be used in an aligned offset block. */
1059 for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1060 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
1061 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1064 /* Account for output of the aligned offset code. */
1065 aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1067 if (aligned_cost < verbatim_cost)
1068 return LZX_BLOCKTYPE_ALIGNED;
1070 return LZX_BLOCKTYPE_VERBATIM;
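/*
 * Worked numbers for this decision: the aligned code header always costs
 * LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS = 3 * 8 = 24
 * bits, so an aligned offset block wins only when the Huffman-coded aligned
 * symbols save more than 24 bits compared with coding each occurrence in 3
 * verbatim bits.
 */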
1074 * Return the offset slot for the specified adjusted match offset, using the
1075 * compressor's acceleration tables to speed up the mapping.
1077 static inline unsigned
1078 lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset)
1080 if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
1081 return c->offset_slot_tab_1[adjusted_offset];
1082 return c->offset_slot_tab_2[adjusted_offset >> 14];
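/*
 * Illustrative lookups (hypothetical values): an adjusted offset of 21 is
 * below 32768 and is answered by offset_slot_tab_1[21]; an adjusted offset
 * of 100000 is answered by offset_slot_tab_2[100000 >> 14], i.e.
 * offset_slot_tab_2[6]. The '>> 14' is valid because each offset slot in
 * the range [30, 49] spans at least 2^14 adjusted offsets.
 */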
1086 * Finish an LZX block:
1088 * - build the Huffman codes
1089 * - decide whether to output the block as VERBATIM or ALIGNED
1090 * - output the block
1091 * - swap the indices of the current and previous Huffman codes
1094 lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
1095 const u8 *block_begin, u32 block_size, u32 seq_idx)
1099 lzx_make_huffman_codes(c);
1101 block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
1102 &c->codes[c->codes_index]);
1103 lzx_write_compressed_block(block_begin,
1108 &c->chosen_sequences[seq_idx],
1109 &c->codes[c->codes_index],
1110 &c->codes[c->codes_index ^ 1].lens,
1112 c->codes_index ^= 1;
1115 /* Tally the Huffman symbol for a literal and increment the literal run length.
1118 lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
1120 c->freqs.main[literal]++;
1124 /* Tally the Huffman symbol for a match, save the match data and the length of
1125 * the preceding literal run in the next lzx_sequence, and update the recent offsets queue. */
1128 lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
1129 u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
1130 u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
1132 u32 litrunlen = *litrunlen_p;
1133 struct lzx_sequence *next_seq = *next_seq_p;
1134 unsigned offset_slot;
1137 v = length - LZX_MIN_MATCH_LEN;
1139 /* Save the literal run length and adjusted length. */
1140 next_seq->litrunlen = litrunlen;
1141 next_seq->adjusted_length = v;
1143 /* Compute the length header and tally the length symbol if needed */
1144 if (v >= LZX_NUM_PRIMARY_LENS) {
1145 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1146 v = LZX_NUM_PRIMARY_LENS;
1149 /* Compute the offset slot */
1150 offset_slot = lzx_comp_get_offset_slot(c, offset_data);
1152 /* Compute the match header. */
1153 v += offset_slot * LZX_NUM_LEN_HEADERS;
1155 /* Save the adjusted offset and match header. */
1156 next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v;
1158 /* Tally the main symbol. */
1159 c->freqs.main[LZX_NUM_CHARS + v]++;
1161 /* Update the recent offsets queue. */
1162 if (offset_data < LZX_NUM_RECENT_OFFSETS) {
1163 /* Repeat offset match */
1164 swap(recent_offsets[0], recent_offsets[offset_data]);
1166 /* Explicit offset match */
1168 /* Tally the aligned offset symbol if needed */
1169 if (offset_data >= 16)
1170 c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1172 recent_offsets[2] = recent_offsets[1];
1173 recent_offsets[1] = recent_offsets[0];
1174 recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT;
1177 /* Reset the literal run length and advance to the next sequence. */
1178 *next_seq_p = next_seq + 1;
1182 /* Finish the last lzx_sequence. The last lzx_sequence is just a literal run;
1183 * there is no match. This literal run may be empty. */
1185 lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
1187 last_seq->litrunlen = litrunlen;
1189 /* Special value to mark last sequence */
1190 last_seq->adjusted_offset_and_match_hdr = 0x80000000;
1194 * Given the minimum-cost path computed through the item graph for the current
1195 * block, walk the path and count how many of each symbol in each Huffman-coded
1196 * alphabet would be required to output the items (matches and literals) along
1199 * Note that the path will be walked backwards (from the end of the block to the
1200 * beginning of the block), but this doesn't matter because this function only
1201 * computes frequencies.
1204 lzx_tally_item_list(struct lzx_compressor *c, u32 block_size)
1206 u32 node_idx = block_size;
1211 unsigned offset_slot;
1213 /* Tally literals until either a match or the beginning of the
1214 * block is reached. */
1216 u32 item = c->optimum_nodes[node_idx].item;
1218 len = item & OPTIMUM_LEN_MASK;
1219 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1221 if (len != 0) /* Not a literal? */
1224 /* Tally the main symbol for the literal. */
1225 c->freqs.main[offset_data]++;
1227 if (--node_idx == 0) /* Beginning of block was reached? */
1233 /* Tally a match. */
1235 /* Tally the aligned offset symbol if needed. */
1236 if (offset_data >= 16)
1237 c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1239 /* Tally the length symbol if needed. */
1240 v = len - LZX_MIN_MATCH_LEN;
1241 if (v >= LZX_NUM_PRIMARY_LENS) {
1242 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1243 v = LZX_NUM_PRIMARY_LENS;
1246 /* Tally the main symbol. */
1247 offset_slot = lzx_comp_get_offset_slot(c, offset_data);
1248 v += offset_slot * LZX_NUM_LEN_HEADERS;
1249 c->freqs.main[LZX_NUM_CHARS + v]++;
1251 if (node_idx == 0) /* Beginning of block was reached? */
1257 * Like lzx_tally_item_list(), but this function also generates the list of
1258 * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
1259 * ready to be output to the bitstream after the Huffman codes are computed.
1260 * The lzx_sequences will be written to decreasing memory addresses as the path
1261 * is walked backwards, which means they will end up in the expected
1262 * first-to-last order. The return value is the index in c->chosen_sequences at
1263 * which the lzx_sequences begin.
1266 lzx_record_item_list(struct lzx_compressor *c, u32 block_size)
1268 u32 node_idx = block_size;
1269 u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
1272 /* Special value to mark last sequence */
1273 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000;
1275 lit_start_node = node_idx;
1280 unsigned offset_slot;
1282 /* Record literals until either a match or the beginning of the
1283 * block is reached. */
1285 u32 item = c->optimum_nodes[node_idx].item;
1287 len = item & OPTIMUM_LEN_MASK;
1288 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1290 if (len != 0) /* Not a literal? */
1293 /* Tally the main symbol for the literal. */
1294 c->freqs.main[offset_data]++;
1296 if (--node_idx == 0) /* Beginning of block was reached? */
1300 /* Save the literal run length for the next sequence (the
1301 * "previous sequence" when walking backwards). */
1302 c->chosen_sequences[seq_idx--].litrunlen = lit_start_node - node_idx;
1304 lit_start_node = node_idx;
1306 /* Record a match. */
1308 /* Tally the aligned offset symbol if needed. */
1309 if (offset_data >= 16)
1310 c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1312 /* Save the adjusted length. */
1313 v = len - LZX_MIN_MATCH_LEN;
1314 c->chosen_sequences[seq_idx].adjusted_length = v;
1316 /* Tally the length symbol if needed. */
1317 if (v >= LZX_NUM_PRIMARY_LENS) {
1318 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1319 v = LZX_NUM_PRIMARY_LENS;
1322 /* Tally the main symbol. */
1323 offset_slot = lzx_comp_get_offset_slot(c, offset_data);
1324 v += offset_slot * LZX_NUM_LEN_HEADERS;
1325 c->freqs.main[LZX_NUM_CHARS + v]++;
1327 /* Save the adjusted offset and match header. */
1328 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr =
1329 (offset_data << 9) | v;
1331 if (node_idx == 0) /* Beginning of block was reached? */
1336 /* Save the literal run length for the first sequence. */
1337 c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
1339 /* Return the index in c->chosen_sequences at which the lzx_sequences begin. */
1345 * Find an inexpensive path through the graph of possible match/literal choices
1346 * for the current block. The nodes of the graph are
1347 * c->optimum_nodes[0...block_size]. They correspond directly to the bytes in
1348 * the current block, plus one extra node for end-of-block. The edges of the
1349 * graph are matches and literals. The goal is to find the minimum cost path
1350 * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
1352 * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1353 * proceeding forwards one node at a time. At each node, a selection of matches
1354 * (len >= 2), as well as the literal byte (len = 1), is considered. An item of
1355 * length 'len' provides a new path to reach the node 'len' bytes later. If
1356 * such a path is the lowest cost found so far to reach that later node, then
1357 * that later node is updated with the new path.
1359 * Note that although this algorithm is based on minimum cost path search, due
1360 * to various simplifying assumptions the result is not guaranteed to be the
1361 * true minimum cost, or "optimal", path over the graph of all valid LZX
1362 * representations of this block.
1364 * Also, note that because of the presence of the recent offsets queue (which is
1365 * a type of adaptive state), the algorithm cannot work backwards and compute
1366 * "cost to end" instead of "cost to beginning". Furthermore, the way the
1367 * algorithm handles this adaptive state in the "minimum cost" parse is actually
1368 * only an approximation. It's possible for the globally optimal, minimum cost
1369 * path to contain a prefix, ending at a position, where that path prefix is
1370 * *not* the minimum cost path to that position. This can happen if such a path
1371 * prefix results in a different adaptive state which results in lower costs
1372 * later. The algorithm does not solve this problem; it only considers the
1373 * lowest cost to reach each individual position.
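/*
 * A minimal sketch of the relaxation step described above, assuming a single
 * candidate item of length 'len' and cost 'item_cost' at the current node
 * (names here are for exposition; the real function below considers many
 * such items per node):
 *
 *	u32 cost = cur_node->cost + item_cost;
 *	if (cost < (cur_node + len)->cost) {
 *		(cur_node + len)->cost = cost;
 *		(cur_node + len)->item =
 *			(offset_data << OPTIMUM_OFFSET_SHIFT) | len;
 *	}
 *
 * Each node thus ends up holding the cheapest known cost to reach it and the
 * item that achieves that cost, which is all that lzx_record_item_list()
 * needs to walk the chosen path backwards.
 */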
1375 static struct lzx_lru_queue
1376 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1377 const u8 * const restrict block_begin,
1378 const u32 block_size,
1379 const struct lzx_lru_queue initial_queue)
1381 struct lzx_optimum_node *cur_node = c->optimum_nodes;
1382 struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
1383 struct lz_match *cache_ptr = c->match_cache;
1384 const u8 *in_next = block_begin;
1385 const u8 * const block_end = block_begin + block_size;
1387 /* Instead of storing the match offset LRU queues in the
1388 * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1389 * storing them in a smaller array. This works because the algorithm
1390 * only requires a limited history of the adaptive state. Once a given
1391 * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1392 * it is no longer needed. */
1393 struct lzx_lru_queue queues[512];
1395 STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
1396 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
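/* For example, since ARRAY_LEN(queues) is 512, a power of two larger than
 * LZX_MAX_MATCH_LEN + 1, QUEUE(in) reduces to masking the low 9 bits of the
 * address; queue states from more than 512 positions back are silently
 * recycled, which is safe because such states are never read again. */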
1398 /* Initially, the cost to reach each node is "infinity". */
1399 memset(c->optimum_nodes, 0xFF,
1400 (block_size + 1) * sizeof(c->optimum_nodes[0]));
1402 QUEUE(block_begin) = initial_queue;
1404 /* The following loop runs 'block_size' iterations, one per node. */
1406 unsigned num_matches;
1411 * A selection of matches for the block was already saved in
1412 * memory so that we don't have to run the uncompressed data
1413 * through the matchfinder on every optimization pass. However,
1414 * we still search for repeat offset matches during each
1415 * optimization pass because we cannot predict the state of the
1416 * recent offsets queue. But as a heuristic, we don't bother
1417 * searching for repeat offset matches if the general-purpose
1418 * matchfinder failed to find any matches.
1420 * Note that a match of length n at some offset implies there is
1421 * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1422 * that same offset. In other words, we don't necessarily need
1423 * to use the full length of a match. The key heuristic that
1424 * saves a significant amount of time is that for each
1425 * distinct length, we only consider the smallest offset for
1426 * which that length is available. This heuristic also applies
1427 * to repeat offsets, which we order specially: R0 < R1 < R2 <
1428 * any explicit offset. Of course, this heuristic may
1429 * produce suboptimal results because offset slots in LZX are
1430 * subject to entropy encoding, but in practice this is a useful heuristic.
1434 num_matches = cache_ptr->length;
1438 struct lz_match *end_matches = cache_ptr + num_matches;
1439 unsigned next_len = LZX_MIN_MATCH_LEN;
1440 unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1443 /* Consider R0 match */
1444 matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1445 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1447 STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
1449 u32 cost = cur_node->cost +
1450 c->costs.match_cost[0][
1451 next_len - LZX_MIN_MATCH_LEN];
1452 if (cost <= (cur_node + next_len)->cost) {
1453 (cur_node + next_len)->cost = cost;
1454 (cur_node + next_len)->item =
1455 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1457 if (unlikely(++next_len > max_len)) {
1458 cache_ptr = end_matches;
1461 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1465 /* Consider R1 match */
1466 matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1467 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1469 if (matchptr[next_len - 1] != in_next[next_len - 1])
1471 for (unsigned len = 2; len < next_len - 1; len++)
1472 if (matchptr[len] != in_next[len])
1475 u32 cost = cur_node->cost +
1476 c->costs.match_cost[1][
1477 next_len - LZX_MIN_MATCH_LEN];
1478 if (cost <= (cur_node + next_len)->cost) {
1479 (cur_node + next_len)->cost = cost;
1480 (cur_node + next_len)->item =
1481 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1483 if (unlikely(++next_len > max_len)) {
1484 cache_ptr = end_matches;
1487 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1491 /* Consider R2 match */
1492 matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1493 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1495 if (matchptr[next_len - 1] != in_next[next_len - 1])
1497 for (unsigned len = 2; len < next_len - 1; len++)
1498 if (matchptr[len] != in_next[len])
1501 u32 cost = cur_node->cost +
1502 c->costs.match_cost[2][
1503 next_len - LZX_MIN_MATCH_LEN];
1504 if (cost <= (cur_node + next_len)->cost) {
1505 (cur_node + next_len)->cost = cost;
1506 (cur_node + next_len)->item =
1507 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1509 if (unlikely(++next_len > max_len)) {
1510 cache_ptr = end_matches;
1513 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1517 while (next_len > cache_ptr->length)
1518 if (++cache_ptr == end_matches)
1521 /* Consider explicit offset matches */
1523 u32 offset = cache_ptr->offset;
1524 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1525 unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data);
1527 u32 cost = cur_node->cost +
1528 c->costs.match_cost[offset_slot][
1529 next_len - LZX_MIN_MATCH_LEN];
1530 #if LZX_CONSIDER_ALIGNED_COSTS
1531 if (lzx_extra_offset_bits[offset_slot] >=
1532 LZX_NUM_ALIGNED_OFFSET_BITS)
1533 cost += c->costs.aligned[offset_data &
1534 LZX_ALIGNED_OFFSET_BITMASK];
1536 if (cost < (cur_node + next_len)->cost) {
1537 (cur_node + next_len)->cost = cost;
1538 (cur_node + next_len)->item =
1539 (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1541 } while (++next_len <= cache_ptr->length);
1542 } while (++cache_ptr != end_matches);
1547 /* Consider coding a literal.
1549 * To avoid an extra branch, actually checking the preferability
1550 * of coding the literal is integrated into the queue update code below.
1552 literal = *in_next++;
1553 cost = cur_node->cost +
1554 c->costs.main[lzx_main_symbol_for_literal(literal)];
1556 /* Advance to the next position. */
1559 /* The lowest-cost path to the current position is now known.
1560 * Finalize the recent offsets queue that results from taking
1561 * this lowest-cost path. */
1563 if (cost <= cur_node->cost) {
1564 /* Literal: queue remains unchanged. */
1565 cur_node->cost = cost;
1566 cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
1567 QUEUE(in_next) = QUEUE(in_next - 1);
1569 /* Match: queue update is needed. */
1570 unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1571 u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1572 if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1573 /* Explicit offset match: insert offset at front */
1575 lzx_lru_queue_push(QUEUE(in_next - len),
1576 offset_data - LZX_OFFSET_ADJUSTMENT);
1578 /* Repeat offset match: swap offset to front */
1580 lzx_lru_queue_swap(QUEUE(in_next - len),
1584 } while (cur_node != end_node);
1586 /* Return the match offset queue at the end of the minimum cost path. */
1587 return QUEUE(block_end);
1590 /* Given the costs for the main and length codewords, compute 'match_costs'. */
1592 lzx_compute_match_costs(struct lzx_compressor *c)
1594 unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
1595 struct lzx_costs *costs = &c->costs;
1597 for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1599 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1600 unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
1603 #if LZX_CONSIDER_ALIGNED_COSTS
1604 if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
1605 extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1608 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1609 costs->match_cost[offset_slot][i] =
1610 costs->main[main_symbol++] + extra_cost;
1612 extra_cost += costs->main[main_symbol];
1614 for (; i < LZX_NUM_LENS; i++)
1615 costs->match_cost[offset_slot][i] =
1616 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
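/*
 * Worked numbers for the table above (hypothetical costs, at LZX_BIT_COST =
 * 16): if a slot has 4 extra offset bits and its main symbols each cost 96
 * (6 bits), a primary length costs 96 + 4 * 16 = 160. For lengths beyond
 * LZX_NUM_PRIMARY_LENS, the slot's final main symbol (96) and the extra
 * offset bits (64) are folded into 'extra_cost', so a length symbol costing
 * 48 (3 bits) gives a total of 96 + 64 + 48 = 208.
 */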
1620 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
1623 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1626 bool have_byte[256];
1627 unsigned num_used_bytes;
1629 /* The costs below are hard coded to use a scaling factor of 16. */
1630 STATIC_ASSERT(LZX_BIT_COST == 16);
1635 * - Use smaller initial costs for literal symbols when the input buffer
1636 * contains fewer distinct bytes.
1638 * - Assume that match symbols are more costly than literal symbols.
1640 * - Assume that length symbols for shorter lengths are less costly than
1641 * length symbols for longer lengths.
1644 for (i = 0; i < 256; i++)
1645 have_byte[i] = false;
1647 for (i = 0; i < block_size; i++)
1648 have_byte[block[i]] = true;
1651 for (i = 0; i < 256; i++)
1652 num_used_bytes += have_byte[i];
1654 for (i = 0; i < 256; i++)
1655 c->costs.main[i] = 140 - (256 - num_used_bytes) / 4;
1657 for (; i < c->num_main_syms; i++)
1658 c->costs.main[i] = 170;
1660 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1661 c->costs.len[i] = 103 + (i / 4);
1663 #if LZX_CONSIDER_ALIGNED_COSTS
1664 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1665 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1668 lzx_compute_match_costs(c);
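/*
 * Worked numbers for the defaults above: with 56 distinct bytes in the block
 * (a hypothetical value), each literal starts at cost
 * 140 - (256 - 56) / 4 = 90, i.e. 90 / 16 = 5.625 "bits" under the
 * LZX_BIT_COST scaling, while match symbols start at 170 / 16 = 10.625 bits.
 * These are only bootstrap values; each optimization pass replaces them with
 * costs derived from real Huffman codes.
 */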
1671 /* Update the current cost model to reflect the computed Huffman codes. */
1673 lzx_update_costs(struct lzx_compressor *c)
1676 const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1678 for (i = 0; i < c->num_main_syms; i++)
1679 c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
1681 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1682 c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
1684 #if LZX_CONSIDER_ALIGNED_COSTS
1685 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1686 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
1689 lzx_compute_match_costs(c);
1692 static struct lzx_lru_queue
1693 lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
1694 struct lzx_output_bitstream * const restrict os,
1695 const u8 * const restrict block_begin,
1696 const u32 block_size,
1697 const struct lzx_lru_queue initial_queue)
1699 unsigned num_passes_remaining = c->num_optim_passes;
1700 struct lzx_lru_queue new_queue;
1703 /* The first optimization pass uses a default cost model. Each
1704 * additional optimization pass uses a cost model derived from the
1705 * Huffman code computed in the previous pass. */
1707 lzx_set_default_costs(c, block_begin, block_size);
1708 lzx_reset_symbol_frequencies(c);
1710 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1712 if (num_passes_remaining > 1) {
1713 lzx_tally_item_list(c, block_size);
1714 lzx_make_huffman_codes(c);
1715 lzx_update_costs(c);
1716 lzx_reset_symbol_frequencies(c);
1718 } while (--num_passes_remaining);
1720 seq_idx = lzx_record_item_list(c, block_size);
1721 lzx_finish_block(c, os, block_begin, block_size, seq_idx);
1726 * This is the "near-optimal" LZX compressor.
1728 * For each block, it performs a relatively thorough graph search to find an
1729 * inexpensive (in terms of compressed size) way to output that block.
1731 * Note: there are actually many things this algorithm leaves on the table in
1732 * terms of compression ratio. So although it may be "near-optimal", it is
1733 * certainly not "optimal". The goal is not to produce the optimal compression
1734 * ratio, which for LZX is probably impossible within any practical amount of
1735 * time, but rather to produce a compression ratio significantly better than a
1736 * simpler "greedy" or "lazy" parse while still being relatively fast.
1739 lzx_compress_near_optimal(struct lzx_compressor *c,
1740 struct lzx_output_bitstream *os)
1742 const u8 * const in_begin = c->in_buffer;
1743 const u8 * in_next = in_begin;
1744 const u8 * const in_end = in_begin + c->in_nbytes;
1745 unsigned max_len = LZX_MAX_MATCH_LEN;
1746 unsigned nice_len = min(c->nice_match_length, max_len);
1748 struct lzx_lru_queue queue;
1750 bt_matchfinder_init(&c->bt_mf);
1751 memset(c->hash2_tab, 0, sizeof(c->hash2_tab));
1752 next_hash = bt_matchfinder_hash_3_bytes(in_next);
1753 lzx_lru_queue_init(&queue);
1756 /* Starting a new block */
1757 const u8 * const in_block_begin = in_next;
1758 const u8 * const in_block_end =
1759 in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
1761 /* Run the block through the matchfinder and cache the matches. */
1762 struct lz_match *cache_ptr = c->match_cache;
1764 struct lz_match *lz_matchptr;
1769 /* If approaching the end of the input buffer, adjust
1770 * 'max_len' and 'nice_len' accordingly. */
1771 if (unlikely(max_len > in_end - in_next)) {
1772 max_len = in_end - in_next;
1773 nice_len = min(max_len, nice_len);
1775 /* This extra check is needed to ensure that we
1776 * never output a length 2 match of the very
1777 * last two bytes with the very first two bytes,
1778 * since such a match has an offset too large to
1779 * be represented. */
1780 if (unlikely(max_len < 3)) {
1782 cache_ptr->length = 0;
1788 lz_matchptr = cache_ptr + 1;
1790 /* Check for a length 2 match. */
1791 hash2 = lz_hash_2_bytes(in_next, LZX_HASH2_ORDER);
1792 cur_match = c->hash2_tab[hash2];
1793 c->hash2_tab[hash2] = in_next - in_begin;
1794 if (cur_match != 0 &&
1795 (LZX_HASH2_ORDER == 16 ||
1796 load_u16_unaligned(&in_begin[cur_match]) ==
1797 load_u16_unaligned(in_next)))
1799 lz_matchptr->length = 2;
1800 lz_matchptr->offset = in_next - &in_begin[cur_match];
1804 /* Check for matches of length >= 3. */
1805 lz_matchptr = bt_matchfinder_get_matches(&c->bt_mf,
1811 c->max_search_depth,
1816 cache_ptr->length = lz_matchptr - (cache_ptr + 1);
1817 cache_ptr = lz_matchptr;
			/*
			 * If there was a very long match found, then don't
			 * cache any matches for the bytes covered by that
			 * match.  This avoids degenerate behavior when
			 * compressing highly redundant data, where the number
			 * of matches can be very large.
			 *
			 * This heuristic doesn't actually hurt the compression
			 * ratio very much.  If there's a long match, then the
			 * data must be highly compressible, so it doesn't
			 * matter as much what we do.
			 */
			if (best_len >= nice_len) {
				--best_len;
				do {
					if (unlikely(max_len > in_end - in_next)) {
						max_len = in_end - in_next;
						nice_len = min(max_len, nice_len);
						if (unlikely(max_len < 3)) {
							in_next++;
							cache_ptr->length = 0;
							cache_ptr++;
							continue;
						}
					}
					c->hash2_tab[lz_hash_2_bytes(in_next, LZX_HASH2_ORDER)] =
						in_next - in_begin;
					bt_matchfinder_skip_position(&c->bt_mf,
								     in_begin,
								     in_next,
								     in_end,
								     nice_len,
								     c->max_search_depth,
								     &next_hash);
					in_next++;
					cache_ptr->length = 0;
					cache_ptr++;
				} while (--best_len);
			}
		} while (in_next < in_block_end &&
			 likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
		/* We've finished running the block through the matchfinder.
		 * Now choose a match/literal sequence and write the block.  */

		queue = lzx_optimize_and_write_block(c, os, in_block_begin,
						     in_next - in_block_begin,
						     queue);
	} while (in_next != in_end);
}
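
/*
 * Illustrative sketch (hypothetical helper, not used by the compressor): the
 * layout of the match cache filled in above.  For each position, one header
 * entry whose 'length' field holds the number of matches found there is
 * followed by that many lz_match entries, so a consumer can walk the cache in
 * step with the block's bytes:
 */
static inline void
example_walk_match_cache(const struct lz_match *cache_ptr, u32 block_size)
{
	for (u32 i = 0; i < block_size; i++) {
		unsigned num_matches = cache_ptr->length;

		for (unsigned j = 0; j < num_matches; j++) {
			/* Match j for this position is cache_ptr[1 + j];
			 * its fields are .length and .offset.  */
		}

		/* Advance past the header entry and its matches.  */
		cache_ptr += 1 + num_matches;
	}
}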

/*
 * Given a pointer to the current byte sequence and the current list of recent
 * match offsets, find the longest repeat offset match.
 *
 * If no match of at least 2 bytes is found, then return 0.
 *
 * If a match of at least 2 bytes is found, then return its length and set
 * *rep_max_idx_ret to the index of its offset in @recent_offsets.
 */
static unsigned
lzx_find_longest_repeat_offset_match(const u8 * const in_next,
				     const u32 bytes_remaining,
				     const u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
				     unsigned *rep_max_idx_ret)
{
	STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
	LZX_ASSERT(bytes_remaining >= 2);

	const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
	const u16 next_2_bytes = load_u16_unaligned(in_next);
	const u8 *matchptr;
	unsigned rep_max_len;
	unsigned rep_max_idx;
	unsigned rep_len;

	matchptr = in_next - recent_offsets[0];
	if (load_u16_unaligned(matchptr) == next_2_bytes)
		rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
	else
		rep_max_len = 0;
	rep_max_idx = 0;

	matchptr = in_next - recent_offsets[1];
	if (load_u16_unaligned(matchptr) == next_2_bytes) {
		rep_len = lz_extend(in_next, matchptr, 2, max_len);
		if (rep_len > rep_max_len) {
			rep_max_len = rep_len;
			rep_max_idx = 1;
		}
	}

	matchptr = in_next - recent_offsets[2];
	if (load_u16_unaligned(matchptr) == next_2_bytes) {
		rep_len = lz_extend(in_next, matchptr, 2, max_len);
		if (rep_len > rep_max_len) {
			rep_max_len = rep_len;
			rep_max_idx = 2;
		}
	}

	*rep_max_idx_ret = rep_max_idx;
	return rep_max_len;
}
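
/*
 * For example, with recent_offsets = {100, 16, 50}, the three candidate match
 * locations checked above are in_next - 100, in_next - 16, and in_next - 50.
 * Ties in length are broken in favor of the lowest queue index, since the
 * comparisons require a strictly greater length to replace the current best.
 */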

/* Fast heuristic scoring for lazy parsing: how "good" is this match?  */
static inline unsigned
lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
{
	unsigned score = len;

	if (adjusted_offset < 4096)
		score++;

	if (adjusted_offset < 256)
		score++;

	return score;
}

static inline unsigned
lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
{
	return rep_len + 3;
}
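
/*
 * Worked example of the scoring above: a length-3 match at adjusted offset
 * 100 scores 3 + 1 + 1 = 5, while a length-4 match at adjusted offset 100000
 * scores only 4, so the shorter but much closer match wins.  A repeat offset
 * match always receives a fixed bonus of 3, so even a length-2 repeat offset
 * match scores 5 and can beat short explicit offset matches.
 */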

/* This is the "lazy" LZX compressor.  */
static void
lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
{
	const u8 * const in_begin = c->in_buffer;
	const u8 *	 in_next = in_begin;
	const u8 * const in_end = in_begin + c->in_nbytes;
	unsigned max_len = LZX_MAX_MATCH_LEN;
	unsigned nice_len = min(c->nice_match_length, max_len);
	STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
	u32 recent_offsets[3] = {1, 1, 1};
	u32 next_hashes[2] = {};

	hc_matchfinder_init(&c->hc_mf);

	do {
		/* Starting a new block  */

		const u8 * const in_block_begin = in_next;
		const u8 * const in_block_end =
			in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
		struct lzx_sequence *next_seq = c->chosen_sequences;
		unsigned cur_len;
		u32 cur_offset;
		u32 cur_offset_data;
		unsigned cur_score;
		unsigned next_len;
		u32 next_offset;
		u32 next_offset_data;
		unsigned next_score;
		unsigned rep_max_len;
		unsigned rep_max_idx;
		unsigned rep_score;
		unsigned skip_len;
		u32 litrunlen = 0;

		lzx_reset_symbol_frequencies(c);

		do {
			if (unlikely(max_len > in_end - in_next)) {
				max_len = in_end - in_next;
				nice_len = min(max_len, nice_len);
			}

			/* Find the longest match at the current position.  */
			cur_len = hc_matchfinder_longest_match(&c->hc_mf,
							       in_begin,
							       in_next,
							       2,
							       max_len,
							       nice_len,
							       c->max_search_depth,
							       next_hashes,
							       &cur_offset);
			if (cur_len < 3 ||
			    (cur_len == 3 &&
			     cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
			     cur_offset != recent_offsets[0] &&
			     cur_offset != recent_offsets[1] &&
			     cur_offset != recent_offsets[2]))
			{
				/* There was no match found, or the only match found
				 * was a distant length 3 match.  Output a literal.  */
				lzx_record_literal(c, *in_next++, &litrunlen);
				continue;
			}

			if (cur_offset == recent_offsets[0]) {
				in_next++;
				cur_offset_data = 0;
				skip_len = cur_len - 1;
				goto choose_cur_match;
			}

			cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
			cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);

			/* Consider a repeat offset match  */
			rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
									   in_end - in_next,
									   recent_offsets,
									   &rep_max_idx);
			in_next++;

			if (rep_max_len >= 3 &&
			    (rep_score = lzx_repeat_offset_match_score(rep_max_len,
								       rep_max_idx)) >= cur_score)
			{
				cur_len = rep_max_len;
				cur_offset_data = rep_max_idx;
				skip_len = rep_max_len - 1;
				goto choose_cur_match;
			}

		have_cur_match:

			/* We have a match at the current position.  */

			/* If we have a very long match, choose it immediately.  */
			if (cur_len >= nice_len) {
				skip_len = cur_len - 1;
				goto choose_cur_match;
			}

			/* See if there's a better match at the next position.  */

			if (unlikely(max_len > in_end - in_next)) {
				max_len = in_end - in_next;
				nice_len = min(max_len, nice_len);
			}

			next_len = hc_matchfinder_longest_match(&c->hc_mf,
								in_begin,
								in_next,
								cur_len - 2,
								max_len,
								nice_len,
								c->max_search_depth / 2,
								next_hashes,
								&next_offset);

			if (next_len <= cur_len - 2) {
				in_next++;
				skip_len = cur_len - 2;
				goto choose_cur_match;
			}

			next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
			next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);

			rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
									   in_end - in_next,
									   recent_offsets,
									   &rep_max_idx);
			in_next++;

			if (rep_max_len >= 3 &&
			    (rep_score = lzx_repeat_offset_match_score(rep_max_len,
								       rep_max_idx)) >= next_score)
			{
				if (rep_score > cur_score) {
					/* The next match is better, and it's a
					 * repeat offset match.  */
					lzx_record_literal(c, *(in_next - 2),
							   &litrunlen);
					cur_len = rep_max_len;
					cur_offset_data = rep_max_idx;
					skip_len = cur_len - 1;
					goto choose_cur_match;
				}
			} else {
				if (next_score > cur_score) {
					/* The next match is better, and it's an
					 * explicit offset match.  */
					lzx_record_literal(c, *(in_next - 2),
							   &litrunlen);
					cur_len = next_len;
					cur_offset_data = next_offset_data;
					cur_score = next_score;
					goto have_cur_match;
				}
			}

			/* The original match was better.  */
			skip_len = cur_len - 2;

		choose_cur_match:
			lzx_record_match(c, cur_len, cur_offset_data,
					 recent_offsets, &litrunlen, &next_seq);
			in_next = hc_matchfinder_skip_positions(&c->hc_mf,
								in_begin,
								in_next,
								in_end,
								skip_len,
								next_hashes);
		} while (in_next < in_block_end);

		lzx_finish_sequence(next_seq, litrunlen);

		lzx_finish_block(c, os, in_block_begin, in_next - in_block_begin, 0);

	} while (in_next != in_end);
}
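
/*
 * Worked example of the lazy evaluation above: suppose position i has a
 * length-4 explicit offset match, while position i + 1 has a length-10 match
 * with a higher score.  Rather than greedily committing to the length-4
 * match, the compressor emits the byte at position i as a literal and takes
 * the longer match at position i + 1.
 */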

/* Generate the acceleration tables for offset slots.  */
static void
lzx_init_offset_slot_tabs(struct lzx_compressor *c)
{
	u32 adjusted_offset = 0;
	unsigned slot = 0;

	/* slots [0, 29]  */
	for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
	     adjusted_offset++)
	{
		if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
			slot++;
		c->offset_slot_tab_1[adjusted_offset] = slot;
	}

	/* slots [30, 49]  */
	for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
	     adjusted_offset += (u32)1 << 14)
	{
		if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
			slot++;
		c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
	}
}
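
/*
 * Illustrative sketch (hypothetical helper, not the compressor's real lookup
 * function): with the tables built above, an offset slot can be found without
 * a search.  Small adjusted offsets index offset_slot_tab_1 directly; larger
 * ones index offset_slot_tab_2 by their high bits, since slots >= 30 each
 * span at least 2^14 offsets.
 */
static inline unsigned
example_get_offset_slot(const struct lzx_compressor *c, u32 adjusted_offset)
{
	if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
		return c->offset_slot_tab_1[adjusted_offset];
	return c->offset_slot_tab_2[adjusted_offset >> 14];
}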

static size_t
lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
{
	if (compression_level <= LZX_MAX_FAST_LEVEL) {
		return offsetof(struct lzx_compressor, hc_mf) +
		       hc_matchfinder_size(max_bufsize);
	} else {
		return offsetof(struct lzx_compressor, bt_mf) +
		       bt_matchfinder_size(max_bufsize);
	}
}
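
/*
 * Note: only one of the two matchfinders is used by any given compressor, so
 * the size computed above ends at whichever matchfinder the chosen parsing
 * algorithm actually needs, rather than at the end of the full structure.
 */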

static u64
lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
		      bool destructive)
{
	u64 size = 0;

	if (max_bufsize > LZX_MAX_WINDOW_SIZE)
		return 0;

	size += lzx_get_compressor_size(max_bufsize, compression_level);
	if (!destructive)
		size += max_bufsize; /* in_buffer */
	return size;
}

static int
lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
		      bool destructive, void **c_ret)
{
	unsigned window_order;
	struct lzx_compressor *c;

	window_order = lzx_get_window_order(max_bufsize);
	if (window_order == 0)
		return WIMLIB_ERR_INVALID_PARAM;

	c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
	if (!c)
		goto oom0;

	c->destructive = destructive;

	c->num_main_syms = lzx_get_num_main_syms(window_order);
	c->window_order = window_order;

	if (!c->destructive) {
		c->in_buffer = MALLOC(max_bufsize);
		if (!c->in_buffer)
			goto oom1;
	}

	if (compression_level <= LZX_MAX_FAST_LEVEL) {

		/* Fast compression: Use lazy parsing.  */

		c->impl = lzx_compress_lazy;
		c->max_search_depth = (36 * compression_level) / 20;
		c->nice_match_length = (72 * compression_level) / 20;

		/* lzx_compress_lazy() needs max_search_depth >= 2 because it
		 * halves the max_search_depth when attempting a lazy match,
		 * and max_search_depth must not be 0.  */
		if (c->max_search_depth < 2)
			c->max_search_depth = 2;
	} else {

		/* Normal / high compression: Use near-optimal parsing.  */

		c->impl = lzx_compress_near_optimal;

		/* Scale nice_match_length and max_search_depth with the
		 * compression level.  */
		c->max_search_depth = (24 * compression_level) / 50;
		c->nice_match_length = (32 * compression_level) / 50;

		/* Set a number of optimization passes appropriate for the
		 * compression level.  */

		c->num_optim_passes = 1;

		if (compression_level >= 45)
			c->num_optim_passes++;

		/* Use more optimization passes for higher compression levels.
		 * But the more passes there are, the less they help --- so
		 * don't add them linearly.  For example, level 100 ends up
		 * with 4 passes total and level 300 with 7.  */
		if (compression_level >= 70) {
			c->num_optim_passes++;
			if (compression_level >= 100)
				c->num_optim_passes++;
			if (compression_level >= 150)
				c->num_optim_passes++;
			if (compression_level >= 200)
				c->num_optim_passes++;
			if (compression_level >= 300)
				c->num_optim_passes++;
		}
	}

	/* max_search_depth == 0 is invalid.  */
	if (c->max_search_depth < 1)
		c->max_search_depth = 1;

	if (c->nice_match_length > LZX_MAX_MATCH_LEN)
		c->nice_match_length = LZX_MAX_MATCH_LEN;

	lzx_init_offset_slot_tabs(c);
	*c_ret = c;
	return 0;

oom1:
	FREE(c);
oom0:
	return WIMLIB_ERR_NOMEM;
}

static size_t
lzx_compress(const void *restrict in, size_t in_nbytes,
	     void *restrict out, size_t out_nbytes_avail, void *restrict _c)
{
	struct lzx_compressor *c = _c;
	struct lzx_output_bitstream os;
	size_t result;

	/* Don't bother trying to compress very small inputs.  */
	if (in_nbytes < 100)
		return 0;

	/* Copy the input data into the internal buffer, or use it in place if
	 * the compressor is destructive, then preprocess it.  */
	if (c->destructive)
		c->in_buffer = (void *)in;
	else
		memcpy(c->in_buffer, in, in_nbytes);
	c->in_nbytes = in_nbytes;
	lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);

	/* Initially, the previous Huffman codeword lengths are all zeroes.  */
	c->codes_index = 0;
	memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));

	/* Initialize the output bitstream.  */
	lzx_init_output(&os, out, out_nbytes_avail);

	/* Call the compression level-specific compress() function.  */
	(*c->impl)(c, &os);

	/* Flush the output bitstream and return the compressed size or 0.  */
	result = lzx_flush_output(&os);
	if (!result && c->destructive)
		lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes);
	return result;
}

static void
lzx_free_compressor(void *_c)
{
	struct lzx_compressor *c = _c;

	if (!c->destructive)
		FREE(c->in_buffer);
	FREE(c);
}

const struct compressor_ops lzx_compressor_ops = {
	.get_needed_memory = lzx_get_needed_memory,
	.create_compressor = lzx_create_compressor,
	.compress          = lzx_compress,
	.free_compressor   = lzx_free_compressor,
};
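
/*
 * Illustrative usage sketch (hypothetical caller, compiled out; it uses the
 * internal entry points above rather than wimlib's public API):
 */
#if 0
static size_t
example_compress_buffer(const void *in, size_t in_size,
			void *out, size_t out_avail)
{
	void *c;
	size_t csize;

	/* Level 50 selects the near-optimal parser; false requests a
	 * non-destructive compressor, so the input is copied rather than
	 * preprocessed in place.  */
	if (lzx_create_compressor(in_size, 50, false, &c) != 0)
		return 0;

	/* Returns the compressed size, or 0 if 'out' was too small or the
	 * data did not compress.  */
	csize = lzx_compress(in, in_size, out, out_avail, c);

	lzx_free_compressor(c);
	return csize;
}
#endif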