4 * A compressor for the LZX compression format, as used in WIM files.
8 * Copyright (C) 2012-2016 Eric Biggers
10 * This file is free software; you can redistribute it and/or modify it under
11 * the terms of the GNU Lesser General Public License as published by the Free
12 * Software Foundation; either version 3 of the License, or (at your option) any
15 * This file is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this file; if not, see http://www.gnu.org/licenses/.
26 * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27 * compression format, as used in the WIM (Windows IMaging) file format.
29 * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30 * "Near-optimal" is significantly slower than "lazy", but results in a better
 * compression ratio.  The "near-optimal" algorithm is used at the default
 * compression level.
34 * This file may need some slight modifications to be used outside of the WIM
35 * format. In particular, in other situations the LZX block header might be
36 * slightly different, and sliding window support might be required.
38 * Note: LZX is a compression format derived from DEFLATE, the format used by
39 * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40 * Certain details are quite similar, such as the method for storing Huffman
41 * codes. However, the main differences are:
43 * - LZX preprocesses the data to attempt to make x86 machine code slightly more
44 * compressible before attempting to compress it further.
46 * - LZX uses a "main" alphabet which combines literals and matches, with the
47 * match symbols containing a "length header" (giving all or part of the match
48 * length) and an "offset slot" (giving, roughly speaking, the order of
49 * magnitude of the match offset).
51 * - LZX does not have static Huffman blocks (that is, the kind with preset
52 * Huffman codes); however it does have two types of dynamic Huffman blocks
53 * ("verbatim" and "aligned").
55 * - LZX has a minimum match length of 2 rather than 3. Length 2 matches can be
56 * useful, but generally only if the parser is smart about choosing them.
58 * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59 * of match offsets. This is very useful for certain types of files, such as
60 * binary files that have repeating records.
68 * The compressor always chooses a block of at least MIN_BLOCK_SIZE bytes,
69 * except if the last block has to be shorter.
71 #define MIN_BLOCK_SIZE 6500
74 * The compressor attempts to end blocks after SOFT_MAX_BLOCK_SIZE bytes, but
75 * the final size might be larger due to matches extending beyond the end of the
76 * block. Specifically:
78 * - The greedy parser may choose an arbitrarily long match starting at the
79 * SOFT_MAX_BLOCK_SIZE'th byte.
81 * - The lazy parser may choose a sequence of literals starting at the
 * SOFT_MAX_BLOCK_SIZE'th byte when it sees a sequence of increasingly good
83 * matches. The final match may be of arbitrary length. The length of the
 * literal sequence is approximately limited by the "nice match length"
 * parameter. */
87 #define SOFT_MAX_BLOCK_SIZE 100000
90 * The number of observed matches or literals that represents sufficient data to
91 * decide whether the current block should be terminated or not.
93 #define NUM_OBSERVATIONS_PER_BLOCK_CHECK 500
96 * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
97 * excluding the extra "overflow" entries. This value should be high enough so
 * that, nearly all of the time, all matches found in a given block can fit in the match
99 * cache. However, fallback behavior (immediately terminating the block) on
100 * cache overflow is still required.
102 #define LZX_CACHE_LENGTH (SOFT_MAX_BLOCK_SIZE * 5)
105 * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
106 * ever be saved in the match cache for a single position. Since each match we
107 * save for a single position has a distinct length, we can use the number of
108 * possible match lengths in LZX as this bound. This bound is guaranteed to be
109 * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
110 * it will never actually be reached.
112 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
115 * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
116 * This makes it possible to consider fractional bit costs.
118 * Note: this is only useful as a statistical trick for when the true costs are
 * unknown.  In reality, each token in LZX requires a whole number of bits to
 * output. */
122 #define LZX_BIT_COST 64
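/*
 * Illustrative example (not from the original source): with LZX_BIT_COST == 64,
 * costs are simply bit counts scaled by 64.  A 5-bit codeword has cost
 * 5 * 64 = 320, and a fractional estimate such as "about 2.5 bits" can be
 * expressed as 160, which an unscaled whole-bit cost model could not represent.
 */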
125 * Should the compressor take into account the costs of aligned offset symbols?
127 #define LZX_CONSIDER_ALIGNED_COSTS 1
 * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
 * faster (lazy) algorithm. */
133 #define LZX_MAX_FAST_LEVEL 34
136 * BT_MATCHFINDER_HASH2_ORDER is the log base 2 of the number of entries in the
137 * hash table for finding length 2 matches. This could be as high as 16, but
 * using a smaller hash table speeds up compression due to reduced cache
 * pressure. */
141 #define BT_MATCHFINDER_HASH2_ORDER 12
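/*
 * For example (illustrative): BT_MATCHFINDER_HASH2_ORDER == 12 gives a
 * length-2 hash table with 2^12 = 4096 entries, compared to 2^16 = 65536
 * entries at the maximum order of 16.
 */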
144 * These are the compressor-side limits on the codeword lengths for each Huffman
145 * code. To make outputting bits slightly faster, some of these limits are
146 * lower than the limits defined by the LZX format. This does not significantly
147 * affect the compression ratio, at least for the block sizes we use.
149 #define MAIN_CODEWORD_LIMIT 16
150 #define LENGTH_CODEWORD_LIMIT 12
151 #define ALIGNED_CODEWORD_LIMIT 7
152 #define PRE_CODEWORD_LIMIT 7
154 #include "wimlib/compress_common.h"
155 #include "wimlib/compressor_ops.h"
156 #include "wimlib/error.h"
157 #include "wimlib/lz_extend.h"
158 #include "wimlib/lzx_common.h"
159 #include "wimlib/unaligned.h"
160 #include "wimlib/util.h"
162 /* Matchfinders with 16-bit positions */
164 #define MF_SUFFIX _16
165 #include "wimlib/bt_matchfinder.h"
166 #include "wimlib/hc_matchfinder.h"
168 /* Matchfinders with 32-bit positions */
172 #define MF_SUFFIX _32
173 #include "wimlib/bt_matchfinder.h"
174 #include "wimlib/hc_matchfinder.h"
176 struct lzx_output_bitstream;
178 /* Codewords for the LZX Huffman codes. */
179 struct lzx_codewords {
180 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
181 u32 len[LZX_LENCODE_NUM_SYMBOLS];
182 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
185 /* Codeword lengths (in bits) for the LZX Huffman codes.
 * A zero length means the corresponding symbol has zero frequency. */
188 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
189 u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
190 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
193 /* Cost model for near-optimal parsing */
196 /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
197 * length 'len' match that has an offset belonging to 'offset_slot'. */
198 u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
200 /* Cost for each symbol in the main code */
201 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
203 /* Cost for each symbol in the length code */
204 u32 len[LZX_LENCODE_NUM_SYMBOLS];
206 #if LZX_CONSIDER_ALIGNED_COSTS
207 /* Cost for each symbol in the aligned code */
208 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
212 /* Codewords and lengths for the LZX Huffman codes. */
214 struct lzx_codewords codewords;
215 struct lzx_lens lens;
218 /* Symbol frequency counters for the LZX Huffman codes. */
220 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
221 u32 len[LZX_LENCODE_NUM_SYMBOLS];
222 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
225 /* Block split statistics. See "Block splitting algorithm" below. */
226 #define NUM_LITERAL_OBSERVATION_TYPES 8
227 #define NUM_MATCH_OBSERVATION_TYPES 2
228 #define NUM_OBSERVATION_TYPES (NUM_LITERAL_OBSERVATION_TYPES + NUM_MATCH_OBSERVATION_TYPES)
229 struct block_split_stats {
230 u32 new_observations[NUM_OBSERVATION_TYPES];
231 u32 observations[NUM_OBSERVATION_TYPES];
232 u32 num_new_observations;
233 u32 num_observations;
237 * Represents a run of literals followed by a match or end-of-block. This
238 * struct is needed to temporarily store items chosen by the parser, since items
239 * cannot be written until all items for the block have been chosen and the
240 * block's Huffman codes have been computed.
242 struct lzx_sequence {
244 /* The number of literals in the run. This may be 0. The literals are
245 * not stored explicitly in this structure; instead, they are read
246 * directly from the uncompressed data. */
249 /* If the next field doesn't indicate end-of-block, then this is the
250 * match length minus LZX_MIN_MATCH_LEN. */
253 /* If bit 31 is clear, then this field contains the match header in bits
254 * 0-8, and either the match offset plus LZX_OFFSET_ADJUSTMENT or a
255 * recent offset code in bits 9-30. Otherwise (if bit 31 is set), this
256 * sequence's literal run was the last literal run in the block, so
257 * there is no match that follows it. */
258 u32 adjusted_offset_and_match_hdr;
262 * This structure represents a byte position in the input buffer and a node in
263 * the graph of possible match/literal choices.
265 * Logically, each incoming edge to this node is labeled with a literal or a
266 * match that can be taken to reach this position from an earlier position; and
267 * each outgoing edge from this node is labeled with a literal or a match that
268 * can be taken to advance from this position to a later position.
270 struct lzx_optimum_node {
272 /* The cost, in bits, of the lowest-cost path that has been found to
273 * reach this position. This can change as progressively lower cost
274 * paths are found to reach this position. */
278 * The match or literal that was taken to reach this position. This can
279 * change as progressively lower cost paths are found to reach this
282 * This variable is divided into two bitfields.
285 * Low bits are 0, high bits are the literal.
287 * Explicit offset matches:
288 * Low bits are the match length, high bits are the offset plus 2.
290 * Repeat offset matches:
291 * Low bits are the match length, high bits are the queue index.
294 #define OPTIMUM_OFFSET_SHIFT 9
295 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
296 #define OPTIMUM_EXTRA_FLAG 0x80000000
299 } _aligned_attribute(8);
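/*
 * Hypothetical helpers (illustration only; the compressor packs and decodes
 * 'item' inline in the parsing code below) showing how the bitfields defined
 * above fit together:
 */
static inline u32
lzx_pack_match_item(u32 adjusted_offset_or_queue_idx, u32 len)
{
	/* Low OPTIMUM_OFFSET_SHIFT bits: match length (nonzero for a match);
	 * high bits: adjusted offset, or the queue index for a repeat offset
	 * match. */
	return (adjusted_offset_or_queue_idx << OPTIMUM_OFFSET_SHIFT) | len;
}

static inline u32
lzx_item_length(u32 item)
{
	/* A result of 0 means the item is a literal. */
	return item & OPTIMUM_LEN_MASK;
}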
302 * Least-recently-used queue for match offsets.
304 * This is represented as a 64-bit integer for efficiency. There are three
305 * offsets of 21 bits each. Bit 64 is garbage.
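 *
 * Layout sketch (bit positions counted from 0):
 *
 *	bit 63    bits 62..42    bits 41..21    bits 20..0
 *	unused         R2             R1            R0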
307 struct lzx_lru_queue {
311 #define LZX_QUEUE64_OFFSET_SHIFT 21
312 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
314 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
315 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
316 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
318 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
319 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
320 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
323 lzx_lru_queue_init(struct lzx_lru_queue *queue)
325 queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
326 ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
327 ((u64)1 << LZX_QUEUE64_R2_SHIFT);
331 lzx_lru_queue_R0(struct lzx_lru_queue queue)
333 return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
337 lzx_lru_queue_R1(struct lzx_lru_queue queue)
339 return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
343 lzx_lru_queue_R2(struct lzx_lru_queue queue)
345 return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
348 /* Push a match offset onto the front (most recently used) end of the queue. */
349 static inline struct lzx_lru_queue
350 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
352 return (struct lzx_lru_queue) {
353 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
357 /* Swap a match offset to the front of the queue. */
358 static inline struct lzx_lru_queue
359 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
365 return (struct lzx_lru_queue) {
366 .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
367 (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
368 (queue.R & LZX_QUEUE64_R2_MASK),
371 return (struct lzx_lru_queue) {
372 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
373 (queue.R & LZX_QUEUE64_R1_MASK) |
374 (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
378 /* The main LZX compressor structure */
379 struct lzx_compressor {
381 /* The "nice" match length: if a match of this length is found, then
382 * choose it immediately without further consideration. */
383 unsigned nice_match_length;
385 /* The maximum search depth: consider at most this many potential
386 * matches at each position. */
387 unsigned max_search_depth;
389 /* The log base 2 of the LZX window size for LZ match offset encoding
390 * purposes. This will be >= LZX_MIN_WINDOW_ORDER and <=
391 * LZX_MAX_WINDOW_ORDER. */
392 unsigned window_order;
394 /* The number of symbols in the main alphabet. This depends on
395 * @window_order, since @window_order determines the maximum possible
397 unsigned num_main_syms;
399 /* Number of optimization passes per block */
400 unsigned num_optim_passes;
402 /* The preprocessed buffer of data being compressed */
405 /* The number of bytes of data to be compressed, which is the number of
406 * bytes of data in @in_buffer that are actually valid. */
409 /* Pointer to the compress() implementation chosen at allocation time */
410 void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
412 /* If true, the compressor need not preserve the input buffer if it
413 * compresses the data successfully. */
416 /* The Huffman symbol frequency counters for the current block. */
417 struct lzx_freqs freqs;
419 /* Block split statistics. */
420 struct block_split_stats split_stats;
422 /* The Huffman codes for the current and previous blocks. The one with
423 * index 'codes_index' is for the current block, and the other one is
424 * for the previous block. */
425 struct lzx_codes codes[2];
426 unsigned codes_index;
428 /* The matches and literals that the parser has chosen for the current
429 * block. The required length of this array is limited by the maximum
430 * number of matches that can ever be chosen for a single block, plus
431 * one for the special entry at the end. */
432 struct lzx_sequence chosen_sequences[
433 DIV_ROUND_UP(SOFT_MAX_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];
435 /* Tables for mapping adjusted offsets to offset slots */
437 /* offset slots [0, 29] */
438 u8 offset_slot_tab_1[32768];
440 /* offset slots [30, 49] */
441 u8 offset_slot_tab_2[128];
444 /* Data for greedy or lazy parsing */
446 /* Hash chains matchfinder (MUST BE LAST!!!) */
448 struct hc_matchfinder_16 hc_mf_16;
449 struct hc_matchfinder_32 hc_mf_32;
453 /* Data for near-optimal parsing */
456 * Array of nodes, one per position, for running the
457 * minimum-cost path algorithm.
459 * This array must be large enough to accommodate the
460 * worst-case number of nodes, which occurs if we find a
461 * match of length LZX_MAX_MATCH_LEN at position
462 * SOFT_MAX_BLOCK_SIZE - 1, producing a block of length
463 * SOFT_MAX_BLOCK_SIZE - 1 + LZX_MAX_MATCH_LEN. Add one
464 * for the end-of-block node.
466 struct lzx_optimum_node optimum_nodes[SOFT_MAX_BLOCK_SIZE - 1 +
467 LZX_MAX_MATCH_LEN + 1];
469 /* The cost model for the current block */
470 struct lzx_costs costs;
473 * Cached matches for the current block. This array
474 * contains the matches that were found at each position
475 * in the block. Specifically, for each position, there
476 * is a special 'struct lz_match' whose 'length' field
477 * contains the number of matches that were found at
478 * that position; this is followed by the matches
479 * themselves, if any, sorted by strictly increasing
482 * Note: in rare cases, there will be a very high number
483 * of matches in the block and this array will overflow.
484 * If this happens, we force the end of the current
485 * block. LZX_CACHE_LENGTH is the length at which we
486 * actually check for overflow. The extra slots beyond
487 * this are enough to absorb the worst case overflow,
488 * which occurs if starting at
489 * &match_cache[LZX_CACHE_LENGTH - 1], we write the
490 * match count header, then write
491 * LZX_MAX_MATCHES_PER_POS matches, then skip searching
492 * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
493 * write the match count header for each.
495 struct lz_match match_cache[LZX_CACHE_LENGTH +
496 LZX_MAX_MATCHES_PER_POS +
497 LZX_MAX_MATCH_LEN - 1];
499 /* Binary trees matchfinder (MUST BE LAST!!!) */
501 struct bt_matchfinder_16 bt_mf_16;
502 struct bt_matchfinder_32 bt_mf_32;
509 * Will a matchfinder using 16-bit positions be sufficient for compressing
510 * buffers of up to the specified size? The limit could be 65536 bytes, but we
511 * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
 * This requires that the limit be no more than the length of offset_slot_tab_1
 * (currently 32768). */
516 lzx_is_16_bit(size_t max_bufsize)
518 STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
519 return max_bufsize <= 32768;
523 * The following macros call either the 16-bit or the 32-bit version of a
524 * matchfinder function based on the value of 'is_16_bit', which will be known
525 * at compilation time.
528 #define CALL_HC_MF(is_16_bit, c, funcname, ...) \
529 ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
530 CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__));
532 #define CALL_BT_MF(is_16_bit, c, funcname, ...) \
533 ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \
534 CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__));
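/*
 * Usage sketch (the function name shown is the generic matchfinder name; the
 * suffixed variants are generated by the matchfinder headers included above):
 * a call such as
 *
 *	CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
 *
 * expands to hc_matchfinder_init_16(&c->hc_mf_16) or
 * hc_matchfinder_init_32(&c->hc_mf_32), and the unused branch is optimized
 * away when 'is_16_bit' is a compile-time constant.
 */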
537 * Structure to keep track of the current state of sending bits to the
538 * compressed output buffer.
540 * The LZX bitstream is encoded as a sequence of 16-bit coding units.
542 struct lzx_output_bitstream {
544 /* Bits that haven't yet been written to the output buffer. */
545 machine_word_t bitbuf;
547 /* Number of bits currently held in @bitbuf. */
550 /* Pointer to the start of the output buffer. */
553 /* Pointer to the position in the output buffer at which the next coding
554 * unit should be written. */
557 /* Pointer just past the end of the output buffer, rounded down to a
558 * 2-byte boundary. */
562 /* Can the specified number of bits always be added to 'bitbuf' after any
563 * pending 16-bit coding units have been flushed? */
564 #define CAN_BUFFER(n) ((n) <= (8 * sizeof(machine_word_t)) - 15)
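/*
 * Worked example (illustrative): on a typical 64-bit target,
 * sizeof(machine_word_t) == 8, so CAN_BUFFER(n) holds for n <= 64 - 15 = 49;
 * on a 32-bit target it holds only for n <= 17.  The literal and match output
 * paths below use this to decide how many symbols to accumulate per flush.
 */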
567 * Initialize the output bitstream.
570 * The output bitstream structure to initialize.
572 * The buffer being written to.
574 * Size of @buffer, in bytes.
577 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
582 os->next = os->start;
583 os->end = os->start + (size & ~1);
586 /* Add some bits to the bitbuffer variable of the output bitstream. The caller
587 * must make sure there is enough room. */
589 lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
591 os->bitbuf = (os->bitbuf << num_bits) | bits;
592 os->bitcount += num_bits;
595 /* Flush bits from the bitbuffer variable to the output buffer. 'max_num_bits'
 * specifies the maximum number of bits that may have been added since the last
 * flush. */
599 lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
601 /* Masking the number of bits to shift is only needed to avoid undefined
602 * behavior; we don't actually care about the results of bad shifts. On
603 * x86, the explicit masking generates no extra code. */
604 const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1;
606 if (os->end - os->next < 6)
608 put_unaligned_le16(os->bitbuf >> ((os->bitcount - 16) &
609 shift_mask), os->next + 0);
610 if (max_num_bits > 16)
611 put_unaligned_le16(os->bitbuf >> ((os->bitcount - 32) &
612 shift_mask), os->next + 2);
613 if (max_num_bits > 32)
614 put_unaligned_le16(os->bitbuf >> ((os->bitcount - 48) &
615 shift_mask), os->next + 4);
616 os->next += (os->bitcount >> 4) << 1;
620 /* Add at most 16 bits to the bitbuffer and flush it. */
622 lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
624 lzx_add_bits(os, bits, num_bits);
625 lzx_flush_bits(os, 16);
629 * Flush the last coding unit to the output buffer if needed. Return the total
630 * number of bytes written to the output buffer, or 0 if an overflow occurred.
633 lzx_flush_output(struct lzx_output_bitstream *os)
635 if (os->end - os->next < 6)
638 if (os->bitcount != 0) {
639 put_unaligned_le16(os->bitbuf << (16 - os->bitcount), os->next);
643 return os->next - os->start;
647 * Build the main, length, and aligned offset Huffman codes used in LZX.
649 * This takes as input the frequency tables for each code and produces as output
650 * a set of tables that map symbols to codewords and codeword lengths.
653 lzx_make_huffman_codes(struct lzx_compressor *c)
655 const struct lzx_freqs *freqs = &c->freqs;
656 struct lzx_codes *codes = &c->codes[c->codes_index];
658 STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
659 MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
660 STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
661 LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
662 STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
663 ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
665 make_canonical_huffman_code(c->num_main_syms,
669 codes->codewords.main);
671 make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
672 LENGTH_CODEWORD_LIMIT,
675 codes->codewords.len);
677 make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
678 ALIGNED_CODEWORD_LIMIT,
681 codes->codewords.aligned);
684 /* Reset the symbol frequencies for the LZX Huffman codes. */
686 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
688 memset(&c->freqs, 0, sizeof(c->freqs));
692 lzx_compute_precode_items(const u8 lens[restrict],
693 const u8 prev_lens[restrict],
694 u32 precode_freqs[restrict],
695 unsigned precode_items[restrict])
704 itemptr = precode_items;
707 while (!((len = lens[run_start]) & 0x80)) {
709 /* len = the length being repeated */
711 /* Find the next run of codeword lengths. */
713 run_end = run_start + 1;
715 /* Fast case for a single length. */
716 if (likely(len != lens[run_end])) {
717 delta = prev_lens[run_start] - len;
720 precode_freqs[delta]++;
726 /* Extend the run. */
729 } while (len == lens[run_end]);
734 /* Symbol 18: RLE 20 to 51 zeroes at a time. */
735 while ((run_end - run_start) >= 20) {
736 extra_bits = min((run_end - run_start) - 20, 0x1f);
738 *itemptr++ = 18 | (extra_bits << 5);
739 run_start += 20 + extra_bits;
742 /* Symbol 17: RLE 4 to 19 zeroes at a time. */
743 if ((run_end - run_start) >= 4) {
744 extra_bits = min((run_end - run_start) - 4, 0xf);
746 *itemptr++ = 17 | (extra_bits << 5);
747 run_start += 4 + extra_bits;
751 /* A run of nonzero lengths. */
753 /* Symbol 19: RLE 4 to 5 of any length at a time. */
754 while ((run_end - run_start) >= 4) {
755 extra_bits = (run_end - run_start) > 4;
756 delta = prev_lens[run_start] - len;
760 precode_freqs[delta]++;
761 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
762 run_start += 4 + extra_bits;
766 /* Output any remaining lengths without RLE. */
767 while (run_start != run_end) {
768 delta = prev_lens[run_start] - len;
771 precode_freqs[delta]++;
777 return itemptr - precode_items;
781 * Output a Huffman code in the compressed form used in LZX.
783 * The Huffman code is represented in the output as a logical series of codeword
 * lengths from which the Huffman code, which must be in canonical form, can be
 * reconstructed.
787 * The codeword lengths are themselves compressed using a separate Huffman code,
788 * the "precode", which contains a symbol for each possible codeword length in
789 * the larger code as well as several special symbols to represent repeated
790 * codeword lengths (a form of run-length encoding). The precode is itself
791 * constructed in canonical form, and its codeword lengths are represented
792 * literally in 20 4-bit fields that immediately precede the compressed codeword
793 * lengths of the larger code.
795 * Furthermore, the codeword lengths of the larger code are actually represented
 * as deltas from the codeword lengths of the corresponding code in the previous
 * block.
800 * Bitstream to which to write the compressed Huffman code.
802 * The codeword lengths, indexed by symbol, in the Huffman code.
804 * The codeword lengths, indexed by symbol, in the corresponding Huffman
805 * code in the previous block, or all zeroes if this is the first block.
807 * The number of symbols in the Huffman code.
810 lzx_write_compressed_code(struct lzx_output_bitstream *os,
811 const u8 lens[restrict],
812 const u8 prev_lens[restrict],
815 u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
816 u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
817 u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
818 unsigned precode_items[num_lens];
819 unsigned num_precode_items;
820 unsigned precode_item;
821 unsigned precode_sym;
823 u8 saved = lens[num_lens];
824 *(u8 *)(lens + num_lens) = 0x80;
826 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
827 precode_freqs[i] = 0;
829 /* Compute the "items" (RLE / literal tokens and extra bits) with which
830 * the codeword lengths in the larger code will be output. */
831 num_precode_items = lzx_compute_precode_items(lens,
836 /* Build the precode. */
837 STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 &&
838 PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN);
839 make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
841 precode_freqs, precode_lens,
844 /* Output the lengths of the codewords in the precode. */
845 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
846 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
848 /* Output the encoded lengths of the codewords in the larger code. */
849 for (i = 0; i < num_precode_items; i++) {
850 precode_item = precode_items[i];
851 precode_sym = precode_item & 0x1F;
852 lzx_add_bits(os, precode_codewords[precode_sym],
853 precode_lens[precode_sym]);
854 if (precode_sym >= 17) {
855 if (precode_sym == 17) {
856 lzx_add_bits(os, precode_item >> 5, 4);
857 } else if (precode_sym == 18) {
858 lzx_add_bits(os, precode_item >> 5, 5);
860 lzx_add_bits(os, (precode_item >> 5) & 1, 1);
861 precode_sym = precode_item >> 6;
862 lzx_add_bits(os, precode_codewords[precode_sym],
863 precode_lens[precode_sym]);
866 STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1));
867 lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1);
870 *(u8 *)(lens + num_lens) = saved;
874 * Write all matches and literal bytes (which were precomputed) in an LZX
 * compressed block to the output bitstream in the final compressed
 * representation.
879 * The output bitstream.
881 * The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
882 * LZX_BLOCKTYPE_VERBATIM).
884 * The uncompressed data of the block.
886 * The matches and literals to output, given as a series of sequences.
888 * The main, length, and aligned offset Huffman codes for the current
889 * LZX compressed block.
892 lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
893 const u8 *block_data, const struct lzx_sequence sequences[],
894 const struct lzx_codes *codes)
896 const struct lzx_sequence *seq = sequences;
897 u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED);
900 /* Output the next sequence. */
902 unsigned litrunlen = seq->litrunlen;
904 unsigned main_symbol;
905 unsigned adjusted_length;
907 unsigned offset_slot;
908 unsigned num_extra_bits;
911 /* Output the literal run of the sequence. */
913 if (litrunlen) { /* Is the literal run nonempty? */
915 /* Verify optimization is enabled on 64-bit */
916 STATIC_ASSERT(sizeof(machine_word_t) < 8 ||
917 CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT));
919 if (CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT)) {
921 /* 64-bit: write 3 literals at a time. */
922 while (litrunlen >= 3) {
923 unsigned lit0 = block_data[0];
924 unsigned lit1 = block_data[1];
925 unsigned lit2 = block_data[2];
926 lzx_add_bits(os, codes->codewords.main[lit0],
927 codes->lens.main[lit0]);
928 lzx_add_bits(os, codes->codewords.main[lit1],
929 codes->lens.main[lit1]);
930 lzx_add_bits(os, codes->codewords.main[lit2],
931 codes->lens.main[lit2]);
932 lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
937 unsigned lit = *block_data++;
938 lzx_add_bits(os, codes->codewords.main[lit],
939 codes->lens.main[lit]);
941 unsigned lit = *block_data++;
942 lzx_add_bits(os, codes->codewords.main[lit],
943 codes->lens.main[lit]);
944 lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
946 lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT);
950 /* 32-bit: write 1 literal at a time. */
952 unsigned lit = *block_data++;
953 lzx_add_bits(os, codes->codewords.main[lit],
954 codes->lens.main[lit]);
955 lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
956 } while (--litrunlen);
960 /* Was this the last literal run? */
961 if (seq->adjusted_offset_and_match_hdr & 0x80000000)
964 /* Nope; output the match. */
966 match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
967 main_symbol = LZX_NUM_CHARS + match_hdr;
968 adjusted_length = seq->adjusted_length;
970 block_data += adjusted_length + LZX_MIN_MATCH_LEN;
972 offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
973 adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
975 num_extra_bits = lzx_extra_offset_bits[offset_slot];
976 extra_bits = adjusted_offset - lzx_offset_slot_base[offset_slot];
978 #define MAX_MATCH_BITS (MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
979 14 + ALIGNED_CODEWORD_LIMIT)
981 /* Verify optimization is enabled on 64-bit */
982 STATIC_ASSERT(sizeof(machine_word_t) < 8 || CAN_BUFFER(MAX_MATCH_BITS));
984 /* Output the main symbol for the match. */
986 lzx_add_bits(os, codes->codewords.main[main_symbol],
987 codes->lens.main[main_symbol]);
988 if (!CAN_BUFFER(MAX_MATCH_BITS))
989 lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
991 /* If needed, output the length symbol for the match. */
993 if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
994 lzx_add_bits(os, codes->codewords.len[adjusted_length -
995 LZX_NUM_PRIMARY_LENS],
996 codes->lens.len[adjusted_length -
997 LZX_NUM_PRIMARY_LENS]);
998 if (!CAN_BUFFER(MAX_MATCH_BITS))
999 lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
1002 /* Output the extra offset bits for the match. In aligned
1003 * offset blocks, the lowest 3 bits of the adjusted offset are
1004 * Huffman-encoded using the aligned offset code, provided that
 * at least 3 extra offset bits are required.  All other
1006 * extra offset bits are output verbatim. */
1008 if ((adjusted_offset & ones_if_aligned) >= 16) {
1010 lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
1011 num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
1012 if (!CAN_BUFFER(MAX_MATCH_BITS))
1013 lzx_flush_bits(os, 14);
1015 lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
1016 LZX_ALIGNED_OFFSET_BITMASK],
1017 codes->lens.aligned[adjusted_offset &
1018 LZX_ALIGNED_OFFSET_BITMASK]);
1019 if (!CAN_BUFFER(MAX_MATCH_BITS))
1020 lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
1022 STATIC_ASSERT(CAN_BUFFER(17));
1024 lzx_add_bits(os, extra_bits, num_extra_bits);
1025 if (!CAN_BUFFER(MAX_MATCH_BITS))
1026 lzx_flush_bits(os, 17);
1029 if (CAN_BUFFER(MAX_MATCH_BITS))
1030 lzx_flush_bits(os, MAX_MATCH_BITS);
1032 /* Advance to the next sequence. */
1038 lzx_write_compressed_block(const u8 *block_begin,
1041 unsigned window_order,
1042 unsigned num_main_syms,
1043 const struct lzx_sequence sequences[],
1044 const struct lzx_codes * codes,
1045 const struct lzx_lens * prev_lens,
1046 struct lzx_output_bitstream * os)
1048 /* The first three bits indicate the type of block and are one of the
1049 * LZX_BLOCKTYPE_* constants. */
1050 lzx_write_bits(os, block_type, 3);
1052 /* Output the block size.
1054 * The original LZX format seemed to always encode the block size in 3
1055 * bytes. However, the implementation in WIMGAPI, as used in WIM files,
1056 * uses the first bit to indicate whether the block is the default size
1057 * (32768) or a different size given explicitly by the next 16 bits.
1059 * By default, this compressor uses a window size of 32768 and therefore
1060 * follows the WIMGAPI behavior. However, this compressor also supports
1061 * window sizes greater than 32768 bytes, which do not appear to be
1062 * supported by WIMGAPI. In such cases, we retain the default size bit
1063 * to mean a size of 32768 bytes but output non-default block size in 24
1064 * bits rather than 16. The compatibility of this behavior is unknown
1065 * because WIMs created with chunk size greater than 32768 can seemingly
1066 * only be opened by wimlib anyway. */
1067 if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
1068 lzx_write_bits(os, 1, 1);
1070 lzx_write_bits(os, 0, 1);
1072 if (window_order >= 16)
1073 lzx_write_bits(os, block_size >> 16, 8);
1075 lzx_write_bits(os, block_size & 0xFFFF, 16);
1078 /* If it's an aligned offset block, output the aligned offset code. */
1079 if (block_type == LZX_BLOCKTYPE_ALIGNED) {
1080 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1081 lzx_write_bits(os, codes->lens.aligned[i],
1082 LZX_ALIGNEDCODE_ELEMENT_SIZE);
1086 /* Output the main code (two parts). */
1087 lzx_write_compressed_code(os, codes->lens.main,
1090 lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
1091 prev_lens->main + LZX_NUM_CHARS,
1092 num_main_syms - LZX_NUM_CHARS);
1094 /* Output the length code. */
1095 lzx_write_compressed_code(os, codes->lens.len,
1097 LZX_LENCODE_NUM_SYMBOLS);
1099 /* Output the compressed matches and literals. */
1100 lzx_write_sequences(os, block_type, block_begin, sequences, codes);
1103 /* Given the frequencies of symbols in an LZX-compressed block and the
1104 * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
1105 * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
1106 * will take fewer bits to output. */
1108 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1109 const struct lzx_codes * codes)
1111 u32 aligned_cost = 0;
1112 u32 verbatim_cost = 0;
1114 /* A verbatim block requires 3 bits in each place that an aligned symbol
1115 * would be used in an aligned offset block. */
1116 for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1117 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
1118 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1121 /* Account for output of the aligned offset code. */
1122 aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1124 if (aligned_cost < verbatim_cost)
1125 return LZX_BLOCKTYPE_ALIGNED;
1127 return LZX_BLOCKTYPE_VERBATIM;
1131 * Return the offset slot for the specified adjusted match offset, using the
1132 * compressor's acceleration tables to speed up the mapping.
1134 static inline unsigned
1135 lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
1138 if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
1139 return c->offset_slot_tab_1[adjusted_offset];
1140 return c->offset_slot_tab_2[adjusted_offset >> 14];
1144 * Flush an LZX block:
1146 * 1. Build the Huffman codes.
1147 * 2. Decide whether to output the block as VERBATIM or ALIGNED.
1148 * 3. Write the block.
1149 * 4. Swap the indices of the current and previous Huffman codes.
1152 lzx_flush_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
1153 const u8 *block_begin, u32 block_size, u32 seq_idx)
1157 lzx_make_huffman_codes(c);
1159 block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
1160 &c->codes[c->codes_index]);
1161 lzx_write_compressed_block(block_begin,
1166 &c->chosen_sequences[seq_idx],
1167 &c->codes[c->codes_index],
1168 &c->codes[c->codes_index ^ 1].lens,
1170 c->codes_index ^= 1;
1173 /* Tally the Huffman symbol for a literal and increment the literal run length.
1176 lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
1178 c->freqs.main[literal]++;
1182 /* Tally the Huffman symbol for a match, save the match data and the length of
 * the preceding literal run in the next lzx_sequence, and update the recent
 * offsets queue. */
1186 lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
1187 u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
1188 u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
1190 u32 litrunlen = *litrunlen_p;
1191 struct lzx_sequence *next_seq = *next_seq_p;
1192 unsigned offset_slot;
1195 v = length - LZX_MIN_MATCH_LEN;
1197 /* Save the literal run length and adjusted length. */
1198 next_seq->litrunlen = litrunlen;
1199 next_seq->adjusted_length = v;
1201 /* Compute the length header and tally the length symbol if needed */
1202 if (v >= LZX_NUM_PRIMARY_LENS) {
1203 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1204 v = LZX_NUM_PRIMARY_LENS;
1207 /* Compute the offset slot */
1208 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1210 /* Compute the match header. */
1211 v += offset_slot * LZX_NUM_LEN_HEADERS;
1213 /* Save the adjusted offset and match header. */
1214 next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v;
1216 /* Tally the main symbol. */
1217 c->freqs.main[LZX_NUM_CHARS + v]++;
1219 /* Update the recent offsets queue. */
1220 if (offset_data < LZX_NUM_RECENT_OFFSETS) {
1221 /* Repeat offset match */
1222 swap(recent_offsets[0], recent_offsets[offset_data]);
1224 /* Explicit offset match */
1226 /* Tally the aligned offset symbol if needed */
1227 if (offset_data >= 16)
1228 c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1230 recent_offsets[2] = recent_offsets[1];
1231 recent_offsets[1] = recent_offsets[0];
1232 recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT;
1235 /* Reset the literal run length and advance to the next sequence. */
1236 *next_seq_p = next_seq + 1;
1240 /* Finish the last lzx_sequence. The last lzx_sequence is just a literal run;
1241 * there is no match. This literal run may be empty. */
1243 lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
1245 last_seq->litrunlen = litrunlen;
1247 /* Special value to mark last sequence */
1248 last_seq->adjusted_offset_and_match_hdr = 0x80000000;
1251 /******************************************************************************/
1254 * Block splitting algorithm. The problem is to decide when it is worthwhile to
1255 * start a new block with new entropy codes. There is a theoretically optimal
1256 * solution: recursively consider every possible block split, considering the
1257 * exact cost of each block, and choose the minimum cost approach. But this is
1258 * far too slow. Instead, as an approximation, we can count symbols and after
1259 * every N symbols, compare the expected distribution of symbols based on the
1260 * previous data with the actual distribution. If they differ "by enough", then
1261 * start a new block.
1263 * As an optimization and heuristic, we don't distinguish between every symbol
1264 * but rather we combine many symbols into a single "observation type". For
1265 * literals we only look at the high bits and low bits, and for matches we only
1266 * look at whether the match is long or not. The assumption is that for typical
 * "real" data, places that are good block boundaries will tend to be noticeable
1268 * based only on changes in these aggregate frequencies, without looking for
1269 * subtle differences in individual symbols. For example, a change from ASCII
1270 * bytes to non-ASCII bytes, or from few matches (generally less compressible)
1271 * to many matches (generally more compressible), would be easily noticed based
1272 * on the aggregates.
1274 * For determining whether the frequency distributions are "different enough" to
 * start a new block, the simple heuristic of splitting when the sum of absolute
1276 * differences exceeds a constant seems to be good enough. We also add a number
1277 * proportional to the block size so that the algorithm is more likely to end
1278 * large blocks than small blocks. This reflects the general expectation that
1279 * it will become increasingly beneficial to start a new block as the current
 * block grows larger.
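 *
 * In rough, normalized terms (ignoring integer truncation), the check
 * implemented in do_end_block_check() below ends the block once
 *
 *	sum over types i of |f_new(i) - f_old(i)|
 *		+ (block_size / 1024) / num_new_observations  >=  51/64
 *
 * where f_new(i) and f_old(i) are the fractions of new and previously
 * accumulated observations of type i.  With about 500 new observations per
 * check, the block-size term contributes roughly 0.2 by the time the block
 * approaches SOFT_MAX_BLOCK_SIZE.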
1282 * Finally, for an approximation, it is not strictly necessary that the exact
1283 * symbols being used are considered. With "near-optimal parsing", for example,
1284 * the actual symbols that will be used are unknown until after the block
1285 * boundary is chosen and the block has been optimized. Since the final choices
1286 * cannot be used, we can use preliminary "greedy" choices instead.
1289 /* Initialize the block split statistics when starting a new block. */
1291 init_block_split_stats(struct block_split_stats *stats)
1293 for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
1294 stats->new_observations[i] = 0;
1295 stats->observations[i] = 0;
1297 stats->num_new_observations = 0;
1298 stats->num_observations = 0;
/* Literal observation.  Heuristic: use the top 2 bits and the lowest bit of the
1302 * literal, for 8 possible literal observation types. */
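/* Example (illustrative): the byte 0x41 ('A') has top two bits 01 and low bit
 * 1, so observe_literal() counts it as observation type
 * ((0x41 >> 5) & 0x6) | (0x41 & 1) == 3. */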
1304 observe_literal(struct block_split_stats *stats, u8 lit)
1306 stats->new_observations[((lit >> 5) & 0x6) | (lit & 1)]++;
1307 stats->num_new_observations++;
1310 /* Match observation. Heuristic: use one observation type for "short match" and
1311 * one observation type for "long match". */
1313 observe_match(struct block_split_stats *stats, unsigned length)
1315 stats->new_observations[NUM_LITERAL_OBSERVATION_TYPES + (length >= 5)]++;
1316 stats->num_new_observations++;
1320 do_end_block_check(struct block_split_stats *stats, u32 block_size)
1322 if (stats->num_observations > 0) {
1324 /* Note: to avoid slow divisions, we do not divide by
1325 * 'num_observations', but rather do all math with the numbers
1326 * multiplied by 'num_observations'. */
1327 u32 total_delta = 0;
1328 for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
1329 u32 expected = stats->observations[i] * stats->num_new_observations;
1330 u32 actual = stats->new_observations[i] * stats->num_observations;
1331 u32 delta = (actual > expected) ? actual - expected :
1333 total_delta += delta;
1336 /* Ready to end the block? */
1337 if (total_delta + (block_size / 1024) * stats->num_observations >=
1338 stats->num_new_observations * 51 / 64 * stats->num_observations)
1342 for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
1343 stats->num_observations += stats->new_observations[i];
1344 stats->observations[i] += stats->new_observations[i];
1345 stats->new_observations[i] = 0;
1347 stats->num_new_observations = 0;
1352 should_end_block(struct block_split_stats *stats,
1353 const u8 *in_block_begin, const u8 *in_next, const u8 *in_end)
1355 /* Ready to check block split statistics? */
1356 if (stats->num_new_observations < NUM_OBSERVATIONS_PER_BLOCK_CHECK ||
1357 in_next - in_block_begin < MIN_BLOCK_SIZE ||
1358 in_end - in_next < MIN_BLOCK_SIZE)
1361 return do_end_block_check(stats, in_next - in_block_begin);
1364 /******************************************************************************/
1367 * Given the minimum-cost path computed through the item graph for the current
1368 * block, walk the path and count how many of each symbol in each Huffman-coded
1369 * alphabet would be required to output the items (matches and literals) along
1372 * Note that the path will be walked backwards (from the end of the block to the
1373 * beginning of the block), but this doesn't matter because this function only
1374 * computes frequencies.
1377 lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1379 u32 node_idx = block_size;
1386 unsigned offset_slot;
1388 /* Tally literals until either a match or the beginning of the
1389 * block is reached. */
1391 item = c->optimum_nodes[node_idx].item;
1392 if (item & OPTIMUM_LEN_MASK)
1394 c->freqs.main[item >> OPTIMUM_OFFSET_SHIFT]++;
1398 if (item & OPTIMUM_EXTRA_FLAG) {
1403 /* Tally a rep0 match. */
1404 len = item & OPTIMUM_LEN_MASK;
1405 v = len - LZX_MIN_MATCH_LEN;
1406 if (v >= LZX_NUM_PRIMARY_LENS) {
1407 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1408 v = LZX_NUM_PRIMARY_LENS;
1410 c->freqs.main[LZX_NUM_CHARS + v]++;
1412 /* Tally a literal. */
1413 c->freqs.main[c->optimum_nodes[node_idx].extra_literal]++;
1415 item = c->optimum_nodes[node_idx].extra_match;
1416 node_idx -= len + 1;
1419 len = item & OPTIMUM_LEN_MASK;
1420 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1424 /* Tally a match. */
1426 /* Tally the aligned offset symbol if needed. */
1427 if (offset_data >= 16)
1428 c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1430 /* Tally the length symbol if needed. */
		v = len - LZX_MIN_MATCH_LEN;
1432 if (v >= LZX_NUM_PRIMARY_LENS) {
1433 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1434 v = LZX_NUM_PRIMARY_LENS;
1437 /* Tally the main symbol. */
1438 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1439 v += offset_slot * LZX_NUM_LEN_HEADERS;
1440 c->freqs.main[LZX_NUM_CHARS + v]++;
1445 * Like lzx_tally_item_list(), but this function also generates the list of
1446 * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
1447 * ready to be output to the bitstream after the Huffman codes are computed.
1448 * The lzx_sequences will be written to decreasing memory addresses as the path
1449 * is walked backwards, which means they will end up in the expected
1450 * first-to-last order. The return value is the index in c->chosen_sequences at
1451 * which the lzx_sequences begin.
1454 lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1456 u32 node_idx = block_size;
1457 u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
1460 /* Special value to mark last sequence */
1461 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000;
1463 lit_start_node = node_idx;
1469 unsigned offset_slot;
1471 /* Tally literals until either a match or the beginning of the
1472 * block is reached. */
1474 item = c->optimum_nodes[node_idx].item;
1475 if (item & OPTIMUM_LEN_MASK)
1477 c->freqs.main[item >> OPTIMUM_OFFSET_SHIFT]++;
1481 if (item & OPTIMUM_EXTRA_FLAG) {
1486 /* Save the literal run length for the next sequence
1487 * (the "previous sequence" when walking backwards). */
1488 len = item & OPTIMUM_LEN_MASK;
1489 c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
1491 lit_start_node = node_idx - len;
1493 /* Tally a rep0 match. */
1494 v = len - LZX_MIN_MATCH_LEN;
1495 c->chosen_sequences[seq_idx].adjusted_length = v;
1496 if (v >= LZX_NUM_PRIMARY_LENS) {
1497 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1498 v = LZX_NUM_PRIMARY_LENS;
1500 c->freqs.main[LZX_NUM_CHARS + v]++;
1501 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = v;
1503 /* Tally a literal. */
1504 c->freqs.main[c->optimum_nodes[node_idx].extra_literal]++;
1506 item = c->optimum_nodes[node_idx].extra_match;
1507 node_idx -= len + 1;
1510 len = item & OPTIMUM_LEN_MASK;
1511 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1513 /* Save the literal run length for the next sequence (the
1514 * "previous sequence" when walking backwards). */
1515 c->chosen_sequences[seq_idx--].litrunlen = lit_start_node - node_idx;
1517 lit_start_node = node_idx;
1519 /* Record a match. */
1521 /* Tally the aligned offset symbol if needed. */
1522 if (offset_data >= 16)
1523 c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1525 /* Save the adjusted length. */
1526 v = len - LZX_MIN_MATCH_LEN;
1527 c->chosen_sequences[seq_idx].adjusted_length = v;
1529 /* Tally the length symbol if needed. */
1530 if (v >= LZX_NUM_PRIMARY_LENS) {
1531 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1532 v = LZX_NUM_PRIMARY_LENS;
1535 /* Tally the main symbol. */
1536 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1537 v += offset_slot * LZX_NUM_LEN_HEADERS;
1538 c->freqs.main[LZX_NUM_CHARS + v]++;
1540 /* Save the adjusted offset and match header. */
1541 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr =
1542 (offset_data << 9) | v;
1545 /* Save the literal run length for the first sequence. */
1546 c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
	/* Return the index in c->chosen_sequences at which the lzx_sequences
	 * begin. */
1554 * Find an inexpensive path through the graph of possible match/literal choices
1555 * for the current block. The nodes of the graph are
1556 * c->optimum_nodes[0...block_size]. They correspond directly to the bytes in
1557 * the current block, plus one extra node for end-of-block. The edges of the
1558 * graph are matches and literals. The goal is to find the minimum cost path
1559 * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]', given the cost
1562 * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1563 * proceeding forwards one node at a time. At each node, a selection of matches
1564 * (len >= 2), as well as the literal byte (len = 1), is considered. An item of
1565 * length 'len' provides a new path to reach the node 'len' bytes later. If
1566 * such a path is the lowest cost found so far to reach that later node, then
1567 * that later node is updated with the new path.
1569 * Note that although this algorithm is based on minimum cost path search, due
1570 * to various simplifying assumptions the result is not guaranteed to be the
1571 * true minimum cost, or "optimal", path over the graph of all valid LZX
1572 * representations of this block.
1574 * Also, note that because of the presence of the recent offsets queue (which is
1575 * a type of adaptive state), the algorithm cannot work backwards and compute
1576 * "cost to end" instead of "cost to beginning". Furthermore, the way the
1577 * algorithm handles this adaptive state in the "minimum cost" parse is actually
1578 * only an approximation. It's possible for the globally optimal, minimum cost
1579 * path to contain a prefix, ending at a position, where that path prefix is
1580 * *not* the minimum cost path to that position. This can happen if such a path
1581 * prefix results in a different adaptive state which results in lower costs
1582 * later. The algorithm does not solve this problem; it only considers the
1583 * lowest cost to reach each individual position.
1585 static inline struct lzx_lru_queue
1586 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1587 const u8 * const restrict block_begin,
1588 const u32 block_size,
1589 const struct lzx_lru_queue initial_queue,
1592 struct lzx_optimum_node *cur_node = c->optimum_nodes;
1593 struct lz_match *cache_ptr = c->match_cache;
1594 const u8 *in_next = block_begin;
1595 const u8 * const block_end = block_begin + block_size;
1597 /* Instead of storing the match offset LRU queues in the
1598 * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1599 * storing them in a smaller array. This works because the algorithm
1600 * only requires a limited history of the adaptive state. Once a given
1601 * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1602 * it is no longer needed. */
1603 struct lzx_lru_queue queues[512];
1605 STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
1606 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
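/*
 * Note (explanatory): QUEUE() maps a byte position to a slot by taking the
 * position modulo ARRAY_LEN(queues), so the ring buffer keeps only the most
 * recent queue states, which is all the algorithm needs per the comment
 * above.
 */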
1608 /* Initially, the cost to reach each node is "infinity". */
1609 memset(c->optimum_nodes, 0xFF,
1610 (block_size + 1) * sizeof(c->optimum_nodes[0]));
1612 QUEUE(block_begin) = initial_queue;
1614 /* The following loop runs 'block_size' iterations, one per node. */
1616 unsigned num_matches;
1621 * A selection of matches for the block was already saved in
1622 * memory so that we don't have to run the uncompressed data
1623 * through the matchfinder on every optimization pass. However,
1624 * we still search for repeat offset matches during each
1625 * optimization pass because we cannot predict the state of the
1626 * recent offsets queue. But as a heuristic, we don't bother
1627 * searching for repeat offset matches if the general-purpose
1628 * matchfinder failed to find any matches.
1630 * Note that a match of length n at some offset implies there is
1631 * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1632 * that same offset. In other words, we don't necessarily need
1633 * to use the full length of a match. The key heuristic that
 * saves a significant amount of time is that for each
1635 * distinct length, we only consider the smallest offset for
1636 * which that length is available. This heuristic also applies
1637 * to repeat offsets, which we order specially: R0 < R1 < R2 <
 * any explicit offset.  Of course, this heuristic may
1639 * produce suboptimal results because offset slots in LZX are
1640 * subject to entropy encoding, but in practice this is a useful
1644 num_matches = cache_ptr->length;
1648 struct lz_match *end_matches = cache_ptr + num_matches;
1649 unsigned next_len = LZX_MIN_MATCH_LEN;
1650 unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1653 /* Consider R0 match */
1654 matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1655 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1657 STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
1659 u32 cost = cur_node->cost +
1660 c->costs.match_cost[0][
1661 next_len - LZX_MIN_MATCH_LEN];
1662 if (cost <= (cur_node + next_len)->cost) {
1663 (cur_node + next_len)->cost = cost;
1664 (cur_node + next_len)->item =
1665 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1667 if (unlikely(++next_len > max_len)) {
1668 cache_ptr = end_matches;
1671 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1675 /* Consider R1 match */
1676 matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1677 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1679 if (matchptr[next_len - 1] != in_next[next_len - 1])
1681 for (unsigned len = 2; len < next_len - 1; len++)
1682 if (matchptr[len] != in_next[len])
1685 u32 cost = cur_node->cost +
1686 c->costs.match_cost[1][
1687 next_len - LZX_MIN_MATCH_LEN];
1688 if (cost <= (cur_node + next_len)->cost) {
1689 (cur_node + next_len)->cost = cost;
1690 (cur_node + next_len)->item =
1691 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1693 if (unlikely(++next_len > max_len)) {
1694 cache_ptr = end_matches;
1697 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1701 /* Consider R2 match */
1702 matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1703 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1705 if (matchptr[next_len - 1] != in_next[next_len - 1])
1707 for (unsigned len = 2; len < next_len - 1; len++)
1708 if (matchptr[len] != in_next[len])
1711 u32 cost = cur_node->cost +
1712 c->costs.match_cost[2][
1713 next_len - LZX_MIN_MATCH_LEN];
1714 if (cost <= (cur_node + next_len)->cost) {
1715 (cur_node + next_len)->cost = cost;
1716 (cur_node + next_len)->item =
1717 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1719 if (unlikely(++next_len > max_len)) {
1720 cache_ptr = end_matches;
1723 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1727 while (next_len > cache_ptr->length)
1728 if (++cache_ptr == end_matches)
1731 /* Consider explicit offset matches */
1733 u32 offset = cache_ptr->offset;
1734 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1735 unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
1737 u32 base_cost = cur_node->cost;
1740 #if LZX_CONSIDER_ALIGNED_COSTS
1741 if (offset_data >= 16)
1742 base_cost += c->costs.aligned[offset_data &
1743 LZX_ALIGNED_OFFSET_BITMASK];
1747 c->costs.match_cost[offset_slot][
1748 next_len - LZX_MIN_MATCH_LEN];
1749 if (cost < (cur_node + next_len)->cost) {
1750 (cur_node + next_len)->cost = cost;
1751 (cur_node + next_len)->item =
1752 (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1754 } while (++next_len <= cache_ptr->length);
1756 if (++cache_ptr == end_matches) {
1757 /* Consider match + lit + rep0 */
1758 u32 remaining = block_end - (in_next + next_len);
1759 if (likely(remaining >= 2)) {
1760 const u8 *strptr = in_next + next_len;
1761 const u8 *matchptr = strptr - offset;
1762 if (unlikely(load_u16_unaligned(strptr) == load_u16_unaligned(matchptr))) {
1763 u32 rep0_len = lz_extend(strptr, matchptr, 2,
1764 min(remaining, LZX_MAX_MATCH_LEN));
1765 u8 lit = strptr[-1];
1766 cost += c->costs.main[lit] +
1767 c->costs.match_cost[0][rep0_len - LZX_MIN_MATCH_LEN];
1768 u32 total_len = next_len + rep0_len;
1769 if (cost < (cur_node + total_len)->cost) {
1770 (cur_node + total_len)->cost = cost;
1771 (cur_node + total_len)->item =
1772 OPTIMUM_EXTRA_FLAG | rep0_len;
1773 (cur_node + total_len)->extra_literal = lit;
1774 (cur_node + total_len)->extra_match =
1775 (offset_data << OPTIMUM_OFFSET_SHIFT) | (next_len - 1);
1786 /* Consider coding a literal.
1788 * To avoid an extra branch, actually checking the preferability
1789 * of coding the literal is integrated into the queue update code below. */
1791 literal = *in_next++;
1792 cost = cur_node->cost + c->costs.main[literal];
1794 /* Advance to the next position. */
1797 /* The lowest-cost path to the current position is now known.
1798 * Finalize the recent offsets queue that results from taking
1799 * this lowest-cost path. */
1801 if (cost <= cur_node->cost) {
1802 /* Literal: queue remains unchanged. */
1803 cur_node->cost = cost;
1804 cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
1805 QUEUE(in_next) = QUEUE(in_next - 1);
1807 /* Match: queue update is needed. */
1808 unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1809 u32 offset_data = (cur_node->item &
1810 ~OPTIMUM_EXTRA_FLAG) >> OPTIMUM_OFFSET_SHIFT;
1811 if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1812 /* Explicit offset match: insert offset at front */
1814 lzx_lru_queue_push(QUEUE(in_next - len),
1815 offset_data - LZX_OFFSET_ADJUSTMENT);
1816 } else if (cur_node->item & OPTIMUM_EXTRA_FLAG) {
1817 /* Explicit offset match, then literal, then
1818 * rep0 match: insert offset at front */
1819 len += 1 + (cur_node->extra_match & OPTIMUM_LEN_MASK);
1821 lzx_lru_queue_push(QUEUE(in_next - len),
1822 (cur_node->extra_match >> OPTIMUM_OFFSET_SHIFT) -
1823 LZX_OFFSET_ADJUSTMENT);
1825 /* Repeat offset match: swap offset to front */
1827 lzx_lru_queue_swap(QUEUE(in_next - len),
1831 } while (in_next != block_end);
1833 /* Return the match offset queue at the end of the minimum cost path. */
1834 return QUEUE(block_end);
1837 /* Given the costs for the main and length codewords, compute 'match_costs'. */
1839 lzx_compute_match_costs(struct lzx_compressor *c)
1841 unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) /
1842 LZX_NUM_LEN_HEADERS;
1843 struct lzx_costs *costs = &c->costs;
1845 for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1847 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1848 unsigned main_symbol = LZX_NUM_CHARS + (offset_slot *
1849 LZX_NUM_LEN_HEADERS);
1852 #if LZX_CONSIDER_ALIGNED_COSTS
1853 if (offset_slot >= 8)
1854 extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1857 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1858 costs->match_cost[offset_slot][i] =
1859 costs->main[main_symbol++] + extra_cost;
1861 extra_cost += costs->main[main_symbol];
1863 for (; i < LZX_NUM_LENS; i++)
1864 costs->match_cost[offset_slot][i] =
1865 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
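/*
 * Worked example (illustrative; the numbers are hypothetical): with
 * LZX_BIT_COST == 64, if the main symbol for a slot's first length header
 * currently costs 384 (6 bits) and the slot has 5 extra offset bits, then a
 * primary-length match in that slot costs 384 + 5*64 = 704, i.e. 11 bits
 * (ignoring the aligned-offset adjustment for slots >= 8).  Lengths beyond
 * the primary range additionally pay the cost of the slot's final length
 * header plus the appropriate length-code symbol.
 */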
1869 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization algorithm. */
1872 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1875 bool have_byte[256];
1876 unsigned num_used_bytes;
1878 /* The costs below are hard coded to use a scaling factor of 64. */
1879 STATIC_ASSERT(LZX_BIT_COST == 64);
1884 * - Use smaller initial costs for literal symbols when the input buffer
1885 * contains fewer distinct bytes.
1887 * - Assume that match symbols are more costly than literal symbols.
1889 * - Assume that length symbols for shorter lengths are less costly than
1890 * length symbols for longer lengths.
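/*
 * Worked example (illustrative): with LZX_BIT_COST == 64, a cost of 64
 * represents one bit.  If only 64 distinct byte values occur in the block,
 * each literal symbol starts at 560 - (256 - 64) = 368, about 5.75 bits; if
 * all 256 values occur, literals start at 560 (8.75 bits).  Match symbols
 * start at 680 (about 10.6 bits), and length symbols at 412 + i, so shorter
 * lengths begin slightly cheaper than longer ones.
 */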
1893 for (i = 0; i < 256; i++)
1894 have_byte[i] = false;
1896 for (i = 0; i < block_size; i++)
1897 have_byte[block[i]] = true;
1900 for (i = 0; i < 256; i++)
1901 num_used_bytes += have_byte[i];
1903 for (i = 0; i < 256; i++)
1904 c->costs.main[i] = 560 - (256 - num_used_bytes);
1906 for (; i < c->num_main_syms; i++)
1907 c->costs.main[i] = 680;
1909 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1910 c->costs.len[i] = 412 + i;
1912 #if LZX_CONSIDER_ALIGNED_COSTS
1913 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1914 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1917 lzx_compute_match_costs(c);
1920 /* Update the current cost model to reflect the computed Huffman codes. */
1922 lzx_set_costs_from_codes(struct lzx_compressor *c)
1925 const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1927 for (i = 0; i < c->num_main_syms; i++) {
1928 c->costs.main[i] = (lens->main[i] ? lens->main[i] :
1929 MAIN_CODEWORD_LIMIT) * LZX_BIT_COST;
1932 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
1933 c->costs.len[i] = (lens->len[i] ? lens->len[i] :
1934 LENGTH_CODEWORD_LIMIT) * LZX_BIT_COST;
1937 #if LZX_CONSIDER_ALIGNED_COSTS
1938 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1939 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] :
1940 ALIGNED_CODEWORD_LIMIT) * LZX_BIT_COST;
1944 lzx_compute_match_costs(c);
1948 * Choose a "near-optimal" literal/match sequence to use for the current block.
1949 * Because the cost of each Huffman symbol is unknown until the Huffman codes
1950 * have been built, and the Huffman codes themselves depend on the symbol
1951 * frequencies, this uses an iterative optimization algorithm to approximate an
1952 * optimal solution. The first optimization pass for the block uses default
1953 * costs. Additional passes use costs taken from the Huffman codes computed in
1954 * the previous pass.
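/*
 * Illustrative summary of the code below: with c->num_optim_passes == 2, the
 * function runs lzx_find_min_cost_path() once with the default costs, tallies
 * the symbol frequencies along the chosen path, builds Huffman codes from
 * them, derives new costs from those codes, runs a second path search, and
 * only then records the final item list and flushes the block.
 */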
1956 static inline struct lzx_lru_queue
1957 lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
1958 struct lzx_output_bitstream * const restrict os,
1959 const u8 * const restrict block_begin,
1960 const u32 block_size,
1961 const struct lzx_lru_queue initial_queue,
1964 unsigned num_passes_remaining = c->num_optim_passes;
1965 struct lzx_lru_queue new_queue;
1968 lzx_set_default_costs(c, block_begin, block_size);
1971 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1972 initial_queue, is_16_bit);
1974 if (--num_passes_remaining == 0)
1977 /* At least one optimization pass remains; update the costs. */
1978 lzx_reset_symbol_frequencies(c);
1979 lzx_tally_item_list(c, block_size, is_16_bit);
1980 lzx_make_huffman_codes(c);
1981 lzx_set_costs_from_codes(c);
1984 /* Done optimizing. Generate the sequence list and flush the block. */
1985 lzx_reset_symbol_frequencies(c);
1986 seq_idx = lzx_record_item_list(c, block_size, is_16_bit);
1987 lzx_flush_block(c, os, block_begin, block_size, seq_idx);
1992 * This is the "near-optimal" LZX compressor.
1994 * For each block, it performs a relatively thorough graph search to find an
1995 * inexpensive (in terms of compressed size) way to output that block.
1997 * Note: there are actually many things this algorithm leaves on the table in
1998 * terms of compression ratio. So although it may be "near-optimal", it is
1999 * certainly not "optimal". The goal is not to produce the optimal compression
2000 * ratio, which for LZX is probably impossible within any practical amount of
2001 * time, but rather to produce a compression ratio significantly better than a
2002 * simpler "greedy" or "lazy" parse while still being relatively fast.
2005 lzx_compress_near_optimal(struct lzx_compressor * restrict c,
2006 const u8 * const restrict in_begin,
2007 struct lzx_output_bitstream * restrict os,
2010 const u8 * in_next = in_begin;
2011 const u8 * const in_end = in_begin + c->in_nbytes;
2012 u32 max_len = LZX_MAX_MATCH_LEN;
2013 u32 nice_len = min(c->nice_match_length, max_len);
2014 u32 next_hashes[2] = {};
2015 struct lzx_lru_queue queue;
2017 CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
2018 lzx_lru_queue_init(&queue);
2021 /* Starting a new block */
2022 const u8 * const in_block_begin = in_next;
2023 const u8 * const in_max_block_end =
2024 in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
2025 struct lz_match *cache_ptr = c->match_cache;
2026 const u8 *next_search_pos = in_next;
2027 const u8 *next_observation = in_next;
2028 const u8 *next_pause_point = min(in_next + MIN_BLOCK_SIZE,
2029 in_max_block_end - LZX_MAX_MATCH_LEN - 1);
2031 init_block_split_stats(&c->split_stats);
2033 /* Run the block through the matchfinder and cache the matches. */
2036 if (in_next >= next_search_pos) {
2037 struct lz_match *lz_matchptr;
2040 lz_matchptr = CALL_BT_MF(is_16_bit, c,
2041 bt_matchfinder_get_matches,
2046 c->max_search_depth,
2050 cache_ptr->length = lz_matchptr - (cache_ptr + 1);
2051 cache_ptr = lz_matchptr;
2053 if (in_next >= next_observation) {
2054 best_len = cache_ptr[-1].length;
2056 observe_match(&c->split_stats, best_len);
2057 next_observation = in_next + best_len;
2059 observe_literal(&c->split_stats, *in_next);
2060 next_observation = in_next + 1;
2064 * If there was a very long match found, then don't
2065 * cache any matches for the bytes covered by that
2066 * match. This avoids degenerate behavior when
2067 * compressing highly redundant data, where the number
2068 * of matches can be very large.
2070 * This heuristic doesn't actually hurt the compression
2071 * ratio very much. If there's a long match, then the
2072 * data must be highly compressible, so it doesn't
2073 * matter as much what we do.
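/*
 * Illustrative example (hypothetical numbers): with nice_len == 48, a
 * 1000-byte match causes roughly the next 1000 positions to be passed only to
 * bt_matchfinder_skip_position() and to receive empty cached match lists,
 * instead of each position caching up to max_search_depth matches.
 */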
2075 if (best_len >= nice_len) {
2076 next_search_pos = in_next + best_len;
2077 next_observation = next_search_pos;
2080 CALL_BT_MF(is_16_bit, c,
2081 bt_matchfinder_skip_position,
2085 c->max_search_depth,
2087 cache_ptr->length = 0;
2090 } while (++in_next < next_pause_point &&
2091 likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
2093 if (unlikely(cache_ptr >= &c->match_cache[LZX_CACHE_LENGTH]))
2096 if (max_len > in_end - in_next) {
2097 max_len = in_end - in_next;
2098 nice_len = min(max_len, nice_len);
2099 if (unlikely(max_len < BT_MATCHFINDER_REQUIRED_NBYTES)) {
2100 while (in_next != in_end) {
2102 cache_ptr->length = 0;
2108 if (in_next >= in_max_block_end)
2111 if (c->split_stats.num_new_observations >= NUM_OBSERVATIONS_PER_BLOCK_CHECK) {
2112 if (do_end_block_check(&c->split_stats, in_next - in_block_begin))
2114 if (in_max_block_end - in_next <= MIN_BLOCK_SIZE)
2115 next_observation = in_max_block_end;
2118 next_pause_point = min(in_next +
2119 NUM_OBSERVATIONS_PER_BLOCK_CHECK * 2 -
2120 c->split_stats.num_new_observations,
2121 in_max_block_end - LZX_MAX_MATCH_LEN - 1);
2125 /* We've finished running the block through the matchfinder.
2126 * Now choose a match/literal sequence and write the block. */
2128 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
2129 in_next - in_block_begin,
2131 } while (in_next != in_end);
2135 lzx_compress_near_optimal_16(struct lzx_compressor *c,
2136 struct lzx_output_bitstream *os)
2138 lzx_compress_near_optimal(c, c->in_buffer, os, true);
2142 lzx_compress_near_optimal_32(struct lzx_compressor *c,
2143 struct lzx_output_bitstream *os)
2145 lzx_compress_near_optimal(c, c->in_buffer, os, false);
2149 * Given a pointer to the current byte sequence and the current list of recent
2150 * match offsets, find the longest repeat offset match.
2152 * If no match of at least 2 bytes is found, then return 0.
2154 * If a match of at least 2 bytes is found, then return its length and set
2155 * *rep_max_idx_ret to the index of its offset in @recent_offsets.
2158 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
2159 const u32 bytes_remaining,
2160 const u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
2161 unsigned *rep_max_idx_ret)
2163 STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
2165 const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
2166 const u16 next_2_bytes = load_u16_unaligned(in_next);
2168 unsigned rep_max_len;
2169 unsigned rep_max_idx;
2172 matchptr = in_next - recent_offsets[0];
2173 if (load_u16_unaligned(matchptr) == next_2_bytes)
2174 rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
2179 matchptr = in_next - recent_offsets[1];
2180 if (load_u16_unaligned(matchptr) == next_2_bytes) {
2181 rep_len = lz_extend(in_next, matchptr, 2, max_len);
2182 if (rep_len > rep_max_len) {
2183 rep_max_len = rep_len;
2188 matchptr = in_next - recent_offsets[2];
2189 if (load_u16_unaligned(matchptr) == next_2_bytes) {
2190 rep_len = lz_extend(in_next, matchptr, 2, max_len);
2191 if (rep_len > rep_max_len) {
2192 rep_max_len = rep_len;
2197 *rep_max_idx_ret = rep_max_idx;
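/*
 * Usage sketch (illustrative only; it mirrors the calls made by the lazy
 * parser below):
 *
 *	unsigned rep_idx;
 *	unsigned rep_len = lzx_find_longest_repeat_offset_match(in_next,
 *					in_end - in_next, recent_offsets,
 *					&rep_idx);
 *	if (rep_len >= 2)
 *		// a repeat-offset match of rep_len bytes exists, using
 *		// recent_offsets[rep_idx]
 */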
2201 /* Fast heuristic scoring for lazy parsing: how "good" is this match? */
2202 static inline unsigned
2203 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
2205 unsigned score = len;
2207 if (adjusted_offset < 4096)
2210 if (adjusted_offset < 256)
2216 static inline unsigned
2217 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
2222 /* This is the "lazy" LZX compressor. */
2224 lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
2227 const u8 * const in_begin = c->in_buffer;
2228 const u8 * in_next = in_begin;
2229 const u8 * const in_end = in_begin + c->in_nbytes;
2230 unsigned max_len = LZX_MAX_MATCH_LEN;
2231 unsigned nice_len = min(c->nice_match_length, max_len);
2232 STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
2233 u32 recent_offsets[3] = {1, 1, 1};
2234 u32 next_hashes[2] = {};
2236 CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
2239 /* Starting a new block */
2241 const u8 * const in_block_begin = in_next;
2242 const u8 * const in_max_block_end =
2243 in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
2244 struct lzx_sequence *next_seq = c->chosen_sequences;
2247 u32 cur_offset_data;
2251 u32 next_offset_data;
2252 unsigned next_score;
2253 unsigned rep_max_len;
2254 unsigned rep_max_idx;
2259 lzx_reset_symbol_frequencies(c);
2260 init_block_split_stats(&c->split_stats);
2263 if (unlikely(max_len > in_end - in_next)) {
2264 max_len = in_end - in_next;
2265 nice_len = min(max_len, nice_len);
2268 /* Find the longest match at the current position. */
2270 cur_len = CALL_HC_MF(is_16_bit, c,
2271 hc_matchfinder_longest_match,
2277 c->max_search_depth,
2282 cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
2283 cur_offset != recent_offsets[0] &&
2284 cur_offset != recent_offsets[1] &&
2285 cur_offset != recent_offsets[2]))
2287 /* There was no match found, or the only match found
2288 * was a distant length 3 match. Output a literal. */
2289 lzx_record_literal(c, *in_next, &litrunlen);
2290 observe_literal(&c->split_stats, *in_next);
2295 observe_match(&c->split_stats, cur_len);
2297 if (cur_offset == recent_offsets[0]) {
2299 cur_offset_data = 0;
2300 skip_len = cur_len - 1;
2301 goto choose_cur_match;
2304 cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
2305 cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
2307 /* Consider a repeat offset match */
2308 rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2314 if (rep_max_len >= 3 &&
2315 (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2316 rep_max_idx)) >= cur_score)
2318 cur_len = rep_max_len;
2319 cur_offset_data = rep_max_idx;
2320 skip_len = rep_max_len - 1;
2321 goto choose_cur_match;
2326 /* We have a match at the current position. */
2328 /* If we have a very long match, choose it immediately. */
2329 if (cur_len >= nice_len) {
2330 skip_len = cur_len - 1;
2331 goto choose_cur_match;
2334 /* See if there's a better match at the next position. */
2336 if (unlikely(max_len > in_end - in_next)) {
2337 max_len = in_end - in_next;
2338 nice_len = min(max_len, nice_len);
2341 next_len = CALL_HC_MF(is_16_bit, c,
2342 hc_matchfinder_longest_match,
2348 c->max_search_depth / 2,
2352 if (next_len <= cur_len - 2) {
2354 skip_len = cur_len - 2;
2355 goto choose_cur_match;
2358 next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
2359 next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
2361 rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2367 if (rep_max_len >= 3 &&
2368 (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2369 rep_max_idx)) >= next_score)
2372 if (rep_score > cur_score) {
2373 /* The next match is better, and it's a
2374 * repeat offset match. */
2375 lzx_record_literal(c, *(in_next - 2),
2377 cur_len = rep_max_len;
2378 cur_offset_data = rep_max_idx;
2379 skip_len = cur_len - 1;
2380 goto choose_cur_match;
2383 if (next_score > cur_score) {
2384 /* The next match is better, and it's an
2385 * explicit offset match. */
2386 lzx_record_literal(c, *(in_next - 2),
2389 cur_offset_data = next_offset_data;
2390 cur_score = next_score;
2391 goto have_cur_match;
2395 /* The original match was better. */
2396 skip_len = cur_len - 2;
2399 lzx_record_match(c, cur_len, cur_offset_data,
2400 recent_offsets, is_16_bit,
2401 &litrunlen, &next_seq);
2402 in_next = CALL_HC_MF(is_16_bit, c,
2403 hc_matchfinder_skip_positions,
2409 } while (in_next < in_max_block_end &&
2410 !should_end_block(&c->split_stats, in_block_begin, in_next, in_end));
2412 lzx_finish_sequence(next_seq, litrunlen);
2414 lzx_flush_block(c, os, in_block_begin, in_next - in_block_begin, 0);
2416 } while (in_next != in_end);
2420 lzx_compress_lazy_16(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2422 lzx_compress_lazy(c, os, true);
2426 lzx_compress_lazy_32(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2428 lzx_compress_lazy(c, os, false);
2431 /* Generate the acceleration tables for offset slots. */
2433 lzx_init_offset_slot_tabs(struct lzx_compressor *c)
2435 u32 adjusted_offset = 0;
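/* slots [0, 29] */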
2439 for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
2442 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2444 c->offset_slot_tab_1[adjusted_offset] = slot;
2447 /* slots [30, 49] */
2448 for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
2449 adjusted_offset += (u32)1 << 14)
2451 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2453 c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
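/*
 * Illustrative sketch (hypothetical, not part of this file's API): the two
 * tables built above imply a lookup of the form
 *
 *	if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
 *		slot = c->offset_slot_tab_1[adjusted_offset];
 *	else
 *		slot = c->offset_slot_tab_2[adjusted_offset >> 14];
 *
 * The real helper, lzx_comp_get_offset_slot() used by the near-optimal parser
 * above, presumably consults these tables in this way.
 */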
2458 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
2460 if (compression_level <= LZX_MAX_FAST_LEVEL) {
2461 if (lzx_is_16_bit(max_bufsize))
2462 return offsetof(struct lzx_compressor, hc_mf_16) +
2463 hc_matchfinder_size_16(max_bufsize);
2465 return offsetof(struct lzx_compressor, hc_mf_32) +
2466 hc_matchfinder_size_32(max_bufsize);
2468 if (lzx_is_16_bit(max_bufsize))
2469 return offsetof(struct lzx_compressor, bt_mf_16) +
2470 bt_matchfinder_size_16(max_bufsize);
2472 return offsetof(struct lzx_compressor, bt_mf_32) +
2473 bt_matchfinder_size_32(max_bufsize);
2478 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
2483 if (max_bufsize > LZX_MAX_WINDOW_SIZE)
2486 size += lzx_get_compressor_size(max_bufsize, compression_level);
2488 size += max_bufsize; /* in_buffer */
2493 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
2494 bool destructive, void **c_ret)
2496 unsigned window_order;
2497 struct lzx_compressor *c;
2499 window_order = lzx_get_window_order(max_bufsize);
2500 if (window_order == 0)
2501 return WIMLIB_ERR_INVALID_PARAM;
2503 c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
2507 c->destructive = destructive;
2509 c->num_main_syms = lzx_get_num_main_syms(window_order);
2510 c->window_order = window_order;
2512 if (!c->destructive) {
2513 c->in_buffer = MALLOC(max_bufsize);
2518 if (compression_level <= LZX_MAX_FAST_LEVEL) {
2520 /* Fast compression: Use lazy parsing. */
2522 if (lzx_is_16_bit(max_bufsize))
2523 c->impl = lzx_compress_lazy_16;
2525 c->impl = lzx_compress_lazy_32;
2526 c->max_search_depth = (60 * compression_level) / 20;
2527 c->nice_match_length = (80 * compression_level) / 20;
2529 /* lzx_compress_lazy() needs max_search_depth >= 2 because it
2530 * halves the max_search_depth when attempting a lazy match, and
2531 * max_search_depth cannot be 0. */
2532 if (c->max_search_depth < 2)
2533 c->max_search_depth = 2;
2536 /* Normal / high compression: Use near-optimal parsing. */
2538 if (lzx_is_16_bit(max_bufsize))
2539 c->impl = lzx_compress_near_optimal_16;
2541 c->impl = lzx_compress_near_optimal_32;
2543 /* Scale nice_match_length and max_search_depth with the
2544 * compression level. */
2545 c->max_search_depth = (24 * compression_level) / 50;
2546 c->nice_match_length = (48 * compression_level) / 50;
2548 /* Set a number of optimization passes appropriate for the
2549 * compression level. */
2551 c->num_optim_passes = 1;
2553 if (compression_level >= 45)
2554 c->num_optim_passes++;
2556 /* Use more optimization passes for higher compression levels.
2557 * But the more passes there are, the less they help --- so
2558 * don't add them linearly. */
2559 if (compression_level >= 70) {
2560 c->num_optim_passes++;
2561 if (compression_level >= 100)
2562 c->num_optim_passes++;
2563 if (compression_level >= 150)
2564 c->num_optim_passes++;
2565 if (compression_level >= 200)
2566 c->num_optim_passes++;
2567 if (compression_level >= 300)
2568 c->num_optim_passes++;
2572 /* max_search_depth == 0 is invalid. */
2573 if (c->max_search_depth < 1)
2574 c->max_search_depth = 1;
2576 if (c->nice_match_length > LZX_MAX_MATCH_LEN)
2577 c->nice_match_length = LZX_MAX_MATCH_LEN;
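/*
 * Worked examples (illustrative): a level of 20, assuming it is
 * <= LZX_MAX_FAST_LEVEL, gives the lazy parser max_search_depth = 60 and
 * nice_match_length = 80.  Assuming levels 50 and 100 exceed
 * LZX_MAX_FAST_LEVEL, level 50 gives the near-optimal parser
 * max_search_depth = 24, nice_match_length = 48, and 2 optimization passes;
 * level 100 gives 48, 96, and 4 passes.
 */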
2579 lzx_init_offset_slot_tabs(c);
2586 return WIMLIB_ERR_NOMEM;
2590 lzx_compress(const void *restrict in, size_t in_nbytes,
2591 void *restrict out, size_t out_nbytes_avail, void *restrict _c)
2593 struct lzx_compressor *c = _c;
2594 struct lzx_output_bitstream os;
2597 /* Don't bother trying to compress very small inputs. */
2598 if (in_nbytes < 100)
2601 /* Copy the input data into the internal buffer and preprocess it. */
2603 c->in_buffer = (void *)in;
2605 memcpy(c->in_buffer, in, in_nbytes);
2606 c->in_nbytes = in_nbytes;
2607 lzx_preprocess(c->in_buffer, in_nbytes);
2609 /* Initially, the previous Huffman codeword lengths are all zeroes. */
2611 memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2613 /* Initialize the output bitstream. */
2614 lzx_init_output(&os, out, out_nbytes_avail);
2616 /* Call the compression level-specific compress() function. */
2619 /* Flush the output bitstream and return the compressed size or 0. */
2620 result = lzx_flush_output(&os);
2621 if (!result && c->destructive)
2622 lzx_postprocess(c->in_buffer, c->in_nbytes);
2627 lzx_free_compressor(void *_c)
2629 struct lzx_compressor *c = _c;
2631 if (!c->destructive)
2636 const struct compressor_ops lzx_compressor_ops = {
2637 .get_needed_memory = lzx_get_needed_memory,
2638 .create_compressor = lzx_create_compressor,
2639 .compress = lzx_compress,
2640 .free_compressor = lzx_free_compressor,