4 * A compressor for the LZX compression format, as used in WIM files.
8 * Copyright (C) 2012-2016 Eric Biggers
10 * This file is free software; you can redistribute it and/or modify it under
11 * the terms of the GNU Lesser General Public License as published by the Free
12 * Software Foundation; either version 3 of the License, or (at your option) any
15 * This file is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this file; if not, see http://www.gnu.org/licenses/.
26 * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
27 * compression format, as used in the WIM (Windows IMaging) file format.
29 * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
30 * "Near-optimal" is significantly slower than "lazy", but results in a better
31 * compression ratio. The "near-optimal" algorithm is used at the default compression level.
34 * This file may need some slight modifications to be used outside of the WIM
35 * format. In particular, in other situations the LZX block header might be
36 * slightly different, and sliding window support might be required.
38 * Note: LZX is a compression format derived from DEFLATE, the format used by
39 * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding.
40 * Certain details are quite similar, such as the method for storing Huffman
41 * codes. However, the main differences are:
43 * - LZX preprocesses the data to attempt to make x86 machine code slightly more
44 * compressible before attempting to compress it further.
46 * - LZX uses a "main" alphabet which combines literals and matches, with the
47 * match symbols containing a "length header" (giving all or part of the match
48 * length) and an "offset slot" (giving, roughly speaking, the order of
49 * magnitude of the match offset).
51 * - LZX does not have static Huffman blocks (that is, the kind with preset
52 * Huffman codes); however it does have two types of dynamic Huffman blocks
53 * ("verbatim" and "aligned").
55 * - LZX has a minimum match length of 2 rather than 3. Length 2 matches can be
56 * useful, but generally only if the parser is smart about choosing them.
58 * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
59 * of match offsets. This is very useful for certain types of files, such as
60 * binary files that have repeating records.
68 * The compressor always chooses a block of at least MIN_BLOCK_SIZE bytes,
69 * except if the last block has to be shorter.
71 #define MIN_BLOCK_SIZE 6500
74 * The compressor attempts to end blocks after SOFT_MAX_BLOCK_SIZE bytes, but
75 * the final size might be larger due to matches extending beyond the end of the
76 * block. Specifically:
78 * - The greedy parser may choose an arbitrarily long match starting at the
79 * SOFT_MAX_BLOCK_SIZE'th byte.
81 * - The lazy parser may choose a sequence of literals starting at the
82 * SOFT_MAX_BLOCK_SIZE'th byte when it sees a sequence of increasingly good
83 * matches. The final match may be of arbitrary length. The length of the
84 * literal sequence is approximately limited by the "nice match length" parameter.
87 #define SOFT_MAX_BLOCK_SIZE 100000
90 * The number of observed matches or literals that represents sufficient data to
91 * decide whether the current block should be terminated or not.
93 #define NUM_OBSERVATIONS_PER_BLOCK_CHECK 500
96 * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
97 * excluding the extra "overflow" entries. This value should be high enough so
98 * that nearly all the time, all matches found in a given block can fit in the match
99 * cache. However, fallback behavior (immediately terminating the block) on
100 * cache overflow is still required.
102 #define LZX_CACHE_LENGTH (SOFT_MAX_BLOCK_SIZE * 5)
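/*
 * Illustrative arithmetic (not from the original comments): with
 * SOFT_MAX_BLOCK_SIZE == 100000, LZX_CACHE_LENGTH is 500000 lz_match entries,
 * i.e. an average budget of five entries per position of a soft-maximum-size
 * block (one per-position match count entry plus roughly four cached matches)
 * before the overflow fallback described above kicks in.
 */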
105 * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
106 * ever be saved in the match cache for a single position. Since each match we
107 * save for a single position has a distinct length, we can use the number of
108 * possible match lengths in LZX as this bound. This bound is guaranteed to be
109 * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
110 * it will never actually be reached.
112 #define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
115 * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
116 * This makes it possible to consider fractional bit costs.
118 * Note: this is only useful as a statistical trick for when the true costs are
119 * unknown. In reality, each token in LZX requires a whole number of bits to output.
122 #define LZX_BIT_COST 64
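/*
 * Worked example (illustrative, not part of the original comments): with
 * LZX_BIT_COST == 64, a symbol whose codeword is 5 bits long gets a cost of
 * 5 * 64 == 320, and an estimated fractional cost of 4.5 bits would be stored
 * as 288. Only relative costs matter, so the scaling factor cancels out when
 * candidate paths are compared.
 */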
125 * Should the compressor take into account the costs of aligned offset symbols?
127 #define LZX_CONSIDER_ALIGNED_COSTS 1
130 * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the faster (lazy) algorithm.
133 #define LZX_MAX_FAST_LEVEL 34
136 * BT_MATCHFINDER_HASH2_ORDER is the log base 2 of the number of entries in the
137 * hash table for finding length 2 matches. This could be as high as 16, but
138 * using a smaller hash table speeds up compression due to reduced cache pressure.
141 #define BT_MATCHFINDER_HASH2_ORDER 12
144 * These are the compressor-side limits on the codeword lengths for each Huffman
145 * code. To make outputting bits slightly faster, some of these limits are
146 * lower than the limits defined by the LZX format. This does not significantly
147 * affect the compression ratio, at least for the block sizes we use.
149 #define MAIN_CODEWORD_LIMIT 16
150 #define LENGTH_CODEWORD_LIMIT 12
151 #define ALIGNED_CODEWORD_LIMIT 7
152 #define PRE_CODEWORD_LIMIT 7
154 #include "wimlib/compress_common.h"
155 #include "wimlib/compressor_ops.h"
156 #include "wimlib/error.h"
157 #include "wimlib/lz_extend.h"
158 #include "wimlib/lzx_common.h"
159 #include "wimlib/unaligned.h"
160 #include "wimlib/util.h"
162 /* Matchfinders with 16-bit positions */
164 #define MF_SUFFIX _16
165 #include "wimlib/bt_matchfinder.h"
166 #include "wimlib/hc_matchfinder.h"
168 /* Matchfinders with 32-bit positions */
172 #define MF_SUFFIX _32
173 #include "wimlib/bt_matchfinder.h"
174 #include "wimlib/hc_matchfinder.h"
176 struct lzx_output_bitstream;
178 /* Codewords for the LZX Huffman codes. */
179 struct lzx_codewords {
180 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
181 u32 len[LZX_LENCODE_NUM_SYMBOLS];
182 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
185 /* Codeword lengths (in bits) for the LZX Huffman codes.
186 * A zero length means the corresponding codeword has zero frequency. */
188 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
189 u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
190 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
193 /* Cost model for near-optimal parsing */
196 /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a
197 * length 'len' match that has an offset belonging to 'offset_slot'. */
198 u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
200 /* Cost for each symbol in the main code */
201 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
203 /* Cost for each symbol in the length code */
204 u32 len[LZX_LENCODE_NUM_SYMBOLS];
206 #if LZX_CONSIDER_ALIGNED_COSTS
207 /* Cost for each symbol in the aligned code */
208 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
212 /* Codewords and lengths for the LZX Huffman codes. */
214 struct lzx_codewords codewords;
215 struct lzx_lens lens;
218 /* Symbol frequency counters for the LZX Huffman codes. */
220 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
221 u32 len[LZX_LENCODE_NUM_SYMBOLS];
222 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
225 /* Block split statistics. See "Block splitting algorithm" below. */
226 #define NUM_LITERAL_OBSERVATION_TYPES 8
227 #define NUM_MATCH_OBSERVATION_TYPES 2
228 #define NUM_OBSERVATION_TYPES (NUM_LITERAL_OBSERVATION_TYPES + NUM_MATCH_OBSERVATION_TYPES)
229 struct block_split_stats {
230 u32 new_observations[NUM_OBSERVATION_TYPES];
231 u32 observations[NUM_OBSERVATION_TYPES];
232 u32 num_new_observations;
233 u32 num_observations;
237 * Represents a run of literals followed by a match or end-of-block. This
238 * struct is needed to temporarily store items chosen by the parser, since items
239 * cannot be written until all items for the block have been chosen and the
240 * block's Huffman codes have been computed.
242 struct lzx_sequence {
244 /* The number of literals in the run. This may be 0. The literals are
245 * not stored explicitly in this structure; instead, they are read
246 * directly from the uncompressed data. */
249 /* If the next field doesn't indicate end-of-block, then this is the
250 * match length minus LZX_MIN_MATCH_LEN. */
253 /* If bit 31 is clear, then this field contains the match header in bits
254 * 0-8, and either the match offset plus LZX_OFFSET_ADJUSTMENT or a
255 * recent offset code in bits 9-30. Otherwise (if bit 31 is set), this
256 * sequence's literal run was the last literal run in the block, so
257 * there is no match that follows it. */
258 u32 adjusted_offset_and_match_hdr;
262 * This structure represents a byte position in the input buffer and a node in
263 * the graph of possible match/literal choices.
265 * Logically, each incoming edge to this node is labeled with a literal or a
266 * match that can be taken to reach this position from an earlier position; and
267 * each outgoing edge from this node is labeled with a literal or a match that
268 * can be taken to advance from this position to a later position.
270 struct lzx_optimum_node {
272 /* The cost, in bits, of the lowest-cost path that has been found to
273 * reach this position. This can change as progressively lower cost
274 * paths are found to reach this position. */
278 * The match or literal that was taken to reach this position. This can
279 * change as progressively lower cost paths are found to reach this position.
282 * This variable is divided into two bitfields.
285 * Literals: Low bits are 0, high bits are the literal.
287 * Explicit offset matches:
288 * Low bits are the match length, high bits are the offset plus 2.
290 * Repeat offset matches:
291 * Low bits are the match length, high bits are the queue index.
294 #define OPTIMUM_OFFSET_SHIFT 9
295 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
296 } _aligned_attribute(8);
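/*
 * Packing sketch for the 'item' field above (example values are illustrative
 * only):
 *
 *	literal 'A' (0x41):              item = 0x41 << OPTIMUM_OFFSET_SHIFT
 *	length 18, recent offset R1:     item = (1 << OPTIMUM_OFFSET_SHIFT) | 18
 *	length 18, explicit offset 1000: item = ((1000 + 2) << OPTIMUM_OFFSET_SHIFT) | 18
 *
 * The length occupies the low OPTIMUM_OFFSET_SHIFT bits (OPTIMUM_LEN_MASK), so
 * a zero length distinguishes a literal from a match.
 */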
299 * Least-recently-used queue for match offsets.
301 * This is represented as a 64-bit integer for efficiency. There are three
302 * offsets of 21 bits each; the most significant bit is unused.
304 struct lzx_lru_queue {
308 #define LZX_QUEUE64_OFFSET_SHIFT 21
309 #define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1)
311 #define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT)
312 #define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT)
313 #define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT)
315 #define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT)
316 #define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT)
317 #define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT)
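/*
 * Layout sketch (not from the original comments); bit 63 is the unused bit:
 *
 *	bit: 63  62..........42  41..........21  20...........0
 *	      -        R2              R1              R0
 */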
320 lzx_lru_queue_init(struct lzx_lru_queue *queue)
322 queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) |
323 ((u64)1 << LZX_QUEUE64_R1_SHIFT) |
324 ((u64)1 << LZX_QUEUE64_R2_SHIFT);
328 lzx_lru_queue_R0(struct lzx_lru_queue queue)
330 return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
334 lzx_lru_queue_R1(struct lzx_lru_queue queue)
336 return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
340 lzx_lru_queue_R2(struct lzx_lru_queue queue)
342 return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK;
345 /* Push a match offset onto the front (most recently used) end of the queue. */
346 static inline struct lzx_lru_queue
347 lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
349 return (struct lzx_lru_queue) {
350 .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset,
354 /* Swap a match offset to the front of the queue. */
355 static inline struct lzx_lru_queue
356 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
362 return (struct lzx_lru_queue) {
363 .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) |
364 (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) |
365 (queue.R & LZX_QUEUE64_R2_MASK),
368 return (struct lzx_lru_queue) {
369 .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) |
370 (queue.R & LZX_QUEUE64_R1_MASK) |
371 (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT),
375 /* The main LZX compressor structure */
376 struct lzx_compressor {
378 /* The "nice" match length: if a match of this length is found, then
379 * choose it immediately without further consideration. */
380 unsigned nice_match_length;
382 /* The maximum search depth: consider at most this many potential
383 * matches at each position. */
384 unsigned max_search_depth;
386 /* The log base 2 of the LZX window size for LZ match offset encoding
387 * purposes. This will be >= LZX_MIN_WINDOW_ORDER and <=
388 * LZX_MAX_WINDOW_ORDER. */
389 unsigned window_order;
391 /* The number of symbols in the main alphabet. This depends on
392 * @window_order, since @window_order determines the maximum possible offset.
394 unsigned num_main_syms;
396 /* Number of optimization passes per block */
397 unsigned num_optim_passes;
399 /* The preprocessed buffer of data being compressed */
402 /* The number of bytes of data to be compressed, which is the number of
403 * bytes of data in @in_buffer that are actually valid. */
406 /* Pointer to the compress() implementation chosen at allocation time */
407 void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
409 /* If true, the compressor need not preserve the input buffer if it
410 * compresses the data successfully. */
413 /* The Huffman symbol frequency counters for the current block. */
414 struct lzx_freqs freqs;
416 /* Block split statistics. */
417 struct block_split_stats split_stats;
419 /* The Huffman codes for the current and previous blocks. The one with
420 * index 'codes_index' is for the current block, and the other one is
421 * for the previous block. */
422 struct lzx_codes codes[2];
423 unsigned codes_index;
425 /* The matches and literals that the parser has chosen for the current
426 * block. The required length of this array is limited by the maximum
427 * number of matches that can ever be chosen for a single block, plus
428 * one for the special entry at the end. */
429 struct lzx_sequence chosen_sequences[
430 DIV_ROUND_UP(SOFT_MAX_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];
432 /* Tables for mapping adjusted offsets to offset slots */
434 /* offset slots [0, 29] */
435 u8 offset_slot_tab_1[32768];
437 /* offset slots [30, 49] */
438 u8 offset_slot_tab_2[128];
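	/*
	 * Example lookup (illustrative): an adjusted offset below 32768 maps
	 * directly through offset_slot_tab_1[adjusted_offset]; larger adjusted
	 * offsets map through offset_slot_tab_2[adjusted_offset >> 14], which
	 * works because every offset slot boundary at or above 32768 is a
	 * multiple of 2**14. See lzx_comp_get_offset_slot().
	 */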
441 /* Data for greedy or lazy parsing */
443 /* Hash chains matchfinder (MUST BE LAST!!!) */
445 struct hc_matchfinder_16 hc_mf_16;
446 struct hc_matchfinder_32 hc_mf_32;
450 /* Data for near-optimal parsing */
453 * Array of nodes, one per position, for running the
454 * minimum-cost path algorithm.
456 * This array must be large enough to accommodate the
457 * worst-case number of nodes, which occurs if we find a
458 * match of length LZX_MAX_MATCH_LEN at position
459 * SOFT_MAX_BLOCK_SIZE - 1, producing a block of length
460 * SOFT_MAX_BLOCK_SIZE - 1 + LZX_MAX_MATCH_LEN. Add one
461 * for the end-of-block node.
463 struct lzx_optimum_node optimum_nodes[SOFT_MAX_BLOCK_SIZE - 1 +
464 LZX_MAX_MATCH_LEN + 1];
466 /* The cost model for the current block */
467 struct lzx_costs costs;
470 * Cached matches for the current block. This array
471 * contains the matches that were found at each position
472 * in the block. Specifically, for each position, there
473 * is a special 'struct lz_match' whose 'length' field
474 * contains the number of matches that were found at
475 * that position; this is followed by the matches
476 * themselves, if any, sorted by strictly increasing length.
479 * Note: in rare cases, there will be a very high number
480 * of matches in the block and this array will overflow.
481 * If this happens, we force the end of the current
482 * block. LZX_CACHE_LENGTH is the length at which we
483 * actually check for overflow. The extra slots beyond
484 * this are enough to absorb the worst case overflow,
485 * which occurs if starting at
486 * &match_cache[LZX_CACHE_LENGTH - 1], we write the
487 * match count header, then write
488 * LZX_MAX_MATCHES_PER_POS matches, then skip searching
489 * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
490 * write the match count header for each.
492 struct lz_match match_cache[LZX_CACHE_LENGTH +
493 LZX_MAX_MATCHES_PER_POS +
494 LZX_MAX_MATCH_LEN - 1];
496 /* Binary trees matchfinder (MUST BE LAST!!!) */
498 struct bt_matchfinder_16 bt_mf_16;
499 struct bt_matchfinder_32 bt_mf_32;
506 * Will a matchfinder using 16-bit positions be sufficient for compressing
507 * buffers of up to the specified size? The limit could be 65536 bytes, but we
508 * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
509 * This requires that the limit be no more than the length of offset_slot_tab_1 (currently 32768).
513 lzx_is_16_bit(size_t max_bufsize)
515 STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
516 return max_bufsize <= 32768;
520 * The following macros call either the 16-bit or the 32-bit version of a
521 * matchfinder function based on the value of 'is_16_bit', which will be known
522 * at compilation time.
525 #define CALL_HC_MF(is_16_bit, c, funcname, ...) \
526 ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
527 CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__))
529 #define CALL_BT_MF(is_16_bit, c, funcname, ...) \
530 ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \
531 CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__))
534 * Structure to keep track of the current state of sending bits to the
535 * compressed output buffer.
537 * The LZX bitstream is encoded as a sequence of 16-bit coding units.
539 struct lzx_output_bitstream {
541 /* Bits that haven't yet been written to the output buffer. */
542 machine_word_t bitbuf;
544 /* Number of bits currently held in @bitbuf. */
547 /* Pointer to the start of the output buffer. */
550 /* Pointer to the position in the output buffer at which the next coding
551 * unit should be written. */
554 /* Pointer just past the end of the output buffer, rounded down to a
555 * 2-byte boundary. */
559 /* Can the specified number of bits always be added to 'bitbuf' after any
560 * pending 16-bit coding units have been flushed? */
561 #define CAN_BUFFER(n) ((n) <= (8 * sizeof(machine_word_t)) - 15)
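/*
 * Worked example (assuming the limits defined earlier): on a 64-bit build,
 * CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT) is CAN_BUFFER(48), and 48 <= 64 - 15,
 * so three main-code symbols can be buffered between flushes. On a 32-bit
 * build only 32 - 15 = 17 bits are guaranteed, so symbols are flushed more
 * often, as the literal-output code further below illustrates.
 */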
564 * Initialize the output bitstream.
567 * The output bitstream structure to initialize.
569 * The buffer being written to.
571 * Size of @buffer, in bytes.
574 lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
579 os->next = os->start;
580 os->end = os->start + (size & ~1);
583 /* Add some bits to the bitbuffer variable of the output bitstream. The caller
584 * must make sure there is enough room. */
586 lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
588 os->bitbuf = (os->bitbuf << num_bits) | bits;
589 os->bitcount += num_bits;
592 /* Flush bits from the bitbuffer variable to the output buffer. 'max_num_bits'
593 * specifies the maximum number of bits that may have been added since the last flush.
596 lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
598 /* Masking the number of bits to shift is only needed to avoid undefined
599 * behavior; we don't actually care about the results of bad shifts. On
600 * x86, the explicit masking generates no extra code. */
601 const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1;
603 if (os->end - os->next < 6)
605 put_unaligned_le16(os->bitbuf >> ((os->bitcount - 16) &
606 shift_mask), os->next + 0);
607 if (max_num_bits > 16)
608 put_unaligned_le16(os->bitbuf >> ((os->bitcount - 32) &
609 shift_mask), os->next + 2);
610 if (max_num_bits > 32)
611 put_unaligned_le16(os->bitbuf >> ((os->bitcount - 48) &
612 shift_mask), os->next + 4);
613 os->next += (os->bitcount >> 4) << 1;
617 /* Add at most 16 bits to the bitbuffer and flush it. */
619 lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
621 lzx_add_bits(os, bits, num_bits);
622 lzx_flush_bits(os, 16);
626 * Flush the last coding unit to the output buffer if needed. Return the total
627 * number of bytes written to the output buffer, or 0 if an overflow occurred.
630 lzx_flush_output(struct lzx_output_bitstream *os)
632 if (os->end - os->next < 6)
635 if (os->bitcount != 0) {
636 put_unaligned_le16(os->bitbuf << (16 - os->bitcount), os->next);
640 return os->next - os->start;
643 /* Build the main, length, and aligned offset Huffman codes used in LZX.
645 * This takes as input the frequency tables for each code and produces as output
646 * a set of tables that map symbols to codewords and codeword lengths. */
648 lzx_make_huffman_codes(struct lzx_compressor *c)
650 const struct lzx_freqs *freqs = &c->freqs;
651 struct lzx_codes *codes = &c->codes[c->codes_index];
653 STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
654 MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
655 STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
656 LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
657 STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
658 ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
660 make_canonical_huffman_code(c->num_main_syms,
664 codes->codewords.main);
666 make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
667 LENGTH_CODEWORD_LIMIT,
670 codes->codewords.len);
672 make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
673 ALIGNED_CODEWORD_LIMIT,
676 codes->codewords.aligned);
679 /* Reset the symbol frequencies for the LZX Huffman codes. */
681 lzx_reset_symbol_frequencies(struct lzx_compressor *c)
683 memset(&c->freqs, 0, sizeof(c->freqs));
687 lzx_compute_precode_items(const u8 lens[restrict],
688 const u8 prev_lens[restrict],
689 u32 precode_freqs[restrict],
690 unsigned precode_items[restrict])
699 itemptr = precode_items;
702 while (!((len = lens[run_start]) & 0x80)) {
704 /* len = the length being repeated */
706 /* Find the next run of codeword lengths. */
708 run_end = run_start + 1;
710 /* Fast case for a single length. */
711 if (likely(len != lens[run_end])) {
712 delta = prev_lens[run_start] - len;
715 precode_freqs[delta]++;
721 /* Extend the run. */
724 } while (len == lens[run_end]);
729 /* Symbol 18: RLE 20 to 51 zeroes at a time. */
730 while ((run_end - run_start) >= 20) {
731 extra_bits = min((run_end - run_start) - 20, 0x1f);
733 *itemptr++ = 18 | (extra_bits << 5);
734 run_start += 20 + extra_bits;
737 /* Symbol 17: RLE 4 to 19 zeroes at a time. */
738 if ((run_end - run_start) >= 4) {
739 extra_bits = min((run_end - run_start) - 4, 0xf);
741 *itemptr++ = 17 | (extra_bits << 5);
742 run_start += 4 + extra_bits;
746 /* A run of nonzero lengths. */
748 /* Symbol 19: RLE 4 to 5 of any length at a time. */
749 while ((run_end - run_start) >= 4) {
750 extra_bits = (run_end - run_start) > 4;
751 delta = prev_lens[run_start] - len;
755 precode_freqs[delta]++;
756 *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
757 run_start += 4 + extra_bits;
761 /* Output any remaining lengths without RLE. */
762 while (run_start != run_end) {
763 delta = prev_lens[run_start] - len;
766 precode_freqs[delta]++;
772 return itemptr - precode_items;
776 * Output a Huffman code in the compressed form used in LZX.
778 * The Huffman code is represented in the output as a logical series of codeword
779 * lengths from which the Huffman code, which must be in canonical form, can be reconstructed.
782 * The codeword lengths are themselves compressed using a separate Huffman code,
783 * the "precode", which contains a symbol for each possible codeword length in
784 * the larger code as well as several special symbols to represent repeated
785 * codeword lengths (a form of run-length encoding). The precode is itself
786 * constructed in canonical form, and its codeword lengths are represented
787 * literally in 20 4-bit fields that immediately precede the compressed codeword
788 * lengths of the larger code.
790 * Furthermore, the codeword lengths of the larger code are actually represented
791 * as deltas from the codeword lengths of the corresponding code in the previous block.
795 * Bitstream to which to write the compressed Huffman code.
797 * The codeword lengths, indexed by symbol, in the Huffman code.
799 * The codeword lengths, indexed by symbol, in the corresponding Huffman
800 * code in the previous block, or all zeroes if this is the first block.
802 * The number of symbols in the Huffman code.
805 lzx_write_compressed_code(struct lzx_output_bitstream *os,
806 const u8 lens[restrict],
807 const u8 prev_lens[restrict],
810 u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
811 u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
812 u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
813 unsigned precode_items[num_lens];
814 unsigned num_precode_items;
815 unsigned precode_item;
816 unsigned precode_sym;
818 u8 saved = lens[num_lens];
819 *(u8 *)(lens + num_lens) = 0x80;
821 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
822 precode_freqs[i] = 0;
824 /* Compute the "items" (RLE / literal tokens and extra bits) with which
825 * the codeword lengths in the larger code will be output. */
826 num_precode_items = lzx_compute_precode_items(lens,
831 /* Build the precode. */
832 STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 &&
833 PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN);
834 make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
836 precode_freqs, precode_lens,
839 /* Output the lengths of the codewords in the precode. */
840 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
841 lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
843 /* Output the encoded lengths of the codewords in the larger code. */
844 for (i = 0; i < num_precode_items; i++) {
845 precode_item = precode_items[i];
846 precode_sym = precode_item & 0x1F;
847 lzx_add_bits(os, precode_codewords[precode_sym],
848 precode_lens[precode_sym]);
849 if (precode_sym >= 17) {
850 if (precode_sym == 17) {
851 lzx_add_bits(os, precode_item >> 5, 4);
852 } else if (precode_sym == 18) {
853 lzx_add_bits(os, precode_item >> 5, 5);
855 lzx_add_bits(os, (precode_item >> 5) & 1, 1);
856 precode_sym = precode_item >> 6;
857 lzx_add_bits(os, precode_codewords[precode_sym],
858 precode_lens[precode_sym]);
861 STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1));
862 lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1);
865 *(u8 *)(lens + num_lens) = saved;
869 * Write all matches and literal bytes (which were precomputed) in an LZX
870 * compressed block to the output bitstream in the final compressed representation.
874 * The output bitstream.
876 * The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
877 * LZX_BLOCKTYPE_VERBATIM).
879 * The uncompressed data of the block.
881 * The matches and literals to output, given as a series of sequences.
883 * The main, length, and aligned offset Huffman codes for the current
884 * LZX compressed block.
887 lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
888 const u8 *block_data, const struct lzx_sequence sequences[],
889 const struct lzx_codes *codes)
891 const struct lzx_sequence *seq = sequences;
892 u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED);
895 /* Output the next sequence. */
897 unsigned litrunlen = seq->litrunlen;
899 unsigned main_symbol;
900 unsigned adjusted_length;
902 unsigned offset_slot;
903 unsigned num_extra_bits;
906 /* Output the literal run of the sequence. */
908 if (litrunlen) { /* Is the literal run nonempty? */
910 /* Verify optimization is enabled on 64-bit */
911 STATIC_ASSERT(sizeof(machine_word_t) < 8 ||
912 CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT));
914 if (CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT)) {
916 /* 64-bit: write 3 literals at a time. */
917 while (litrunlen >= 3) {
918 unsigned lit0 = block_data[0];
919 unsigned lit1 = block_data[1];
920 unsigned lit2 = block_data[2];
921 lzx_add_bits(os, codes->codewords.main[lit0],
922 codes->lens.main[lit0]);
923 lzx_add_bits(os, codes->codewords.main[lit1],
924 codes->lens.main[lit1]);
925 lzx_add_bits(os, codes->codewords.main[lit2],
926 codes->lens.main[lit2]);
927 lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
932 unsigned lit = *block_data++;
933 lzx_add_bits(os, codes->codewords.main[lit],
934 codes->lens.main[lit]);
936 unsigned lit = *block_data++;
937 lzx_add_bits(os, codes->codewords.main[lit],
938 codes->lens.main[lit]);
939 lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
941 lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT);
945 /* 32-bit: write 1 literal at a time. */
947 unsigned lit = *block_data++;
948 lzx_add_bits(os, codes->codewords.main[lit],
949 codes->lens.main[lit]);
950 lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
951 } while (--litrunlen);
955 /* Was this the last literal run? */
956 if (seq->adjusted_offset_and_match_hdr & 0x80000000)
959 /* Nope; output the match. */
961 match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
962 main_symbol = LZX_NUM_CHARS + match_hdr;
963 adjusted_length = seq->adjusted_length;
965 block_data += adjusted_length + LZX_MIN_MATCH_LEN;
967 offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
968 adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
970 num_extra_bits = lzx_extra_offset_bits[offset_slot];
971 extra_bits = adjusted_offset - lzx_offset_slot_base[offset_slot];
973 #define MAX_MATCH_BITS (MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
974 14 + ALIGNED_CODEWORD_LIMIT)
976 /* Verify optimization is enabled on 64-bit */
977 STATIC_ASSERT(sizeof(machine_word_t) < 8 || CAN_BUFFER(MAX_MATCH_BITS));
979 /* Output the main symbol for the match. */
981 lzx_add_bits(os, codes->codewords.main[main_symbol],
982 codes->lens.main[main_symbol]);
983 if (!CAN_BUFFER(MAX_MATCH_BITS))
984 lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
986 /* If needed, output the length symbol for the match. */
988 if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
989 lzx_add_bits(os, codes->codewords.len[adjusted_length -
990 LZX_NUM_PRIMARY_LENS],
991 codes->lens.len[adjusted_length -
992 LZX_NUM_PRIMARY_LENS]);
993 if (!CAN_BUFFER(MAX_MATCH_BITS))
994 lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
997 /* Output the extra offset bits for the match. In aligned
998 * offset blocks, the lowest 3 bits of the adjusted offset are
999 * Huffman-encoded using the aligned offset code, provided that
1000 * at least 3 extra offset bits are required. All other
1001 * extra offset bits are output verbatim. */
1003 if ((adjusted_offset & ones_if_aligned) >= 16) {
1005 lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
1006 num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
1007 if (!CAN_BUFFER(MAX_MATCH_BITS))
1008 lzx_flush_bits(os, 14);
1010 lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
1011 LZX_ALIGNED_OFFSET_BITMASK],
1012 codes->lens.aligned[adjusted_offset &
1013 LZX_ALIGNED_OFFSET_BITMASK]);
1014 if (!CAN_BUFFER(MAX_MATCH_BITS))
1015 lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
1017 STATIC_ASSERT(CAN_BUFFER(17));
1019 lzx_add_bits(os, extra_bits, num_extra_bits);
1020 if (!CAN_BUFFER(MAX_MATCH_BITS))
1021 lzx_flush_bits(os, 17);
1024 if (CAN_BUFFER(MAX_MATCH_BITS))
1025 lzx_flush_bits(os, MAX_MATCH_BITS);
1027 /* Advance to the next sequence. */
1033 lzx_write_compressed_block(const u8 *block_begin,
1036 unsigned window_order,
1037 unsigned num_main_syms,
1038 const struct lzx_sequence sequences[],
1039 const struct lzx_codes * codes,
1040 const struct lzx_lens * prev_lens,
1041 struct lzx_output_bitstream * os)
1043 /* The first three bits indicate the type of block and are one of the
1044 * LZX_BLOCKTYPE_* constants. */
1045 lzx_write_bits(os, block_type, 3);
1047 /* Output the block size.
1049 * The original LZX format seemed to always encode the block size in 3
1050 * bytes. However, the implementation in WIMGAPI, as used in WIM files,
1051 * uses the first bit to indicate whether the block is the default size
1052 * (32768) or a different size given explicitly by the next 16 bits.
1054 * By default, this compressor uses a window size of 32768 and therefore
1055 * follows the WIMGAPI behavior. However, this compressor also supports
1056 * window sizes greater than 32768 bytes, which do not appear to be
1057 * supported by WIMGAPI. In such cases, we retain the default size bit
1058 * to mean a size of 32768 bytes but output non-default block sizes in 24
1059 * bits rather than 16. The compatibility of this behavior is unknown
1060 * because WIMs created with chunk size greater than 32768 can seemingly
1061 * only be opened by wimlib anyway. */
1062 if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
1063 lzx_write_bits(os, 1, 1);
1065 lzx_write_bits(os, 0, 1);
1067 if (window_order >= 16)
1068 lzx_write_bits(os, block_size >> 16, 8);
1070 lzx_write_bits(os, block_size & 0xFFFF, 16);
1073 /* If it's an aligned offset block, output the aligned offset code. */
1074 if (block_type == LZX_BLOCKTYPE_ALIGNED) {
1075 for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1076 lzx_write_bits(os, codes->lens.aligned[i],
1077 LZX_ALIGNEDCODE_ELEMENT_SIZE);
1081 /* Output the main code (two parts). */
1082 lzx_write_compressed_code(os, codes->lens.main,
1085 lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
1086 prev_lens->main + LZX_NUM_CHARS,
1087 num_main_syms - LZX_NUM_CHARS);
1089 /* Output the length code. */
1090 lzx_write_compressed_code(os, codes->lens.len,
1092 LZX_LENCODE_NUM_SYMBOLS);
1094 /* Output the compressed matches and literals. */
1095 lzx_write_sequences(os, block_type, block_begin, sequences, codes);
1098 /* Given the frequencies of symbols in an LZX-compressed block and the
1099 * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
1100 * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
1101 * will take fewer bits to output. */
1103 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1104 const struct lzx_codes * codes)
1106 u32 aligned_cost = 0;
1107 u32 verbatim_cost = 0;
1109 /* A verbatim block requires 3 bits in each place that an aligned symbol
1110 * would be used in an aligned offset block. */
1111 for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1112 verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
1113 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1116 /* Account for output of the aligned offset code. */
1117 aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1119 if (aligned_cost < verbatim_cost)
1120 return LZX_BLOCKTYPE_ALIGNED;
1122 return LZX_BLOCKTYPE_VERBATIM;
1126 * Return the offset slot for the specified adjusted match offset, using the
1127 * compressor's acceleration tables to speed up the mapping.
1129 static inline unsigned
1130 lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
1133 if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
1134 return c->offset_slot_tab_1[adjusted_offset];
1135 return c->offset_slot_tab_2[adjusted_offset >> 14];
1139 * Finish an LZX block:
1141 * - build the Huffman codes
1142 * - decide whether to output the block as VERBATIM or ALIGNED
1143 * - output the block
1144 * - swap the indices of the current and previous Huffman codes
1147 lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
1148 const u8 *block_begin, u32 block_size, u32 seq_idx)
1152 lzx_make_huffman_codes(c);
1154 block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
1155 &c->codes[c->codes_index]);
1156 lzx_write_compressed_block(block_begin,
1161 &c->chosen_sequences[seq_idx],
1162 &c->codes[c->codes_index],
1163 &c->codes[c->codes_index ^ 1].lens,
1165 c->codes_index ^= 1;
1168 /* Tally the Huffman symbol for a literal and increment the literal run length.
1171 lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
1173 c->freqs.main[literal]++;
1177 /* Tally the Huffman symbol for a match, save the match data and the length of
1178 * the preceding literal run in the next lzx_sequence, and update the recent offsets queue.
1181 lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
1182 u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
1183 u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
1185 u32 litrunlen = *litrunlen_p;
1186 struct lzx_sequence *next_seq = *next_seq_p;
1187 unsigned offset_slot;
1190 v = length - LZX_MIN_MATCH_LEN;
1192 /* Save the literal run length and adjusted length. */
1193 next_seq->litrunlen = litrunlen;
1194 next_seq->adjusted_length = v;
1196 /* Compute the length header and tally the length symbol if needed */
1197 if (v >= LZX_NUM_PRIMARY_LENS) {
1198 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1199 v = LZX_NUM_PRIMARY_LENS;
1202 /* Compute the offset slot */
1203 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1205 /* Compute the match header. */
1206 v += offset_slot * LZX_NUM_LEN_HEADERS;
1208 /* Save the adjusted offset and match header. */
1209 next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v;
1211 /* Tally the main symbol. */
1212 c->freqs.main[LZX_NUM_CHARS + v]++;
1214 /* Update the recent offsets queue. */
1215 if (offset_data < LZX_NUM_RECENT_OFFSETS) {
1216 /* Repeat offset match */
1217 swap(recent_offsets[0], recent_offsets[offset_data]);
1219 /* Explicit offset match */
1221 /* Tally the aligned offset symbol if needed */
1222 if (offset_data >= 16)
1223 c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1225 recent_offsets[2] = recent_offsets[1];
1226 recent_offsets[1] = recent_offsets[0];
1227 recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT;
1230 /* Reset the literal run length and advance to the next sequence. */
1231 *next_seq_p = next_seq + 1;
1235 /* Finish the last lzx_sequence. The last lzx_sequence is just a literal run;
1236 * there is no match. This literal run may be empty. */
1238 lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
1240 last_seq->litrunlen = litrunlen;
1242 /* Special value to mark last sequence */
1243 last_seq->adjusted_offset_and_match_hdr = 0x80000000;
1246 /******************************************************************************/
1249 * Block splitting algorithm. The problem is to decide when it is worthwhile to
1250 * start a new block with new entropy codes. There is a theoretically optimal
1251 * solution: recursively consider every possible block split, considering the
1252 * exact cost of each block, and choose the minimum cost approach. But this is
1253 * far too slow. Instead, as an approximation, we can count symbols and after
1254 * every N symbols, compare the expected distribution of symbols based on the
1255 * previous data with the actual distribution. If they differ "by enough", then
1256 * start a new block.
1258 * As an optimization and heuristic, we don't distinguish between every symbol
1259 * but rather we combine many symbols into a single "observation type". For
1260 * literals we only look at the high bits and low bits, and for matches we only
1261 * look at whether the match is long or not. The assumption is that for typical
1262 * "real" data, places that are good block boundaries will tend to be noticeable
1263 * based only on changes in these aggregate frequencies, without looking for
1264 * subtle differences in individual symbols. For example, a change from ASCII
1265 * bytes to non-ASCII bytes, or from few matches (generally less compressible)
1266 * to many matches (generally more compressible), would be easily noticed based
1267 * on the aggregates.
1269 * For determining whether the frequency distributions are "different enough" to
1270 * start a new block, the simple heuristic of splitting when the sum of absolute
1271 * differences exceeds a constant seems to be good enough. We also add a number
1272 * proportional to the block size so that the algorithm is more likely to end
1273 * large blocks than small blocks. This reflects the general expectation that
1274 * it will become increasingly beneficial to start a new block as the current
1275 * block grows larger.
1277 * Finally, for an approximation, it is not strictly necessary that the exact
1278 * symbols being used are considered. With "near-optimal parsing", for example,
1279 * the actual symbols that will be used are unknown until after the block
1280 * boundary is chosen and the block has been optimized. Since the final choices
1281 * cannot be used, we can use preliminary "greedy" choices instead.
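/*
 * Worked example of the observation types (values derived from the heuristics
 * below, not from the original comments): the literal 'A' (0x41) falls into
 * observation type ((0x41 >> 5) & 0x6) | (0x41 & 1) == 3, and any match of
 * length >= 5 falls into the last type, NUM_LITERAL_OBSERVATION_TYPES + 1 == 9.
 */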
1284 /* Initialize the block split statistics when starting a new block. */
1286 init_block_split_stats(struct block_split_stats *stats)
1288 for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
1289 stats->new_observations[i] = 0;
1290 stats->observations[i] = 0;
1292 stats->num_new_observations = 0;
1293 stats->num_observations = 0;
1296 /* Literal observation. Heuristic: use the top 2 bits and the low bit of the
1297 * literal, for 8 possible literal observation types. */
1299 observe_literal(struct block_split_stats *stats, u8 lit)
1301 stats->new_observations[((lit >> 5) & 0x6) | (lit & 1)]++;
1302 stats->num_new_observations++;
1305 /* Match observation. Heuristic: use one observation type for "short match" and
1306 * one observation type for "long match". */
1308 observe_match(struct block_split_stats *stats, unsigned length)
1310 stats->new_observations[NUM_LITERAL_OBSERVATION_TYPES + (length >= 5)]++;
1311 stats->num_new_observations++;
1315 do_end_block_check(struct block_split_stats *stats, u32 block_size)
1317 if (stats->num_observations > 0) {
1319 /* Note: to avoid slow divisions, we do not divide by
1320 * 'num_observations', but rather do all math with the numbers
1321 * multiplied by 'num_observations'. */
1322 u32 total_delta = 0;
1323 for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
1324 u32 expected = stats->observations[i] * stats->num_new_observations;
1325 u32 actual = stats->new_observations[i] * stats->num_observations;
1326 u32 delta = (actual > expected) ? actual - expected :
1328 total_delta += delta;
1331 /* Ready to end the block? */
1332 if (total_delta + (block_size / 1024) * stats->num_observations >=
1333 stats->num_new_observations * 51 / 64 * stats->num_observations)
1337 for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
1338 stats->num_observations += stats->new_observations[i];
1339 stats->observations[i] += stats->new_observations[i];
1340 stats->new_observations[i] = 0;
1342 stats->num_new_observations = 0;
1347 should_end_block(struct block_split_stats *stats,
1348 const u8 *in_block_begin, const u8 *in_next, const u8 *in_end)
1350 /* Ready to check block split statistics? */
1351 if (stats->num_new_observations < NUM_OBSERVATIONS_PER_BLOCK_CHECK ||
1352 in_next - in_block_begin < MIN_BLOCK_SIZE ||
1353 in_end - in_next < MIN_BLOCK_SIZE)
1356 return do_end_block_check(stats, in_next - in_block_begin);
1359 /******************************************************************************/
1362 * Given the minimum-cost path computed through the item graph for the current
1363 * block, walk the path and count how many of each symbol in each Huffman-coded
1364 * alphabet would be required to output the items (matches and literals) along
1367 * Note that the path will be walked backwards (from the end of the block to the
1368 * beginning of the block), but this doesn't matter because this function only
1369 * computes frequencies.
1372 lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1374 u32 node_idx = block_size;
1379 unsigned offset_slot;
1381 /* Tally literals until either a match or the beginning of the
1382 * block is reached. */
1384 u32 item = c->optimum_nodes[node_idx].item;
1386 len = item & OPTIMUM_LEN_MASK;
1387 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1389 if (len != 0) /* Not a literal? */
1392 /* Tally the main symbol for the literal. */
1393 c->freqs.main[offset_data]++;
1395 if (--node_idx == 0) /* Beginning of block was reached? */
1401 /* Tally a match. */
1403 /* Tally the aligned offset symbol if needed. */
1404 if (offset_data >= 16)
1405 c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1407 /* Tally the length symbol if needed. */
1408 v = len - LZX_MIN_MATCH_LEN;
1409 if (v >= LZX_NUM_PRIMARY_LENS) {
1410 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1411 v = LZX_NUM_PRIMARY_LENS;
1414 /* Tally the main symbol. */
1415 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1416 v += offset_slot * LZX_NUM_LEN_HEADERS;
1417 c->freqs.main[LZX_NUM_CHARS + v]++;
1419 if (node_idx == 0) /* Beginning of block was reached? */
1425 * Like lzx_tally_item_list(), but this function also generates the list of
1426 * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
1427 * ready to be output to the bitstream after the Huffman codes are computed.
1428 * The lzx_sequences will be written to decreasing memory addresses as the path
1429 * is walked backwards, which means they will end up in the expected
1430 * first-to-last order. The return value is the index in c->chosen_sequences at
1431 * which the lzx_sequences begin.
1434 lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
1436 u32 node_idx = block_size;
1437 u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
1440 /* Special value to mark last sequence */
1441 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000;
1443 lit_start_node = node_idx;
1448 unsigned offset_slot;
1450 /* Record literals until either a match or the beginning of the
1451 * block is reached. */
1453 u32 item = c->optimum_nodes[node_idx].item;
1455 len = item & OPTIMUM_LEN_MASK;
1456 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
1458 if (len != 0) /* Not a literal? */
1461 /* Tally the main symbol for the literal. */
1462 c->freqs.main[offset_data]++;
1464 if (--node_idx == 0) /* Beginning of block was reached? */
1468 /* Save the literal run length for the next sequence (the
1469 * "previous sequence" when walking backwards). */
1470 c->chosen_sequences[seq_idx--].litrunlen = lit_start_node - node_idx;
1472 lit_start_node = node_idx;
1474 /* Record a match. */
1476 /* Tally the aligned offset symbol if needed. */
1477 if (offset_data >= 16)
1478 c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
1480 /* Save the adjusted length. */
1481 v = len - LZX_MIN_MATCH_LEN;
1482 c->chosen_sequences[seq_idx].adjusted_length = v;
1484 /* Tally the length symbol if needed. */
1485 if (v >= LZX_NUM_PRIMARY_LENS) {
1486 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
1487 v = LZX_NUM_PRIMARY_LENS;
1490 /* Tally the main symbol. */
1491 offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
1492 v += offset_slot * LZX_NUM_LEN_HEADERS;
1493 c->freqs.main[LZX_NUM_CHARS + v]++;
1495 /* Save the adjusted offset and match header. */
1496 c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr =
1497 (offset_data << 9) | v;
1499 if (node_idx == 0) /* Beginning of block was reached? */
1504 /* Save the literal run length for the first sequence. */
1505 c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
1507 /* Return the index in c->chosen_sequences at which the lzx_sequences begin.
1513 * Find an inexpensive path through the graph of possible match/literal choices
1514 * for the current block. The nodes of the graph are
1515 * c->optimum_nodes[0...block_size]. They correspond directly to the bytes in
1516 * the current block, plus one extra node for end-of-block. The edges of the
1517 * graph are matches and literals. The goal is to find the minimum cost path
1518 * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
1520 * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
1521 * proceeding forwards one node at a time. At each node, a selection of matches
1522 * (len >= 2), as well as the literal byte (len = 1), is considered. An item of
1523 * length 'len' provides a new path to reach the node 'len' bytes later. If
1524 * such a path is the lowest cost found so far to reach that later node, then
1525 * that later node is updated with the new path.
1527 * Note that although this algorithm is based on minimum cost path search, due
1528 * to various simplifying assumptions the result is not guaranteed to be the
1529 * true minimum cost, or "optimal", path over the graph of all valid LZX
1530 * representations of this block.
1532 * Also, note that because of the presence of the recent offsets queue (which is
1533 * a type of adaptive state), the algorithm cannot work backwards and compute
1534 * "cost to end" instead of "cost to beginning". Furthermore, the way the
1535 * algorithm handles this adaptive state in the "minimum cost" parse is actually
1536 * only an approximation. It's possible for the globally optimal, minimum cost
1537 * path to contain a prefix, ending at a position, where that path prefix is
1538 * *not* the minimum cost path to that position. This can happen if such a path
1539 * prefix results in a different adaptive state which results in lower costs
1540 * later. The algorithm does not solve this problem; it only considers the
1541 * lowest cost to reach each individual position.
1543 static inline struct lzx_lru_queue
1544 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
1545 const u8 * const restrict block_begin,
1546 const u32 block_size,
1547 const struct lzx_lru_queue initial_queue,
1550 struct lzx_optimum_node *cur_node = c->optimum_nodes;
1551 struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
1552 struct lz_match *cache_ptr = c->match_cache;
1553 const u8 *in_next = block_begin;
1554 const u8 * const block_end = block_begin + block_size;
1556 /* Instead of storing the match offset LRU queues in the
1557 * 'lzx_optimum_node' structures, we save memory (and cache lines) by
1558 * storing them in a smaller array. This works because the algorithm
1559 * only requires a limited history of the adaptive state. Once a given
1560 * state is more than LZX_MAX_MATCH_LEN bytes behind the current node,
1561 * it is no longer needed. */
1562 struct lzx_lru_queue queues[512];
1564 STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
1565 #define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
1567 /* Initially, the cost to reach each node is "infinity". */
1568 memset(c->optimum_nodes, 0xFF,
1569 (block_size + 1) * sizeof(c->optimum_nodes[0]));
1571 QUEUE(block_begin) = initial_queue;
1573 /* The following loop runs 'block_size' iterations, one per node. */
1575 unsigned num_matches;
1580 * A selection of matches for the block was already saved in
1581 * memory so that we don't have to run the uncompressed data
1582 * through the matchfinder on every optimization pass. However,
1583 * we still search for repeat offset matches during each
1584 * optimization pass because we cannot predict the state of the
1585 * recent offsets queue. But as a heuristic, we don't bother
1586 * searching for repeat offset matches if the general-purpose
1587 * matchfinder failed to find any matches.
1589 * Note that a match of length n at some offset implies there is
1590 * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
1591 * that same offset. In other words, we don't necessarily need
1592 * to use the full length of a match. The key heuristic that
1593 * saves a significant amount of time is that for each
1594 * distinct length, we only consider the smallest offset for
1595 * which that length is available. This heuristic also applies
1596 * to repeat offsets, which we order specially: R0 < R1 < R2 <
1597 * any explicit offset. Of course, this heuristic may
1598 * produce suboptimal results because offset slots in LZX are
1599 * subject to entropy encoding, but in practice this is a useful heuristic.
1603 num_matches = cache_ptr->length;
1607 struct lz_match *end_matches = cache_ptr + num_matches;
1608 unsigned next_len = LZX_MIN_MATCH_LEN;
1609 unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
1612 /* Consider R0 match */
1613 matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
1614 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1616 STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
1618 u32 cost = cur_node->cost +
1619 c->costs.match_cost[0][
1620 next_len - LZX_MIN_MATCH_LEN];
1621 if (cost <= (cur_node + next_len)->cost) {
1622 (cur_node + next_len)->cost = cost;
1623 (cur_node + next_len)->item =
1624 (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
1626 if (unlikely(++next_len > max_len)) {
1627 cache_ptr = end_matches;
1630 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1634 /* Consider R1 match */
1635 matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next));
1636 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1638 if (matchptr[next_len - 1] != in_next[next_len - 1])
1640 for (unsigned len = 2; len < next_len - 1; len++)
1641 if (matchptr[len] != in_next[len])
1644 u32 cost = cur_node->cost +
1645 c->costs.match_cost[1][
1646 next_len - LZX_MIN_MATCH_LEN];
1647 if (cost <= (cur_node + next_len)->cost) {
1648 (cur_node + next_len)->cost = cost;
1649 (cur_node + next_len)->item =
1650 (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
1652 if (unlikely(++next_len > max_len)) {
1653 cache_ptr = end_matches;
1656 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1660 /* Consider R2 match */
1661 matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next));
1662 if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
1664 if (matchptr[next_len - 1] != in_next[next_len - 1])
1666 for (unsigned len = 2; len < next_len - 1; len++)
1667 if (matchptr[len] != in_next[len])
1670 u32 cost = cur_node->cost +
1671 c->costs.match_cost[2][
1672 next_len - LZX_MIN_MATCH_LEN];
1673 if (cost <= (cur_node + next_len)->cost) {
1674 (cur_node + next_len)->cost = cost;
1675 (cur_node + next_len)->item =
1676 (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
1678 if (unlikely(++next_len > max_len)) {
1679 cache_ptr = end_matches;
1682 } while (in_next[next_len - 1] == matchptr[next_len - 1]);
1686 while (next_len > cache_ptr->length)
1687 if (++cache_ptr == end_matches)
1690 /* Consider explicit offset matches */
1692 u32 offset = cache_ptr->offset;
1693 u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
1694 unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
1696 u32 base_cost = cur_node->cost;
1698 #if LZX_CONSIDER_ALIGNED_COSTS
1699 if (offset_data >= 16)
1700 base_cost += c->costs.aligned[offset_data &
1701 LZX_ALIGNED_OFFSET_BITMASK];
1705 u32 cost = base_cost +
1706 c->costs.match_cost[offset_slot][
1707 next_len - LZX_MIN_MATCH_LEN];
1708 if (cost < (cur_node + next_len)->cost) {
1709 (cur_node + next_len)->cost = cost;
1710 (cur_node + next_len)->item =
1711 (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
1713 } while (++next_len <= cache_ptr->length);
1714 } while (++cache_ptr != end_matches);
1719 /* Consider coding a literal.
1721 * To avoid an extra branch, actually checking the preferability
1722 * of coding the literal is integrated into the queue update code below.
1724 literal = *in_next++;
1725 cost = cur_node->cost + c->costs.main[literal];
1727 /* Advance to the next position. */
1730 /* The lowest-cost path to the current position is now known.
1731 * Finalize the recent offsets queue that results from taking
1732 * this lowest-cost path. */
1734 if (cost <= cur_node->cost) {
1735 /* Literal: queue remains unchanged. */
1736 cur_node->cost = cost;
1737 cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
1738 QUEUE(in_next) = QUEUE(in_next - 1);
1740 /* Match: queue update is needed. */
1741 unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1742 u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1743 if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
1744 /* Explicit offset match: insert offset at front */
1746 lzx_lru_queue_push(QUEUE(in_next - len),
1747 offset_data - LZX_OFFSET_ADJUSTMENT);
1749 /* Repeat offset match: swap offset to front */
1751 lzx_lru_queue_swap(QUEUE(in_next - len),
1755 } while (cur_node != end_node);
1757 /* Return the match offset queue at the end of the minimum cost path. */
1758 return QUEUE(block_end);
1761 /* Given the costs for the main and length codewords, compute 'match_costs'. */
1763 lzx_compute_match_costs(struct lzx_compressor *c)
1765 unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) /
1766 LZX_NUM_LEN_HEADERS;
1767 struct lzx_costs *costs = &c->costs;
1769 for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
1771 u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
1772 unsigned main_symbol = LZX_NUM_CHARS + (offset_slot *
1773 LZX_NUM_LEN_HEADERS);
1776 #if LZX_CONSIDER_ALIGNED_COSTS
1777 if (offset_slot >= 8)
1778 extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1781 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++)
1782 costs->match_cost[offset_slot][i] =
1783 costs->main[main_symbol++] + extra_cost;
1785 extra_cost += costs->main[main_symbol];
1787 for (; i < LZX_NUM_LENS; i++)
1788 costs->match_cost[offset_slot][i] =
1789 costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost;
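/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * how the 'match_cost' table computed above is consulted.  The modeled cost of
 * a match is the per-slot, per-length table entry plus, when aligned costs are
 * enabled and the adjusted offset is at least 16 (offset slot 8 or higher),
 * the cost of its aligned-offset symbol.  This mirrors the lookups in the
 * "Consider explicit offset matches" step of the path search above; the
 * is_16_bit argument to lzx_comp_get_offset_slot() is assumed from that usage.
 */
static inline u32
lzx_match_cost_sketch(struct lzx_compressor *c, unsigned len, u32 offset_data,
		      bool is_16_bit)
{
	unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
	u32 cost = c->costs.match_cost[offset_slot][len - LZX_MIN_MATCH_LEN];

#if LZX_CONSIDER_ALIGNED_COSTS
	if (offset_data >= 16)
		cost += c->costs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK];
#endif
	return cost;
}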
1793 /* Set default LZX Huffman symbol costs to bootstrap the iterative optimization algorithm. */
1796 lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
1799 bool have_byte[256];
1800 unsigned num_used_bytes;
1802 /* The costs below are hard coded to use a scaling factor of 64. */
1803 STATIC_ASSERT(LZX_BIT_COST == 64);
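/* For example, a symbol whose codeword is estimated at 8 bits has a cost of
 * 8 * 64 = 512 in this fixed-point scale. */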
/*
 * Heuristics:
 *
1808 * - Use smaller initial costs for literal symbols when the input buffer
1809 * contains fewer distinct bytes.
1811 * - Assume that match symbols are more costly than literal symbols.
1813 * - Assume that length symbols for shorter lengths are less costly than
1814 * length symbols for longer lengths.
1817 for (i = 0; i < 256; i++)
1818 have_byte[i] = false;
1820 for (i = 0; i < block_size; i++)
1821 have_byte[block[i]] = true;

	num_used_bytes = 0;
1824 for (i = 0; i < 256; i++)
1825 num_used_bytes += have_byte[i];
1827 for (i = 0; i < 256; i++)
1828 c->costs.main[i] = 560 - (256 - num_used_bytes);
1830 for (; i < c->num_main_syms; i++)
1831 c->costs.main[i] = 680;
1833 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1834 c->costs.len[i] = 412 + i;
1836 #if LZX_CONSIDER_ALIGNED_COSTS
1837 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1838 c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
1841 lzx_compute_match_costs(c);
1844 /* Update the current cost model to reflect the computed Huffman codes. */
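/* A codeword length of 0 means the symbol was unused in the previous pass; such
 * symbols are charged the corresponding *_CODEWORD_LIMIT (treated as if they
 * had a maximum-length codeword) so that they are not modeled as free. */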
1846 lzx_update_costs(struct lzx_compressor *c)
1849 const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
1851 for (i = 0; i < c->num_main_syms; i++) {
1852 c->costs.main[i] = (lens->main[i] ? lens->main[i] :
1853 MAIN_CODEWORD_LIMIT) * LZX_BIT_COST;
1856 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
1857 c->costs.len[i] = (lens->len[i] ? lens->len[i] :
1858 LENGTH_CODEWORD_LIMIT) * LZX_BIT_COST;
1861 #if LZX_CONSIDER_ALIGNED_COSTS
1862 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1863 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] :
1864 ALIGNED_CODEWORD_LIMIT) * LZX_BIT_COST;
1868 lzx_compute_match_costs(c);
1871 static inline struct lzx_lru_queue
1872 lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
1873 struct lzx_output_bitstream * const restrict os,
1874 const u8 * const restrict block_begin,
1875 const u32 block_size,
1876 const struct lzx_lru_queue initial_queue,
1879 unsigned num_passes_remaining = c->num_optim_passes;
1880 struct lzx_lru_queue new_queue;
1883 /* The first optimization pass uses a default cost model. Each
1884 * additional optimization pass uses a cost model derived from the
1885 * Huffman code computed in the previous pass. */
1887 lzx_set_default_costs(c, block_begin, block_size);
1888 lzx_reset_symbol_frequencies(c);
1890 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
1891 initial_queue, is_16_bit);
1892 if (num_passes_remaining > 1) {
1893 lzx_tally_item_list(c, block_size, is_16_bit);
1894 lzx_make_huffman_codes(c);
1895 lzx_update_costs(c);
1896 lzx_reset_symbol_frequencies(c);
1898 } while (--num_passes_remaining);
1900 seq_idx = lzx_record_item_list(c, block_size, is_16_bit);
1901 lzx_finish_block(c, os, block_begin, block_size, seq_idx);
1906 * This is the "near-optimal" LZX compressor.
1908 * For each block, it performs a relatively thorough graph search to find an
1909 * inexpensive (in terms of compressed size) way to output that block.
1911 * Note: there are actually many things this algorithm leaves on the table in
1912 * terms of compression ratio. So although it may be "near-optimal", it is
1913 * certainly not "optimal". The goal is not to produce the optimal compression
1914 * ratio, which for LZX is probably impossible within any practical amount of
1915 * time, but rather to produce a compression ratio significantly better than a
1916 * simpler "greedy" or "lazy" parse while still being relatively fast.
1919 lzx_compress_near_optimal(struct lzx_compressor * restrict c,
1920 const u8 * const restrict in_begin,
1921 struct lzx_output_bitstream * restrict os,
1924 const u8 * in_next = in_begin;
1925 const u8 * const in_end = in_begin + c->in_nbytes;
1926 u32 max_len = LZX_MAX_MATCH_LEN;
1927 u32 nice_len = min(c->nice_match_length, max_len);
1928 u32 next_hashes[2] = {};
1929 struct lzx_lru_queue queue;
1931 CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
1932 lzx_lru_queue_init(&queue);
1935 /* Starting a new block */
1936 const u8 * const in_block_begin = in_next;
1937 const u8 * const in_max_block_end =
1938 in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
1939 const u8 *next_observation = in_next;
1941 init_block_split_stats(&c->split_stats);
1943 /* Run the block through the matchfinder and cache the matches. */
1944 struct lz_match *cache_ptr = c->match_cache;
1946 struct lz_match *lz_matchptr;
1949 /* If approaching the end of the input buffer, adjust
1950 * 'max_len' and 'nice_len' accordingly. */
1951 if (unlikely(max_len > in_end - in_next)) {
1952 max_len = in_end - in_next;
1953 nice_len = min(max_len, nice_len);
1954 if (unlikely(max_len <
1955 BT_MATCHFINDER_REQUIRED_NBYTES))
1958 cache_ptr->length = 0;
1964 /* Check for matches. */
1965 lz_matchptr = CALL_BT_MF(is_16_bit, c,
1966 bt_matchfinder_get_matches,
1971 c->max_search_depth,
1976 cache_ptr->length = lz_matchptr - (cache_ptr + 1);
1977 cache_ptr = lz_matchptr;
1979 if (in_next >= next_observation) {
1980 best_len = cache_ptr[-1].length;
1982 observe_match(&c->split_stats, best_len);
1983 next_observation = in_next + best_len;
1985 observe_literal(&c->split_stats, *in_next);
1986 next_observation = in_next + 1;
1993 * If there was a very long match found, then don't
1994 * cache any matches for the bytes covered by that
1995 * match. This avoids degenerate behavior when
1996 * compressing highly redundant data, where the number
1997 * of matches can be very large.
1999 * This heuristic doesn't actually hurt the compression
2000 * ratio very much. If there's a long match, then the
2001 * data must be highly compressible, so it doesn't
2002 * matter as much what we do.
2004 if (best_len >= nice_len) {
2007 if (unlikely(max_len > in_end - in_next)) {
2008 max_len = in_end - in_next;
2009 nice_len = min(max_len, nice_len);
2010 if (unlikely(max_len <
2011 BT_MATCHFINDER_REQUIRED_NBYTES))
2014 cache_ptr->length = 0;
2019 CALL_BT_MF(is_16_bit, c,
2020 bt_matchfinder_skip_position,
2024 c->max_search_depth,
2027 cache_ptr->length = 0;
2029 } while (--best_len);
2031 } while (in_next < in_max_block_end &&
2032 likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]) &&
2033 !should_end_block(&c->split_stats, in_block_begin, in_next, in_end));
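/* The matchfinding pass above ends when the position reaches the soft maximum
 * block size, when the match cache runs out of space, or when the block
 * splitter's statistics indicate that the block should end here. */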
2035 /* We've finished running the block through the matchfinder.
2036 * Now choose a match/literal sequence and write the block. */
2038 queue = lzx_optimize_and_write_block(c, os, in_block_begin,
2039 in_next - in_block_begin,
2041 } while (in_next != in_end);
2045 lzx_compress_near_optimal_16(struct lzx_compressor *c,
2046 struct lzx_output_bitstream *os)
2048 lzx_compress_near_optimal(c, c->in_buffer, os, true);
2052 lzx_compress_near_optimal_32(struct lzx_compressor *c,
2053 struct lzx_output_bitstream *os)
2055 lzx_compress_near_optimal(c, c->in_buffer, os, false);
2059 * Given a pointer to the current byte sequence and the current list of recent
2060 * match offsets, find the longest repeat offset match.
2062 * If no match of at least 2 bytes is found, then return 0.
2064 * If a match of at least 2 bytes is found, then return its length and set
2065 * *rep_max_idx_ret to the index of its offset in @recent_offsets.
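 *
 * Note: at the start of a block the recent match offsets are initialized to
 * {1, 1, 1} (see lzx_compress_lazy() below), so this search initially just
 * finds runs of repeated bytes; once real matches have been coded, it looks at
 * the three most recently used offsets.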
2068 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
2069 const u32 bytes_remaining,
2070 const u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
2071 unsigned *rep_max_idx_ret)
2073 STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
2075 const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
2076 const u16 next_2_bytes = load_u16_unaligned(in_next);
2078 unsigned rep_max_len;
2079 unsigned rep_max_idx;
2082 matchptr = in_next - recent_offsets[0];
2083 if (load_u16_unaligned(matchptr) == next_2_bytes)
2084 rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
2089 matchptr = in_next - recent_offsets[1];
2090 if (load_u16_unaligned(matchptr) == next_2_bytes) {
2091 rep_len = lz_extend(in_next, matchptr, 2, max_len);
2092 if (rep_len > rep_max_len) {
2093 rep_max_len = rep_len;
2098 matchptr = in_next - recent_offsets[2];
2099 if (load_u16_unaligned(matchptr) == next_2_bytes) {
2100 rep_len = lz_extend(in_next, matchptr, 2, max_len);
2101 if (rep_len > rep_max_len) {
2102 rep_max_len = rep_len;
2107 *rep_max_idx_ret = rep_max_idx;
2111 /* Fast heuristic scoring for lazy parsing: how "good" is this match? */
2112 static inline unsigned
2113 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
2115 unsigned score = len;
2117 if (adjusted_offset < 4096)
	score++;

2120 if (adjusted_offset < 256)
	score++;

	return score;
2126 static inline unsigned
2127 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
2132 /* This is the "lazy" LZX compressor. */
2134 lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
2137 const u8 * const in_begin = c->in_buffer;
2138 const u8 * in_next = in_begin;
2139 const u8 * const in_end = in_begin + c->in_nbytes;
2140 unsigned max_len = LZX_MAX_MATCH_LEN;
2141 unsigned nice_len = min(c->nice_match_length, max_len);
2142 STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
2143 u32 recent_offsets[3] = {1, 1, 1};
2144 u32 next_hashes[2] = {};
2146 CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
2149 /* Starting a new block */
2151 const u8 * const in_block_begin = in_next;
2152 const u8 * const in_max_block_end =
2153 in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
2154 struct lzx_sequence *next_seq = c->chosen_sequences;
2157 u32 cur_offset_data;
2161 u32 next_offset_data;
2162 unsigned next_score;
2163 unsigned rep_max_len;
2164 unsigned rep_max_idx;
2169 lzx_reset_symbol_frequencies(c);
2170 init_block_split_stats(&c->split_stats);
2173 if (unlikely(max_len > in_end - in_next)) {
2174 max_len = in_end - in_next;
2175 nice_len = min(max_len, nice_len);
2178 /* Find the longest match at the current position. */
2180 cur_len = CALL_HC_MF(is_16_bit, c,
2181 hc_matchfinder_longest_match,
2187 c->max_search_depth,
2192 cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
2193 cur_offset != recent_offsets[0] &&
2194 cur_offset != recent_offsets[1] &&
2195 cur_offset != recent_offsets[2]))
2197 /* There was no match found, or the only match found
2198 * was a distant length 3 match. Output a literal. */
2199 lzx_record_literal(c, *in_next, &litrunlen);
2200 observe_literal(&c->split_stats, *in_next);
2205 observe_match(&c->split_stats, cur_len);
2207 if (cur_offset == recent_offsets[0]) {
2209 cur_offset_data = 0;
2210 skip_len = cur_len - 1;
2211 goto choose_cur_match;
2214 cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT;
2215 cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
2217 /* Consider a repeat offset match */
2218 rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2224 if (rep_max_len >= 3 &&
2225 (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2226 rep_max_idx)) >= cur_score)
2228 cur_len = rep_max_len;
2229 cur_offset_data = rep_max_idx;
2230 skip_len = rep_max_len - 1;
2231 goto choose_cur_match;
2236 /* We have a match at the current position. */
2238 /* If we have a very long match, choose it immediately. */
2239 if (cur_len >= nice_len) {
2240 skip_len = cur_len - 1;
2241 goto choose_cur_match;
2244 /* See if there's a better match at the next position. */
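/* This is the lazy evaluation step: the match just found is only provisional.
 * If the position one byte ahead has a sufficiently better-scoring match, the
 * current byte is output as a literal and the later match is preferred. */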
2246 if (unlikely(max_len > in_end - in_next)) {
2247 max_len = in_end - in_next;
2248 nice_len = min(max_len, nice_len);
2251 next_len = CALL_HC_MF(is_16_bit, c,
2252 hc_matchfinder_longest_match,
2258 c->max_search_depth / 2,
2262 if (next_len <= cur_len - 2) {
2264 skip_len = cur_len - 2;
2265 goto choose_cur_match;
2268 next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT;
2269 next_score = lzx_explicit_offset_match_score(next_len, next_offset_data);
2271 rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
2277 if (rep_max_len >= 3 &&
2278 (rep_score = lzx_repeat_offset_match_score(rep_max_len,
2279 rep_max_idx)) >= next_score)
2282 if (rep_score > cur_score) {
2283 /* The next match is better, and it's a
2284 * repeat offset match. */
2285 lzx_record_literal(c, *(in_next - 2),
2287 cur_len = rep_max_len;
2288 cur_offset_data = rep_max_idx;
2289 skip_len = cur_len - 1;
2290 goto choose_cur_match;
2293 if (next_score > cur_score) {
2294 /* The next match is better, and it's an
2295 * explicit offset match. */
2296 lzx_record_literal(c, *(in_next - 2),
2299 cur_offset_data = next_offset_data;
2300 cur_score = next_score;
2301 goto have_cur_match;
2305 /* The original match was better. */
2306 skip_len = cur_len - 2;
2309 lzx_record_match(c, cur_len, cur_offset_data,
2310 recent_offsets, is_16_bit,
2311 &litrunlen, &next_seq);
2312 in_next = CALL_HC_MF(is_16_bit, c,
2313 hc_matchfinder_skip_positions,
2319 } while (in_next < in_max_block_end &&
2320 !should_end_block(&c->split_stats, in_block_begin, in_next, in_end));
2322 lzx_finish_sequence(next_seq, litrunlen);
2324 lzx_finish_block(c, os, in_block_begin, in_next - in_block_begin, 0);
2326 } while (in_next != in_end);
2330 lzx_compress_lazy_16(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2332 lzx_compress_lazy(c, os, true);
2336 lzx_compress_lazy_32(struct lzx_compressor *c, struct lzx_output_bitstream *os)
2338 lzx_compress_lazy(c, os, false);
2341 /* Generate the acceleration tables for offset slots. */
2343 lzx_init_offset_slot_tabs(struct lzx_compressor *c)
2345 u32 adjusted_offset = 0;
2349 for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
2352 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2354 c->offset_slot_tab_1[adjusted_offset] = slot;
2357 /* slots [30, 49] */
2358 for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
2359 adjusted_offset += (u32)1 << 14)
2361 if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
2363 c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
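/*
 * Illustrative sketch (hypothetical helper; the file's real lookup is
 * lzx_comp_get_offset_slot(), used by the optimizer above): small adjusted
 * offsets index offset_slot_tab_1 directly, while larger offsets, which always
 * land in slots 30 and above, index offset_slot_tab_2 by their high bits.
 */
static inline unsigned
lzx_offset_slot_lookup_sketch(const struct lzx_compressor *c, u32 adjusted_offset)
{
	if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
		return c->offset_slot_tab_1[adjusted_offset];
	return c->offset_slot_tab_2[adjusted_offset >> 14];
}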
2368 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
2370 if (compression_level <= LZX_MAX_FAST_LEVEL) {
2371 if (lzx_is_16_bit(max_bufsize))
2372 return offsetof(struct lzx_compressor, hc_mf_16) +
2373 hc_matchfinder_size_16(max_bufsize);
2375 return offsetof(struct lzx_compressor, hc_mf_32) +
2376 hc_matchfinder_size_32(max_bufsize);
2378 if (lzx_is_16_bit(max_bufsize))
2379 return offsetof(struct lzx_compressor, bt_mf_16) +
2380 bt_matchfinder_size_16(max_bufsize);
2382 return offsetof(struct lzx_compressor, bt_mf_32) +
2383 bt_matchfinder_size_32(max_bufsize);
2388 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
2393 if (max_bufsize > LZX_MAX_WINDOW_SIZE)
2396 size += lzx_get_compressor_size(max_bufsize, compression_level);
2398 size += max_bufsize; /* in_buffer */
2403 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
2404 bool destructive, void **c_ret)
2406 unsigned window_order;
2407 struct lzx_compressor *c;
2409 window_order = lzx_get_window_order(max_bufsize);
2410 if (window_order == 0)
2411 return WIMLIB_ERR_INVALID_PARAM;
2413 c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
2417 c->destructive = destructive;
2419 c->num_main_syms = lzx_get_num_main_syms(window_order);
2420 c->window_order = window_order;
2422 if (!c->destructive) {
2423 c->in_buffer = MALLOC(max_bufsize);
2428 if (compression_level <= LZX_MAX_FAST_LEVEL) {
2430 /* Fast compression: Use lazy parsing. */
2432 if (lzx_is_16_bit(max_bufsize))
2433 c->impl = lzx_compress_lazy_16;
2435 c->impl = lzx_compress_lazy_32;
2436 c->max_search_depth = (60 * compression_level) / 20;
2437 c->nice_match_length = (80 * compression_level) / 20;
2439 /* lzx_compress_lazy() needs max_search_depth >= 2 because it
2440 * halves the max_search_depth when attempting a lazy match, and
2441 * max_search_depth cannot be 0. */
2442 if (c->max_search_depth < 2)
2443 c->max_search_depth = 2;
2446 /* Normal / high compression: Use near-optimal parsing. */
2448 if (lzx_is_16_bit(max_bufsize))
2449 c->impl = lzx_compress_near_optimal_16;
2451 c->impl = lzx_compress_near_optimal_32;
2453 /* Scale nice_match_length and max_search_depth with the
2454 * compression level. */
2455 c->max_search_depth = (24 * compression_level) / 50;
2456 c->nice_match_length = (48 * compression_level) / 50;
2458 /* Set a number of optimization passes appropriate for the
2459 * compression level. */
2461 c->num_optim_passes = 1;
2463 if (compression_level >= 45)
2464 c->num_optim_passes++;
2466 /* Use more optimization passes for higher compression levels.
2467 * But the more passes there are, the less they help --- so
2468 * don't add them linearly. */
2469 if (compression_level >= 70) {
2470 c->num_optim_passes++;
2471 if (compression_level >= 100)
2472 c->num_optim_passes++;
2473 if (compression_level >= 150)
2474 c->num_optim_passes++;
2475 if (compression_level >= 200)
2476 c->num_optim_passes++;
2477 if (compression_level >= 300)
2478 c->num_optim_passes++;
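/* With the thresholds above, this works out to 1 pass below level 45, 2 passes
 * for levels 45-69, 3 for 70-99, 4 for 100-149, 5 for 150-199, 6 for 200-299,
 * and 7 for level 300 and above. */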
2482 /* max_search_depth == 0 is invalid. */
2483 if (c->max_search_depth < 1)
2484 c->max_search_depth = 1;
2486 if (c->nice_match_length > LZX_MAX_MATCH_LEN)
2487 c->nice_match_length = LZX_MAX_MATCH_LEN;
2489 lzx_init_offset_slot_tabs(c);
2496 return WIMLIB_ERR_NOMEM;
2500 lzx_compress(const void *restrict in, size_t in_nbytes,
2501 void *restrict out, size_t out_nbytes_avail, void *restrict _c)
2503 struct lzx_compressor *c = _c;
2504 struct lzx_output_bitstream os;
2507 /* Don't bother trying to compress very small inputs. */
2508 if (in_nbytes < 100)
2511 /* Copy the input data into the internal buffer and preprocess it. */
2513 c->in_buffer = (void *)in;
2515 memcpy(c->in_buffer, in, in_nbytes);
2516 c->in_nbytes = in_nbytes;
2517 lzx_preprocess(c->in_buffer, in_nbytes);
2519 /* Initially, the previous Huffman codeword lengths are all zeroes. */
2521 memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2523 /* Initialize the output bitstream. */
2524 lzx_init_output(&os, out, out_nbytes_avail);
2526 /* Call the compression level-specific compress() function. */
2529 /* Flush the output bitstream and return the compressed size or 0. */
2530 result = lzx_flush_output(&os);
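/* If compression failed (the output did not fit) and the caller's buffer was
 * preprocessed in place, undo the x86 preprocessing so the caller still has
 * the original data to store uncompressed. */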
2531 if (!result && c->destructive)
2532 lzx_postprocess(c->in_buffer, c->in_nbytes);
2537 lzx_free_compressor(void *_c)
2539 struct lzx_compressor *c = _c;
2541 if (!c->destructive)
2546 const struct compressor_ops lzx_compressor_ops = {
2547 .get_needed_memory = lzx_get_needed_memory,
2548 .create_compressor = lzx_create_compressor,
2549 .compress = lzx_compress,
2550 .free_compressor = lzx_free_compressor,