/*
 * lzx_compress.c
 *
 * A compressor for the LZX compression format, as used in WIM archives.
 *
 * Copyright (C) 2012-2016 Eric Biggers
 *
 * This file is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option) any
 * later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this file; if not, see http://www.gnu.org/licenses/.
 */
/*
 * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
 * compression format, as used in the WIM (Windows IMaging) file format.
 *
 * Two different LZX-compatible algorithms are implemented: "near-optimal" and
 * "lazy".  "Near-optimal" is significantly slower than "lazy", but results in a
 * better compression ratio.  The "near-optimal" algorithm is used at the
 * default compression level.
 *
 * This file may need some slight modifications to be used outside of the WIM
 * format.  In particular, in other situations the LZX block header might be
 * slightly different, and sliding window support might be required.
 *
 * LZX is a compression format derived from DEFLATE, the format used by zlib and
 * gzip.  Both LZX and DEFLATE use LZ77 matching and Huffman coding.  Certain
 * details are quite similar, such as the method for storing Huffman codes.
 * However, the main differences are:
 *
 * - LZX preprocesses the data to attempt to make x86 machine code slightly more
 *   compressible before attempting to compress it further.
 *
 * - LZX uses a "main" alphabet which combines literals and matches, with the
 *   match symbols containing a "length header" (giving all or part of the match
 *   length) and an "offset slot" (giving, roughly speaking, the order of
 *   magnitude of the match offset).
 *
 * - LZX does not have static Huffman blocks (that is, the kind with preset
 *   Huffman codes); however it does have two types of dynamic Huffman blocks
 *   ("verbatim" and "aligned").
 *
 * - LZX has a minimum match length of 2 rather than 3.  Length 2 matches can be
 *   useful, but generally only if the compressor is smart about choosing them.
 *
 * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
 *   of match offsets.  This is very useful for certain types of files, such as
 *   binary files that have repeating records.
 */
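
/*
 * Example of the benefit of the recent offsets queue (illustrative only): in a
 * file of fixed-size records, the compressor tends to emit many matches whose
 * offset equals the record size.  With the queue, every such match after the
 * first can be coded as "repeat offset 0" instead of resending the offset in
 * full.  A minimal sketch of the queue semantics, assuming a plain 3-entry
 * array (this file actually packs the queue into a u64; see
 * 'struct lzx_lru_queue' below):
 *
 *	#include <stdint.h>
 *
 *	struct example_lru { uint32_t R[3]; };
 *
 *	// A new explicit offset is pushed onto the front; the oldest entry
 *	// (R[2]) falls off the end.
 *	static void example_push(struct example_lru *q, uint32_t offset)
 *	{
 *		q->R[2] = q->R[1];
 *		q->R[1] = q->R[0];
 *		q->R[0] = offset;
 *	}
 *
 *	// Reusing queue entry 'idx' swaps it to the front instead of shifting.
 *	static void example_swap(struct example_lru *q, unsigned idx)
 *	{
 *		uint32_t tmp = q->R[idx];
 *		q->R[idx] = q->R[0];
 *		q->R[0] = tmp;
 *	}
 */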
/******************************************************************************/
/*                             General parameters                             */
/*----------------------------------------------------------------------------*/
/*
 * The compressor uses the faster algorithm at levels <= MAX_FAST_LEVEL.  It
 * uses the slower algorithm at levels > MAX_FAST_LEVEL.
 */
#define MAX_FAST_LEVEL	34
/*
 * The compressor-side limits on the codeword lengths (in bits) for each Huffman
 * code.  To make outputting bits slightly faster, some of these limits are
 * lower than the limits defined by the LZX format.  This does not significantly
 * affect the compression ratio.
 */
#define MAIN_CODEWORD_LIMIT	16
#define LENGTH_CODEWORD_LIMIT	12
#define ALIGNED_CODEWORD_LIMIT	7
#define PRE_CODEWORD_LIMIT	7
/******************************************************************************/
/*                         Block splitting parameters                         */
/*----------------------------------------------------------------------------*/
/*
 * The compressor always outputs blocks of at least this size in bytes, except
 * for the last block which may need to be smaller.
 */
#define MIN_BLOCK_SIZE	6500
/*
 * The compressor attempts to end a block when it reaches this size in bytes.
 * The final size might be slightly larger due to matches extending beyond the
 * end of the block.  Specifically:
 *
 *  - The near-optimal compressor may choose a match of up to LZX_MAX_MATCH_LEN
 *    bytes starting at position 'SOFT_MAX_BLOCK_SIZE - 1'.
 *
 *  - The lazy compressor may choose a sequence of literals starting at position
 *    'SOFT_MAX_BLOCK_SIZE - 1' when it sees a sequence of increasingly better
 *    matches.  The final match may be up to LZX_MAX_MATCH_LEN bytes.  The
 *    length of the literal sequence is approximately limited by the "nice match
 *    length" parameter.
 */
#define SOFT_MAX_BLOCK_SIZE	100000
/*
 * The number of observed items (matches and literals) that represents
 * sufficient data for the compressor to decide whether the current block should
 * be ended or not.
 */
#define NUM_OBSERVATIONS_PER_BLOCK_CHECK	400
/******************************************************************************/
/*                       Parameters for slower algorithm                      */
/*----------------------------------------------------------------------------*/
/*
 * The log base 2 of the number of entries in the hash table for finding length
 * 2 matches.  This could be as high as 16, but using a smaller hash table
 * speeds up compression due to reduced cache pressure.
 */
#define BT_MATCHFINDER_HASH2_ORDER	12
/*
 * The number of lz_match structures in the match cache, excluding the extra
 * "overflow" entries.  This value should be high enough so that nearly all of
 * the time, all matches found in a given block can fit in the match cache.
 * However, fallback behavior (immediately terminating the block) on cache
 * overflow is still required.
 */
#define CACHE_LENGTH	(SOFT_MAX_BLOCK_SIZE * 5)
/*
 * An upper bound on the number of matches that can ever be saved in the match
 * cache for a single position.  Since each match we save for a single position
 * has a distinct length, we can use the number of possible match lengths in LZX
 * as this bound.  This bound is guaranteed to be valid in all cases, although
 * if 'nice_match_length < LZX_MAX_MATCH_LEN', then it will never actually be
 * reached.
 */
#define MAX_MATCHES_PER_POS	LZX_NUM_LENS
/*
 * A scaling factor that makes it possible to consider fractional bit costs.  A
 * single bit has a cost of BIT_COST.
 *
 * Note: this is only useful as a statistical trick for when the true costs are
 * unknown.  Ultimately, each token in LZX requires a whole number of bits to
 * output.
 */
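
/*
 * For example (illustrative only): a symbol with probability p ideally costs
 * -log2(p) bits, which is usually not an integer.  Scaling by BIT_COST lets
 * such costs be compared in integer arithmetic.  A minimal sketch, assuming a
 * scale of 64 purely for the example (not necessarily the value used here):
 *
 *	#include <math.h>
 *	#include <stdint.h>
 *
 *	#define EXAMPLE_BIT_COST 64	// assumed scale, for illustration
 *
 *	// Scaled cost, in units of 1/EXAMPLE_BIT_COST bits, of a symbol
 *	// with probability 'p'.
 *	static uint32_t example_scaled_cost(double p)
 *	{
 *		return (uint32_t)(-log2(p) * EXAMPLE_BIT_COST + 0.5);
 *	}
 *
 * With this scale, a symbol of probability 1/3 costs about 101 units
 * (~1.585 bits) rather than being forced to round to 1 or 2 whole bits.
 */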
/*
 * Should the compressor take into account the costs of aligned offset symbols
 * instead of assuming that all are equally likely?
 */
#define CONSIDER_ALIGNED_COSTS	1
166 * Should the "minimum" cost path search algorithm consider "gap" matches, where
167 * a normal match is followed by a literal, then by a match with the same
168 * offset? This is one specific, somewhat common situation in which the true
169 * minimum cost path is often different from the path found by looking only one
172 #define CONSIDER_GAP_MATCHES 1
/******************************************************************************/
/*                                  Includes                                  */
/*----------------------------------------------------------------------------*/

#include "wimlib/compress_common.h"
#include "wimlib/compressor_ops.h"
#include "wimlib/error.h"
#include "wimlib/lz_extend.h"
#include "wimlib/lzx_common.h"
#include "wimlib/unaligned.h"
#include "wimlib/util.h"
/* Note: BT_MATCHFINDER_HASH2_ORDER must be defined before including
 * bt_matchfinder.h. */

/* Matchfinders with 16-bit positions */
#define mf_pos_t	u16
#define MF_SUFFIX	_16
#include "wimlib/bt_matchfinder.h"
#include "wimlib/hc_matchfinder.h"

/* Matchfinders with 32-bit positions */
#undef mf_pos_t
#undef MF_SUFFIX
#define mf_pos_t	u32
#define MF_SUFFIX	_32
#include "wimlib/bt_matchfinder.h"
#include "wimlib/hc_matchfinder.h"
/******************************************************************************/
/*                            Compressor structure                            */
/*----------------------------------------------------------------------------*/
/* Codewords for the Huffman codes */
struct lzx_codewords {
	u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
	u32 len[LZX_LENCODE_NUM_SYMBOLS];
	u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
/*
 * Codeword lengths, in bits, for the Huffman codes.
 *
 * A codeword length of 0 means the corresponding codeword has zero frequency.
 *
 * The main and length codes each have one extra entry for use as a sentinel.
 * See lzx_write_compressed_code().
 */
struct lzx_lens {
	u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
	u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
	u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
/* Codewords and lengths for the Huffman codes */
struct lzx_codes {
	struct lzx_codewords codewords;
	struct lzx_lens lens;
};
/* Symbol frequency counters for the Huffman-encoded alphabets */
struct lzx_freqs {
	u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
	u32 len[LZX_LENCODE_NUM_SYMBOLS];
	u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
/* Block split statistics.  See the "Block splitting algorithm" section later in
 * this file for details. */
#define NUM_LITERAL_OBSERVATION_TYPES	8
#define NUM_MATCH_OBSERVATION_TYPES	2
#define NUM_OBSERVATION_TYPES	(NUM_LITERAL_OBSERVATION_TYPES + \
				 NUM_MATCH_OBSERVATION_TYPES)
struct lzx_block_split_stats {
	u32 new_observations[NUM_OBSERVATION_TYPES];
	u32 observations[NUM_OBSERVATION_TYPES];
	u32 num_new_observations;
	u32 num_observations;
};
/*
 * Represents a run of literals followed by a match or end-of-block.  This
 * structure is needed to temporarily store items chosen by the compressor,
 * since items cannot be written until all items for the block have been chosen
 * and the block's Huffman codes have been computed.
 */
struct lzx_sequence {

	/* The number of literals in the run.  This may be 0.  The literals are
	 * not stored explicitly in this structure; instead, they are read
	 * directly from the uncompressed data. */
	u16 litrunlen;

	/* If the next field doesn't indicate end-of-block, then this is the
	 * match length minus LZX_MIN_MATCH_LEN. */
	u16 adjusted_length;

	/* If bit 31 is clear, then this field contains the match header in bits
	 * 0-8, and either the match offset plus LZX_OFFSET_ADJUSTMENT or a
	 * recent offset code in bits 9-30.  Otherwise (if bit 31 is set), this
	 * sequence's literal run was the last literal run in the block, so
	 * there is no match that follows it. */
	u32 adjusted_offset_and_match_hdr;
};
/*
 * This structure represents a byte position in the input buffer and a node in
 * the graph of possible match/literal choices.
 *
 * Logically, each incoming edge to this node is labeled with a literal or a
 * match that can be taken to reach this position from an earlier position; and
 * each outgoing edge from this node is labeled with a literal or a match that
 * can be taken to advance from this position to a later position.
 */
struct lzx_optimum_node {

	/* The cost, in bits, of the lowest-cost path that has been found to
	 * reach this position.  This can change as progressively lower cost
	 * paths are found to reach this position. */
	u32 cost;

	/*
	 * The best arrival to this node, i.e. the match or literal that was
	 * used to arrive to this position at the given 'cost'.  This can change
	 * as progressively lower cost paths are found to reach this position.
	 *
	 * For non-gap matches, this variable is divided into two bitfields
	 * whose meanings depend on the item type:
	 *
	 * Literals:
	 *	Low bits are 0, high bits are the literal.
	 *
	 * Explicit offset matches:
	 *	Low bits are the match length, high bits are the offset plus
	 *	LZX_OFFSET_ADJUSTMENT.
	 *
	 * Repeat offset matches:
	 *	Low bits are the match length, high bits are the queue index.
	 *
	 * For gap matches, identified by OPTIMUM_GAP_MATCH set, special
	 * behavior applies --- see the code.
	 */
	u32 item;
#define OPTIMUM_OFFSET_SHIFT	9
#define OPTIMUM_LEN_MASK	((1 << OPTIMUM_OFFSET_SHIFT) - 1)
#if CONSIDER_GAP_MATCHES
#  define OPTIMUM_GAP_MATCH	0x80000000
#endif

} _aligned_attribute(8);
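
/*
 * Illustrative 'item' encodings, using OPTIMUM_OFFSET_SHIFT == 9 (the values
 * shown are examples only):
 *
 *	Literal 'A' (0x41):			item = 0x41 << 9       = 0x8200
 *	Length-10 repeat offset match,
 *	queue index 2:				item = (2 << 9) | 10   = 0x040A
 *	Length-10 match, adjusted offset 1000:	item = (1000 << 9) | 10 = 0x7D00A
 */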
/* The cost model for near-optimal parsing */
struct lzx_costs {

	/*
	 * 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost of a
	 * length 'len' match which has an offset belonging to 'offset_slot'.
	 * The cost includes the main symbol, the length symbol if required, and
	 * the extra offset bits if any, excluding any entropy-coded bits
	 * (aligned offset bits).  It does *not* include the cost of the aligned
	 * offset symbol which may be required.
	 */
	u16 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];

	/* Cost of each symbol in the main code */
	u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];

	/* Cost of each symbol in the length code */
	u32 len[LZX_LENCODE_NUM_SYMBOLS];

#if CONSIDER_ALIGNED_COSTS
	/* Cost of each symbol in the aligned offset code */
	u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
#endif
};

struct lzx_output_bitstream;
/* The main LZX compressor structure */
struct lzx_compressor {

	/* The buffer for preprocessed input data, if not using destructive
	 * compression */
	void *in_buffer;

	/* If true, then the compressor need not preserve the input buffer if it
	 * compresses the data successfully */
	bool destructive;

	/* Pointer to the compress() implementation chosen at allocation time */
	void (*impl)(struct lzx_compressor *, const u8 *, size_t,
		     struct lzx_output_bitstream *);

	/* The log base 2 of the window size for match offset encoding purposes.
	 * This will be >= LZX_MIN_WINDOW_ORDER and <= LZX_MAX_WINDOW_ORDER. */
	unsigned window_order;

	/* The number of symbols in the main alphabet.  This depends on the
	 * window order, since the window order determines the maximum possible
	 * match offset. */
	unsigned num_main_syms;

	/* The "nice" match length: if a match of this length is found, then it
	 * is chosen immediately without further consideration. */
	unsigned nice_match_length;

	/* The maximum search depth: at most this many potential matches are
	 * considered at each position. */
	unsigned max_search_depth;

	/* The number of optimization passes per block */
	unsigned num_optim_passes;

	/* The symbol frequency counters for the current block */
	struct lzx_freqs freqs;

	/* Block split statistics for the current block */
	struct lzx_block_split_stats split_stats;

	/* The Huffman codes for the current and previous blocks.  The one with
	 * index 'codes_index' is for the current block, and the other one is
	 * for the previous block. */
	struct lzx_codes codes[2];
	unsigned codes_index;

	/* The matches and literals that the compressor has chosen for the
	 * current block.  The required length of this array is limited by the
	 * maximum number of matches that can ever be chosen for a single block,
	 * plus one for the special entry at the end. */
	struct lzx_sequence chosen_sequences[
		DIV_ROUND_UP(SOFT_MAX_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];

	/* Tables for mapping adjusted offsets to offset slots */
	u8 offset_slot_tab_1[32768];	/* offset slots [0, 29] */
	u8 offset_slot_tab_2[128];	/* offset slots [30, 49] */
	union {
		/* Data for lzx_compress_lazy() */
		struct {
			/* Hash chains matchfinder (MUST BE LAST!!!) */
			union {
				struct hc_matchfinder_16 hc_mf_16;
				struct hc_matchfinder_32 hc_mf_32;
			};
		};
		/* Data for lzx_compress_near_optimal() */
		struct {
			/*
			 * Array of nodes, one per position, for running the
			 * minimum-cost path algorithm.
			 *
			 * This array must be large enough to accommodate the
			 * worst-case number of nodes, which occurs if the
			 * compressor finds a match of length LZX_MAX_MATCH_LEN
			 * at position 'SOFT_MAX_BLOCK_SIZE - 1', producing a
			 * block of size 'SOFT_MAX_BLOCK_SIZE - 1 +
			 * LZX_MAX_MATCH_LEN'.  Add one for the end-of-block
			 * node.
			 */
			struct lzx_optimum_node optimum_nodes[
					SOFT_MAX_BLOCK_SIZE - 1 +
					LZX_MAX_MATCH_LEN + 1];

			/* The cost model for the current optimization pass */
			struct lzx_costs costs;

			/*
			 * Cached matches for the current block.  This array
			 * contains the matches that were found at each position
			 * in the block.  Specifically, for each position, there
			 * is a special 'struct lz_match' whose 'length' field
			 * contains the number of matches that were found at
			 * that position; this is followed by the matches
			 * themselves, if any, sorted by strictly increasing
			 * length.
			 *
			 * Note: in rare cases, there will be a very high number
			 * of matches in the block and this array will overflow.
			 * If this happens, we force the end of the current
			 * block.  CACHE_LENGTH is the length at which we
			 * actually check for overflow.  The extra slots beyond
			 * this are enough to absorb the worst case overflow,
			 * which occurs if starting at &match_cache[CACHE_LENGTH
			 * - 1], we write the match count header, then write
			 * MAX_MATCHES_PER_POS matches, then skip searching for
			 * matches at 'LZX_MAX_MATCH_LEN - 1' positions and
			 * write the match count header for each.
			 */
			struct lz_match match_cache[CACHE_LENGTH +
						    MAX_MATCHES_PER_POS +
						    LZX_MAX_MATCH_LEN - 1];
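
			/*
			 * Illustrative cache layout for three consecutive
			 * positions (lengths and offsets are examples only):
			 *
			 *	[count=2][len=3,off=100][len=5,off=4000]
			 *	[count=0]
			 *	[count=1][len=4,off=100]
			 *
			 * The first position had two matches, the second had
			 * none, and the third had one.
			 */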
			/* Binary trees matchfinder (MUST BE LAST!!!) */
			union {
				struct bt_matchfinder_16 bt_mf_16;
				struct bt_matchfinder_32 bt_mf_32;
			};
		};
	};
};
/******************************************************************************/
/*                            Matchfinder utilities                           */
/*----------------------------------------------------------------------------*/
/*
 * Will a matchfinder using 16-bit positions be sufficient for compressing
 * buffers of up to the specified size?  The limit could be 65536 bytes, but we
 * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
 * This requires that the limit be no more than the length of offset_slot_tab_1
 * (currently 32768).
 */
static inline bool
lzx_is_16_bit(size_t max_bufsize)
{
	STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
	return max_bufsize <= 32768;
}
/*
 * Return the offset slot for the specified adjusted match offset.
 */
static inline unsigned
lzx_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
		    bool is_16_bit)
{
	if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
		return c->offset_slot_tab_1[adjusted_offset];
	return c->offset_slot_tab_2[adjusted_offset >> 14];
}
/*
 * The following macros call either the 16-bit or the 32-bit version of a
 * matchfinder function based on the value of 'is_16_bit', which will be known
 * at compilation time.
 */

#define CALL_HC_MF(is_16_bit, c, funcname, ...)				      \
	((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
		       CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__))

#define CALL_BT_MF(is_16_bit, c, funcname, ...)				      \
	((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \
		       CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__))
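
/*
 * The same compile-time dispatch pattern as a minimal standalone sketch (the
 * 'demo' names are hypothetical, for illustration only):
 *
 *	#define CONCAT_IMPL(a, b)	a##b
 *	#define CONCAT(a, b)		CONCAT_IMPL(a, b)
 *
 *	static int demo_16(int x) { return x + 16; }
 *	static int demo_32(int x) { return x + 32; }
 *
 *	#define CALL_DEMO(is_16_bit, ...)			\
 *		((is_16_bit) ? CONCAT(demo, _16)(__VA_ARGS__) :	\
 *			       CONCAT(demo, _32)(__VA_ARGS__))
 *
 * Because 'is_16_bit' is a compile-time constant at each call site, the dead
 * branch is optimized away and only one variant is actually referenced.
 */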
/******************************************************************************/
/*                              Output bitstream                              */
/*----------------------------------------------------------------------------*/
/*
 * The LZX bitstream is encoded as a sequence of little endian 16-bit coding
 * units.  Bits are ordered from most significant to least significant within
 * each coding unit.
 */

/*
 * Structure to keep track of the current state of sending bits to the
 * compressed output buffer.
 */
struct lzx_output_bitstream {

	/* Bits that haven't yet been written to the output buffer */
	machine_word_t bitbuf;

	/* Number of bits currently held in @bitbuf */
	machine_word_t bitcount;

	/* Pointer to the start of the output buffer */
	u8 *start;

	/* Pointer to the position in the output buffer at which the next coding
	 * unit should be written */
	u8 *next;

	/* Pointer to just past the end of the output buffer, rounded down by
	 * one byte if needed to make 'end - start' a multiple of 2 */
	u8 *end;
};
/* Can the specified number of bits always be added to 'bitbuf' after all
 * pending 16-bit coding units have been flushed? */
#define CAN_BUFFER(n)	((n) <= WORDBITS - 15)

/* Initialize the output bitstream to write to the specified buffer. */
static void
lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
{
	os->bitbuf = 0;
	os->bitcount = 0;
	os->start = buffer;
	os->next = buffer;
	os->end = (u8 *)buffer + (size & ~1);
}
/*
 * Add some bits to the bitbuffer variable of the output bitstream.  The caller
 * must make sure there is enough room.
 */
static inline void
lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
{
	os->bitbuf = (os->bitbuf << num_bits) | bits;
	os->bitcount += num_bits;
}
/*
 * Flush bits from the bitbuffer variable to the output buffer.  'max_num_bits'
 * specifies the maximum number of bits that may have been added since the last
 * flush.
 */
static inline void
lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
{
	/* Masking the number of bits to shift is only needed to avoid undefined
	 * behavior; we don't actually care about the results of bad shifts.  On
	 * x86, the explicit masking generates no extra code. */
	const u32 shift_mask = WORDBITS - 1;

	if (os->end - os->next < 6)
		return;

	put_unaligned_le16(os->bitbuf >> ((os->bitcount - 16) &
					  shift_mask), os->next + 0);

	if (max_num_bits > 16)
		put_unaligned_le16(os->bitbuf >> ((os->bitcount - 32) &
						  shift_mask), os->next + 2);

	if (max_num_bits > 32)
		put_unaligned_le16(os->bitbuf >> ((os->bitcount - 48) &
						  shift_mask), os->next + 4);

	os->next += (os->bitcount >> 4) << 1;
	os->bitcount &= 15;
}
/* Add at most 16 bits to the bitbuffer and flush it. */
static inline void
lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
{
	lzx_add_bits(os, bits, num_bits);
	lzx_flush_bits(os, 16);
}
/*
 * Flush the last coding unit to the output buffer if needed.  Return the total
 * number of bytes written to the output buffer, or 0 if an overflow occurred.
 */
static u32
lzx_flush_output(struct lzx_output_bitstream *os)
{
	if (os->end - os->next < 6)
		return 0;

	if (os->bitcount != 0) {
		put_unaligned_le16(os->bitbuf << (16 - os->bitcount), os->next);
		os->next += 2;
	}

	return os->next - os->start;
}
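
/*
 * Typical usage of the output bitstream (a minimal sketch; the real block
 * layout and error handling are omitted):
 *
 *	u8 buf[1024];
 *	struct lzx_output_bitstream os;
 *	u32 out_size;
 *
 *	lzx_init_output(&os, buf, sizeof(buf));
 *	lzx_write_bits(&os, LZX_BLOCKTYPE_VERBATIM, 3);	// 3-bit block type
 *	lzx_write_bits(&os, 1, 1);			// default block size
 *	out_size = lzx_flush_output(&os);		// 0 means overflow
 */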
/******************************************************************************/
/*                           Preparing Huffman codes                          */
/*----------------------------------------------------------------------------*/
/*
 * Build the Huffman codes.  This takes as input the frequency tables for each
 * code and produces as output a set of tables that map symbols to codewords and
 * codeword lengths.
 */
static void
lzx_build_huffman_codes(struct lzx_compressor *c)
{
	const struct lzx_freqs *freqs = &c->freqs;
	struct lzx_codes *codes = &c->codes[c->codes_index];

	STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
		      MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
	make_canonical_huffman_code(c->num_main_syms,
				    MAIN_CODEWORD_LIMIT,
				    freqs->main,
				    codes->lens.main,
				    codes->codewords.main);

	STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
		      LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
	make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
				    LENGTH_CODEWORD_LIMIT,
				    freqs->len,
				    codes->lens.len,
				    codes->codewords.len);

	STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
		      ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
	make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
				    ALIGNED_CODEWORD_LIMIT,
				    freqs->aligned,
				    codes->lens.aligned,
				    codes->codewords.aligned);
}
/* Reset the symbol frequencies for the current block. */
static void
lzx_reset_symbol_frequencies(struct lzx_compressor *c)
{
	memset(&c->freqs, 0, sizeof(c->freqs));
}
/*
 * Compute the "items" (RLE / literal tokens and extra bits) needed to represent
 * the given codeword lengths as deltas from the previous block's lengths, and
 * tally the frequency of each precode symbol used.  Returns the number of items
 * written to 'precode_items'.
 */
static unsigned
lzx_compute_precode_items(const u8 lens[restrict],
			  const u8 prev_lens[restrict],
			  u32 precode_freqs[restrict],
			  unsigned precode_items[restrict])
{
	unsigned *itemptr;
	unsigned run_start;
	unsigned run_end;
	unsigned extra_bits;
	int delta;
	u8 len;

	itemptr = precode_items;
	run_start = 0;

	while (!((len = lens[run_start]) & 0x80)) {

		/* len = the length being repeated */

		/* Find the next run of codeword lengths. */

		run_end = run_start + 1;

		/* Fast case for a single length. */
		if (likely(len != lens[run_end])) {
			delta = prev_lens[run_start] - len;
			if (delta < 0)
				delta += 17;
			precode_freqs[delta]++;
			*itemptr++ = delta;
			run_start++;
			continue;
		}

		/* Extend the run. */
		do {
			run_end++;
		} while (len == lens[run_end]);

		if (len == 0) {
			/* Run of zeroes. */

			/* Symbol 18: RLE 20 to 51 zeroes at a time. */
			while ((run_end - run_start) >= 20) {
				extra_bits = min((run_end - run_start) - 20, 0x1F);
				precode_freqs[18]++;
				*itemptr++ = 18 | (extra_bits << 5);
				run_start += 20 + extra_bits;
			}

			/* Symbol 17: RLE 4 to 19 zeroes at a time. */
			if ((run_end - run_start) >= 4) {
				extra_bits = min((run_end - run_start) - 4, 0xF);
				precode_freqs[17]++;
				*itemptr++ = 17 | (extra_bits << 5);
				run_start += 4 + extra_bits;
			}
		} else {

			/* A run of nonzero lengths. */

			/* Symbol 19: RLE 4 to 5 of any length at a time. */
			while ((run_end - run_start) >= 4) {
				extra_bits = (run_end - run_start) > 4;
				delta = prev_lens[run_start] - len;
				if (delta < 0)
					delta += 17;
				precode_freqs[19]++;
				precode_freqs[delta]++;
				*itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
				run_start += 4 + extra_bits;
			}
		}

		/* Output any remaining lengths without RLE. */
		while (run_start != run_end) {
			delta = prev_lens[run_start] - len;
			if (delta < 0)
				delta += 17;
			precode_freqs[delta]++;
			*itemptr++ = delta;
			run_start++;
		}
	}

	return itemptr - precode_items;
}
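
/*
 * Worked example (illustrative): with 'prev_lens' all zero and 'lens' starting
 * with five 5s followed by 24 zeroes, the five 5s become one item for precode
 * symbol 19 (run of 4-5) with extra bit 1 and delta (0 - 5) mod 17 = 12, and
 * the 24 zeroes become one item for precode symbol 18 (run of 20-51 zeroes)
 * with extra bits 4.  Two items replace 29 individual lengths.
 */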
/******************************************************************************/
/*                         Outputting compressed data                         */
/*----------------------------------------------------------------------------*/
/*
 * Output a Huffman code in the compressed form used in LZX.
 *
 * The Huffman code is represented in the output as a logical series of codeword
 * lengths from which the Huffman code, which must be in canonical form, can be
 * reconstructed.
 *
 * The codeword lengths are themselves compressed using a separate Huffman code,
 * the "precode", which contains a symbol for each possible codeword length in
 * the larger code as well as several special symbols to represent repeated
 * codeword lengths (a form of run-length encoding).  The precode is itself
 * constructed in canonical form, and its codeword lengths are represented
 * literally in 20 4-bit fields that immediately precede the compressed codeword
 * lengths of the larger code.
 *
 * Furthermore, the codeword lengths of the larger code are actually represented
 * as deltas from the codeword lengths of the corresponding code in the previous
 * block.
 *
 * @os:
 *	Bitstream to which to write the compressed Huffman code.
 * @lens:
 *	The codeword lengths, indexed by symbol, in the Huffman code.
 * @prev_lens:
 *	The codeword lengths, indexed by symbol, in the corresponding Huffman
 *	code in the previous block, or all zeroes if this is the first block.
 * @num_lens:
 *	The number of symbols in the Huffman code.
 */
static void
lzx_write_compressed_code(struct lzx_output_bitstream *os,
			  const u8 lens[restrict],
			  const u8 prev_lens[restrict],
			  unsigned num_lens)
{
	u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
	u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
	u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
	unsigned precode_items[num_lens];
	unsigned num_precode_items;
	unsigned precode_item;
	unsigned precode_sym;
	unsigned i;

	/* Temporarily set a sentinel at the end of the lens array; the loop in
	 * lzx_compute_precode_items() stops when it sees it. */
	u8 saved = lens[num_lens];
	*(u8 *)(lens + num_lens) = 0x80;
	for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
		precode_freqs[i] = 0;

	/* Compute the "items" (RLE / literal tokens and extra bits) with which
	 * the codeword lengths in the larger code will be output. */
	num_precode_items = lzx_compute_precode_items(lens,
						      prev_lens,
						      precode_freqs,
						      precode_items);

	/* Build the precode. */
	STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 &&
		      PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN);
	make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS, PRE_CODEWORD_LIMIT,
				    precode_freqs, precode_lens,
				    precode_codewords);

	/* Output the lengths of the codewords in the precode. */
	for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
		lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);

	/* Output the encoded lengths of the codewords in the larger code. */
	for (i = 0; i < num_precode_items; i++) {
		precode_item = precode_items[i];
		precode_sym = precode_item & 0x1F;
		lzx_add_bits(os, precode_codewords[precode_sym],
			     precode_lens[precode_sym]);
		if (precode_sym >= 17) {
			if (precode_sym == 17) {
				lzx_add_bits(os, precode_item >> 5, 4);
			} else if (precode_sym == 18) {
				lzx_add_bits(os, precode_item >> 5, 5);
			} else {
				lzx_add_bits(os, (precode_item >> 5) & 1, 1);
				precode_sym = precode_item >> 6;
				lzx_add_bits(os, precode_codewords[precode_sym],
					     precode_lens[precode_sym]);
			}
		}
		STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1));
		lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1);
	}

	/* Restore the byte that was overwritten with the sentinel. */
	*(u8 *)(lens + num_lens) = saved;
}
/*
 * Write all matches and literal bytes (which were precomputed) in an LZX
 * compressed block to the output bitstream in the final compressed
 * representation.
 *
 * @os:
 *	The output bitstream.
 * @block_type:
 *	The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
 *	LZX_BLOCKTYPE_VERBATIM).
 * @block_data:
 *	The uncompressed data of the block.
 * @sequences:
 *	The matches and literals to output, given as a series of sequences.
 * @codes:
 *	The main, length, and aligned offset Huffman codes for the block.
 */
static void
lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
		    const u8 *block_data, const struct lzx_sequence sequences[],
		    const struct lzx_codes *codes)
{
	const struct lzx_sequence *seq = sequences;
	unsigned min_aligned_offset_slot;

	if (block_type == LZX_BLOCKTYPE_ALIGNED)
		min_aligned_offset_slot = LZX_MIN_ALIGNED_OFFSET_SLOT;
	else
		min_aligned_offset_slot = LZX_MAX_OFFSET_SLOTS;
	for (;;) {
		/* Output the next sequence. */

		unsigned litrunlen = seq->litrunlen;
		unsigned match_hdr;
		unsigned main_symbol;
		unsigned adjusted_length;
		u32 adjusted_offset;
		unsigned offset_slot;
		unsigned num_extra_bits;
		u32 extra_bits;

		/* Output the literal run of the sequence. */

		if (litrunlen) {  /* Is the literal run nonempty? */

			/* Verify optimization is enabled on 64-bit */
			STATIC_ASSERT(WORDBITS < 64 ||
				      CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT));

			if (CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT)) {

				/* 64-bit: write 3 literals at a time. */
				while (litrunlen >= 3) {
					unsigned lit0 = block_data[0];
					unsigned lit1 = block_data[1];
					unsigned lit2 = block_data[2];
					lzx_add_bits(os, codes->codewords.main[lit0],
						     codes->lens.main[lit0]);
					lzx_add_bits(os, codes->codewords.main[lit1],
						     codes->lens.main[lit1]);
					lzx_add_bits(os, codes->codewords.main[lit2],
						     codes->lens.main[lit2]);
					lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
					block_data += 3;
					litrunlen -= 3;
				}

				/* Output any remaining literals. */
				if (litrunlen--) {
					unsigned lit = *block_data++;
					lzx_add_bits(os, codes->codewords.main[lit],
						     codes->lens.main[lit]);
					if (litrunlen--) {
						unsigned lit = *block_data++;
						lzx_add_bits(os, codes->codewords.main[lit],
							     codes->lens.main[lit]);
						lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
					} else {
						lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT);
					}
				}
			} else {
				/* 32-bit: write 1 literal at a time. */
				do {
					unsigned lit = *block_data++;
					lzx_add_bits(os, codes->codewords.main[lit],
						     codes->lens.main[lit]);
					lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
				} while (--litrunlen);
			}
		}
		/* Was this the last literal run? */
		if (seq->adjusted_offset_and_match_hdr & 0x80000000)
			return;

		/* Nope; output the match. */

		match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
		main_symbol = LZX_NUM_CHARS + match_hdr;
		adjusted_length = seq->adjusted_length;

		block_data += adjusted_length + LZX_MIN_MATCH_LEN;

		offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
		adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;

		num_extra_bits = lzx_extra_offset_bits[offset_slot];
		extra_bits = adjusted_offset - (lzx_offset_slot_base[offset_slot] +
						LZX_OFFSET_ADJUSTMENT);

#define MAX_MATCH_BITS (MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
			14 + ALIGNED_CODEWORD_LIMIT)

		/* Verify optimization is enabled on 64-bit */
		STATIC_ASSERT(WORDBITS < 64 || CAN_BUFFER(MAX_MATCH_BITS));
		/* Output the main symbol for the match. */

		lzx_add_bits(os, codes->codewords.main[main_symbol],
			     codes->lens.main[main_symbol]);
		if (!CAN_BUFFER(MAX_MATCH_BITS))
			lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);

		/* If needed, output the length symbol for the match. */

		if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
			lzx_add_bits(os, codes->codewords.len[adjusted_length -
							      LZX_NUM_PRIMARY_LENS],
				     codes->lens.len[adjusted_length -
						     LZX_NUM_PRIMARY_LENS]);
			if (!CAN_BUFFER(MAX_MATCH_BITS))
				lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
		}
		/* Output the extra offset bits for the match.  In aligned
		 * offset blocks, the lowest 3 bits of the adjusted offset are
		 * Huffman-encoded using the aligned offset code, provided that
		 * at least 3 extra offset bits are required.  All other extra
		 * offset bits are output verbatim. */

		if (offset_slot >= min_aligned_offset_slot) {

			lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
				     num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
			if (!CAN_BUFFER(MAX_MATCH_BITS))
				lzx_flush_bits(os, 14);

			lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
								  LZX_ALIGNED_OFFSET_BITMASK],
				     codes->lens.aligned[adjusted_offset &
							 LZX_ALIGNED_OFFSET_BITMASK]);
			if (!CAN_BUFFER(MAX_MATCH_BITS))
				lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
		} else {
			STATIC_ASSERT(CAN_BUFFER(17));

			lzx_add_bits(os, extra_bits, num_extra_bits);
			if (!CAN_BUFFER(MAX_MATCH_BITS))
				lzx_flush_bits(os, 17);
		}

		if (CAN_BUFFER(MAX_MATCH_BITS))
			lzx_flush_bits(os, MAX_MATCH_BITS);

		/* Advance to the next sequence. */
		seq++;
	}
}
static void
lzx_write_compressed_block(const u8 *block_begin,
			   int block_type,
			   u32 block_size,
			   unsigned window_order,
			   unsigned num_main_syms,
			   const struct lzx_sequence sequences[],
			   const struct lzx_codes * codes,
			   const struct lzx_lens * prev_lens,
			   struct lzx_output_bitstream * os)
{
	/* The first three bits indicate the type of block and are one of the
	 * LZX_BLOCKTYPE_* constants. */
	lzx_write_bits(os, block_type, 3);
	/*
	 * Output the block size.
	 *
	 * The original LZX format encoded the block size in 24 bits.  However,
	 * the LZX format used in WIM archives uses 1 bit to specify whether the
	 * block has the default size of 32768 bytes, then optionally 16 bits to
	 * specify a non-default size.  This works fine for Microsoft's WIM
	 * software (WIMGAPI), which never compresses more than 32768 bytes at a
	 * time with LZX.  However, as an extension, our LZX compressor supports
	 * compressing up to 2097152 bytes, with a corresponding increase in
	 * window size.  It is possible for blocks in these larger buffers to
	 * exceed 65535 bytes; such blocks cannot have their size represented in
	 * 16 bits.
	 *
	 * The chosen solution was to use 24 bits for the block size when
	 * possibly required --- specifically, when the compressor has been
	 * allocated to be capable of compressing more than 32768 bytes at once
	 * (which also causes the number of main symbols to be increased).
	 */
	if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
		lzx_write_bits(os, 1, 1);
	} else {
		lzx_write_bits(os, 0, 1);

		if (window_order >= 16)
			lzx_write_bits(os, block_size >> 16, 8);

		lzx_write_bits(os, block_size & 0xFFFF, 16);
	}
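
	/*
	 * Worked example (illustrative): block_size = 70000 with
	 * window_order >= 16 is written as a 0 bit, then 70000 >> 16 = 1 in 8
	 * bits, then 70000 & 0xFFFF = 4464 in 16 bits.
	 */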
	/* If it's an aligned offset block, output the aligned offset code. */
	if (block_type == LZX_BLOCKTYPE_ALIGNED) {
		for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
			lzx_write_bits(os, codes->lens.aligned[i],
				       LZX_ALIGNEDCODE_ELEMENT_SIZE);
		}
	}

	/* Output the main code (two parts). */
	lzx_write_compressed_code(os, codes->lens.main,
				  prev_lens->main,
				  LZX_NUM_CHARS);
	lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
				  prev_lens->main + LZX_NUM_CHARS,
				  num_main_syms - LZX_NUM_CHARS);

	/* Output the length code. */
	lzx_write_compressed_code(os, codes->lens.len,
				  prev_lens->len,
				  LZX_LENCODE_NUM_SYMBOLS);

	/* Output the compressed matches and literals. */
	lzx_write_sequences(os, block_type, block_begin, sequences, codes);
}
/*
 * Given the frequencies of symbols in an LZX-compressed block and the
 * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
 * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
 * will take fewer bits to output.
 */
static int
lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
			       const struct lzx_codes * codes)
{
	u32 verbatim_cost = 0;
	u32 aligned_cost = 0;

	/* A verbatim block requires 3 bits in each place that an aligned offset
	 * symbol would be used in an aligned offset block. */
	for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
		verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
		aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
	}

	/* Account for the cost of sending the codeword lengths of the aligned
	 * offset code. */
	aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE *
			LZX_ALIGNEDCODE_NUM_SYMBOLS;

	if (aligned_cost < verbatim_cost)
		return LZX_BLOCKTYPE_ALIGNED;
	else
		return LZX_BLOCKTYPE_VERBATIM;
}
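
/*
 * Worked example (illustrative): if a block contains 1000 aligned offset
 * symbol occurrences averaging 2.5 bits each under the aligned code, the
 * aligned encoding costs 2500 + 8 * 3 = 2524 bits (including the eight 3-bit
 * codeword lengths of the aligned code itself) versus 3000 bits verbatim, so
 * ALIGNED is chosen.  This assumes the usual LZX parameters
 * LZX_ALIGNEDCODE_NUM_SYMBOLS == 8 and LZX_ALIGNEDCODE_ELEMENT_SIZE == 3.
 */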
/*
 * Flush an LZX block:
 *
 * 1. Build the Huffman codes.
 * 2. Decide whether to output the block as VERBATIM or ALIGNED.
 * 3. Write the block.
 * 4. Swap the indices of the current and previous Huffman codes.
 *
 * Note: we never output UNCOMPRESSED blocks.  This probably should be
 * implemented sometime, but it doesn't make much difference.
 */
static void
lzx_flush_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
		const u8 *block_begin, u32 block_size, u32 seq_idx)
{
	int block_type;

	lzx_build_huffman_codes(c);

	block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
						    &c->codes[c->codes_index]);
	lzx_write_compressed_block(block_begin,
				   block_type,
				   block_size,
				   c->window_order,
				   c->num_main_syms,
				   &c->chosen_sequences[seq_idx],
				   &c->codes[c->codes_index],
				   &c->codes[c->codes_index ^ 1].lens,
				   os);

	c->codes_index ^= 1;
}
/******************************************************************************/
/*                          Block splitting algorithm                         */
/*----------------------------------------------------------------------------*/
/*
 * The problem of block splitting is to decide when it is worthwhile to start a
 * new block with new entropy codes.  There is a theoretically optimal solution:
 * recursively consider every possible block split, considering the exact cost
 * of each block, and choose the minimum cost approach.  But this is far too
 * slow.  Instead, as an approximation, we can count symbols and after every N
 * symbols, compare the expected distribution of symbols based on the previous
 * data with the actual distribution.  If they differ "by enough", then start a
 * new block.
 *
 * As an optimization and heuristic, we don't distinguish between every symbol
 * but rather we combine many symbols into a single "observation type".  For
 * literals we only look at the high bits and low bits, and for matches we only
 * look at whether the match is long or not.  The assumption is that for typical
 * "real" data, places that are good block boundaries will tend to be noticeable
 * based only on changes in these aggregate frequencies, without looking for
 * subtle differences in individual symbols.  For example, a change from ASCII
 * bytes to non-ASCII bytes, or from few matches (generally less compressible)
 * to many matches (generally more compressible), would be easily noticed based
 * on the aggregates.
 *
 * For determining whether the frequency distributions are "different enough" to
 * start a new block, the simple heuristic of splitting when the sum of absolute
 * differences exceeds a constant seems to be good enough.
 *
 * Finally, for an approximation, it is not strictly necessary that the exact
 * symbols being used are considered.  With "near-optimal parsing", for example,
 * the actual symbols that will be used are unknown until after the block
 * boundary is chosen and the block has been optimized.  Since the final choices
 * cannot be used, we can use preliminary "greedy" choices instead.
 */
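
/*
 * Worked example (illustrative): suppose 4000 observations have accumulated
 * and 400 new ones arrive.  For each observation type, the expected count
 * 'observations[i] * 400' is compared against the actual count
 * 'new_observations[i] * 4000'; cross-multiplying this way avoids divisions.
 * The block is ended when the summed absolute difference reaches
 * 400 * 7/8 * 4000 = 1400000, i.e. when the distributions differ by roughly
 * 7/8 in these scaled units.
 */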
/* Initialize the block split statistics when starting a new block. */
static void
lzx_init_block_split_stats(struct lzx_block_split_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
}
/* Literal observation.  Heuristic: use the top 2 bits and low 1 bit of the
 * literal, for 8 possible literal observation types. */
static inline void
lzx_observe_literal(struct lzx_block_split_stats *stats, u8 lit)
{
	stats->new_observations[((lit >> 5) & 0x6) | (lit & 1)]++;
	stats->num_new_observations++;
}
/* Match observation.  Heuristic: use one observation type for "short match" and
 * one observation type for "long match". */
static inline void
lzx_observe_match(struct lzx_block_split_stats *stats, unsigned length)
{
	stats->new_observations[NUM_LITERAL_OBSERVATION_TYPES + (length >= 5)]++;
	stats->num_new_observations++;
}
static bool
lzx_should_end_block(struct lzx_block_split_stats *stats)
{
	if (stats->num_observations > 0) {

		/* Note: to avoid slow divisions, we do not divide by
		 * 'num_observations', but rather do all math with the numbers
		 * multiplied by 'num_observations'. */
		u32 total_delta = 0;

		for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
			u32 expected = stats->observations[i] *
				       stats->num_new_observations;
			u32 actual = stats->new_observations[i] *
				     stats->num_observations;
			u32 delta = (actual > expected) ? actual - expected :
							  expected - actual;
			total_delta += delta;
		}

		/* Ready to end the block? */
		if (total_delta >=
		    stats->num_new_observations * 7 / 8 * stats->num_observations)
			return true;
	}

	for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
		stats->num_observations += stats->new_observations[i];
		stats->observations[i] += stats->new_observations[i];
		stats->new_observations[i] = 0;
	}
	stats->num_new_observations = 0;

	return false;
}
/******************************************************************************/
/*                Slower ("near-optimal") compression algorithm               */
/*----------------------------------------------------------------------------*/
/*
 * Least-recently-used queue for match offsets.
 *
 * This is represented as a 64-bit integer for efficiency.  There are three
 * offsets of 21 bits each.  The remaining top bit is garbage.
 */
struct lzx_lru_queue {
	u64 R;
} _aligned_attribute(8);
#define LZX_QUEUE_OFFSET_SHIFT	21
#define LZX_QUEUE_OFFSET_MASK	(((u64)1 << LZX_QUEUE_OFFSET_SHIFT) - 1)

#define LZX_QUEUE_R0_SHIFT	(0 * LZX_QUEUE_OFFSET_SHIFT)
#define LZX_QUEUE_R1_SHIFT	(1 * LZX_QUEUE_OFFSET_SHIFT)
#define LZX_QUEUE_R2_SHIFT	(2 * LZX_QUEUE_OFFSET_SHIFT)

#define LZX_QUEUE_R0_MASK	(LZX_QUEUE_OFFSET_MASK << LZX_QUEUE_R0_SHIFT)
#define LZX_QUEUE_R1_MASK	(LZX_QUEUE_OFFSET_MASK << LZX_QUEUE_R1_SHIFT)
#define LZX_QUEUE_R2_MASK	(LZX_QUEUE_OFFSET_MASK << LZX_QUEUE_R2_SHIFT)

#define LZX_QUEUE_INITIALIZER {			\
	((u64)1 << LZX_QUEUE_R0_SHIFT) |	\
	((u64)1 << LZX_QUEUE_R1_SHIFT) |	\
	((u64)1 << LZX_QUEUE_R2_SHIFT) }
static inline u32
lzx_lru_queue_R0(struct lzx_lru_queue queue)
{
	return (queue.R >> LZX_QUEUE_R0_SHIFT) & LZX_QUEUE_OFFSET_MASK;
}

static inline u32
lzx_lru_queue_R1(struct lzx_lru_queue queue)
{
	return (queue.R >> LZX_QUEUE_R1_SHIFT) & LZX_QUEUE_OFFSET_MASK;
}

static inline u32
lzx_lru_queue_R2(struct lzx_lru_queue queue)
{
	return (queue.R >> LZX_QUEUE_R2_SHIFT) & LZX_QUEUE_OFFSET_MASK;
}
/* Push a match offset onto the front (most recently used) end of the queue. */
static inline struct lzx_lru_queue
lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
{
	return (struct lzx_lru_queue) {
		.R = (queue.R << LZX_QUEUE_OFFSET_SHIFT) | offset,
	};
}
/* Swap a match offset to the front of the queue. */
static inline struct lzx_lru_queue
lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
{
	unsigned shift = idx * 21;
	const u64 mask = LZX_QUEUE_R0_MASK;
	const u64 mask_high = mask << shift;

	return (struct lzx_lru_queue) {
		(queue.R & ~(mask | mask_high)) |
		((queue.R & mask_high) >> shift) |
		((queue.R & mask) << shift)
	};
}
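
/*
 * Worked example (illustrative): starting from LZX_QUEUE_INITIALIZER
 * (R0 = R1 = R2 = 1), lzx_lru_queue_push(queue, 100) gives R0 = 100, R1 = 1,
 * R2 = 1, with the old R2 shifted off the top.  Then
 * lzx_lru_queue_swap(queue, 2) exchanges R0 and R2, giving R0 = 1, R1 = 1,
 * R2 = 100.
 */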
static u32
lzx_walk_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit,
		   bool record)
{
	u32 node_idx = block_size;
	u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
	u32 lit_start_node;

	if (record) {
		/* Special value to mark last sequence */
		c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000;
		lit_start_node = node_idx;
	}

	for (;;) {
		u32 item;
		unsigned len;
		u32 adjusted_offset;
		unsigned v;
		unsigned offset_slot;

		/* Tally literals until either a match or the beginning of the
		 * block is reached.  Note: the item in the node at the
		 * beginning of the block has all bits set, causing this loop to
		 * end when it is reached. */
		for (;;) {
			item = c->optimum_nodes[node_idx].item;
			if (item & OPTIMUM_LEN_MASK)
				break;
			c->freqs.main[item >> OPTIMUM_OFFSET_SHIFT]++;
			node_idx--;
		}

#if CONSIDER_GAP_MATCHES
		if (item & OPTIMUM_GAP_MATCH) {

			if (node_idx == 0)
				break;

			/* Record the literal run length for the next sequence
			 * (the "previous sequence" when walking backwards). */
			len = item & OPTIMUM_LEN_MASK;
			if (record) {
				c->chosen_sequences[seq_idx--].litrunlen =
						lit_start_node - node_idx;
				lit_start_node = node_idx - len;
			}

			/* Tally the rep0 match after the gap. */
			v = len - LZX_MIN_MATCH_LEN;
			if (record)
				c->chosen_sequences[seq_idx].adjusted_length = v;
			if (v >= LZX_NUM_PRIMARY_LENS) {
				c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
				v = LZX_NUM_PRIMARY_LENS;
			}
			c->freqs.main[LZX_NUM_CHARS + v]++;
			if (record)
				c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = v;

			/* Tally the literal in the gap. */
			c->freqs.main[(u8)(item >> OPTIMUM_OFFSET_SHIFT)]++;

			/* Fall through and tally the match before the gap.
			 * (It was temporarily saved in the 'cost' field of the
			 * previous node, which was free to reuse.) */
			item = c->optimum_nodes[--node_idx].cost;
			node_idx -= len;
		}
#else /* CONSIDER_GAP_MATCHES */
		if (node_idx == 0)
			break;
#endif /* !CONSIDER_GAP_MATCHES */

		len = item & OPTIMUM_LEN_MASK;
		adjusted_offset = item >> OPTIMUM_OFFSET_SHIFT;

		/* Record the literal run length for the next sequence (the
		 * "previous sequence" when walking backwards). */
		if (record)
			c->chosen_sequences[seq_idx--].litrunlen =
					lit_start_node - node_idx;
		node_idx -= len;
		lit_start_node = node_idx;

		/* Record a match. */

		/* Tally the aligned offset symbol if needed. */
		if (adjusted_offset >= LZX_MIN_ALIGNED_OFFSET + LZX_OFFSET_ADJUSTMENT)
			c->freqs.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]++;

		/* Record the adjusted length. */
		v = len - LZX_MIN_MATCH_LEN;
		if (record)
			c->chosen_sequences[seq_idx].adjusted_length = v;

		/* Tally the length symbol if needed. */
		if (v >= LZX_NUM_PRIMARY_LENS) {
			c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
			v = LZX_NUM_PRIMARY_LENS;
		}

		/* Tally the main symbol. */
		offset_slot = lzx_get_offset_slot(c, adjusted_offset, is_16_bit);
		v += offset_slot * LZX_NUM_LEN_HEADERS;
		c->freqs.main[LZX_NUM_CHARS + v]++;

		/* Record the adjusted offset and match header. */
		if (record)
			c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr =
					(adjusted_offset << 9) | v;
	}

	/* Record the literal run length for the first sequence. */
	if (record)
		c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;

	/* Return the index in chosen_sequences at which the sequences begin. */
	return seq_idx;
}
/*
 * Given the minimum-cost path computed through the item graph for the current
 * block, walk the path and count how many of each symbol in each Huffman-coded
 * alphabet would be required to output the items (matches and literals) along
 * the path.
 *
 * Note that the path will be walked backwards (from the end of the block to the
 * beginning of the block), but this doesn't matter because this function only
 * computes frequencies.
 */
static void
lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
{
	lzx_walk_item_list(c, block_size, is_16_bit, false);
}
/*
 * Like lzx_tally_item_list(), but this function also generates the list of
 * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
 * ready to be output to the bitstream after the Huffman codes are computed.
 * The lzx_sequences will be written to decreasing memory addresses as the path
 * is walked backwards, which means they will end up in the expected
 * first-to-last order.  The return value is the index in c->chosen_sequences at
 * which the lzx_sequences begin.
 */
static u32
lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
{
	return lzx_walk_item_list(c, block_size, is_16_bit, true);
}
/*
 * Find an inexpensive path through the graph of possible match/literal choices
 * for the current block.  The nodes of the graph are
 * c->optimum_nodes[0...block_size].  They correspond directly to the bytes in
 * the current block, plus one extra node for end-of-block.  The edges of the
 * graph are matches and literals.  The goal is to find the minimum cost path
 * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]', given the cost
 * model 'c->costs'.
 *
 * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
 * proceeding forwards one node at a time.  At each node, a selection of matches
 * (len >= 2), as well as the literal byte (len = 1), is considered.  An item of
 * length 'len' provides a new path to reach the node 'len' bytes later.  If
 * such a path is the lowest cost found so far to reach that later node, then
 * that later node is updated with the new cost and the "arrival" which provided
 * that cost.
 *
 * Note that although this algorithm is based on minimum cost path search, due
 * to various simplifying assumptions the result is not guaranteed to be the
 * true minimum cost, or "optimal", path over the graph of all valid LZX
 * representations of this block.
 *
 * Also, note that because of the presence of the recent offsets queue (which is
 * a type of adaptive state), the algorithm cannot work backwards and compute
 * "cost to end" instead of "cost to beginning".  Furthermore, the way the
 * algorithm handles this adaptive state in the "minimum cost" parse is actually
 * only an approximation.  It's possible for the globally optimal, minimum cost
 * path to contain a prefix, ending at a position, where that path prefix is
 * *not* the minimum cost path to that position.  This can happen if such a path
 * prefix results in a different adaptive state which results in lower costs
 * later.  The algorithm does not solve this problem in general; it only looks
 * one step ahead, with the exception of special consideration for "gap
 * matches".
 */
static inline struct lzx_lru_queue
lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
		       const u8 * const restrict block_begin,
		       const u32 block_size,
		       const struct lzx_lru_queue initial_queue,
		       bool is_16_bit)
{
	struct lzx_optimum_node *cur_node = c->optimum_nodes;
	struct lzx_optimum_node * const end_node = cur_node + block_size;
	struct lz_match *cache_ptr = c->match_cache;
	const u8 *in_next = block_begin;
	const u8 * const block_end = block_begin + block_size;

	/*
	 * Instead of storing the match offset LRU queues in the
	 * 'lzx_optimum_node' structures, we save memory (and cache lines) by
	 * storing them in a smaller array.  This works because the algorithm
	 * only requires a limited history of the adaptive state.  Once a given
	 * state is more than LZX_MAX_MATCH_LEN bytes behind the current node
	 * (more if gap match consideration is enabled; we just round up to 512
	 * so it's a power of 2), it is no longer needed.
	 *
	 * The QUEUE() macro finds the queue for the given node.  This macro has
	 * been optimized by taking advantage of 'struct lzx_lru_queue' and
	 * 'struct lzx_optimum_node' both being 8 bytes in size and alignment.
	 */
	struct lzx_lru_queue queues[512];
	STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
	STATIC_ASSERT(sizeof(c->optimum_nodes[0]) == sizeof(queues[0]));
#define QUEUE(node) \
	(*(struct lzx_lru_queue *)((char *)queues + \
		((uintptr_t)(node) % (ARRAY_LEN(queues) * sizeof(queues[0])))))
	/*(queues[(uintptr_t)(node) / sizeof(*(node)) % ARRAY_LEN(queues)])*/

#if CONSIDER_GAP_MATCHES
	u32 matches_before_gap[ARRAY_LEN(queues)];
#define MATCH_BEFORE_GAP(node) \
	(matches_before_gap[(uintptr_t)(node) / sizeof(*(node)) % \
			    ARRAY_LEN(matches_before_gap)])
#endif

	/*
	 * Initially, the cost to reach each node is "infinity".
	 *
	 * The first node actually should have cost 0, but "infinity"
	 * (0xFFFFFFFF) works just as well because it immediately overflows.
	 *
	 * The following statement also intentionally sets the 'item' of the
	 * first node, which would otherwise have no meaning, to 0xFFFFFFFF for
	 * use as a sentinel.  See lzx_walk_item_list().
	 */
	memset(c->optimum_nodes, 0xFF,
	       (block_size + 1) * sizeof(c->optimum_nodes[0]));

	/* Initialize the recent offsets queue for the first node. */
	QUEUE(cur_node) = initial_queue;
	do { /* For each node in the block in position order... */

		unsigned num_matches;
		unsigned literal;
		u32 cost;

		/*
		 * A selection of matches for the block was already saved in
		 * memory so that we don't have to run the uncompressed data
		 * through the matchfinder on every optimization pass.  However,
		 * we still search for repeat offset matches during each
		 * optimization pass because we cannot predict the state of the
		 * recent offsets queue.  But as a heuristic, we don't bother
		 * searching for repeat offset matches if the general-purpose
		 * matchfinder failed to find any matches.
		 *
		 * Note that a match of length n at some offset implies there is
		 * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
		 * that same offset.  In other words, we don't necessarily need
		 * to use the full length of a match.  The key heuristic that
		 * saves a significant amount of time is that for each distinct
		 * length, we only consider the smallest offset for which that
		 * length is available.  This heuristic also applies to repeat
		 * offsets, which we order specially: R0 < R1 < R2 < any
		 * explicit offset.  Of course, this heuristic may produce
		 * suboptimal results because offset slots in LZX are subject to
		 * entropy encoding, but in practice this is a useful heuristic.
		 */

		num_matches = cache_ptr->length;
		cache_ptr++;

		if (num_matches) {
			struct lz_match *end_matches = cache_ptr + num_matches;
			unsigned next_len = LZX_MIN_MATCH_LEN;
			unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
			const u8 *matchptr;
			/* Consider rep0 matches. */
			matchptr = in_next - lzx_lru_queue_R0(QUEUE(cur_node));
			if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
				goto R0_done;
			STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
			do {
				u32 cost = cur_node->cost +
					   c->costs.match_cost[0][
							next_len - LZX_MIN_MATCH_LEN];
				if (cost <= (cur_node + next_len)->cost) {
					(cur_node + next_len)->cost = cost;
					(cur_node + next_len)->item =
						(0 << OPTIMUM_OFFSET_SHIFT) | next_len;
				}
				if (unlikely(++next_len > max_len)) {
					cache_ptr = end_matches;
					goto done_matches;
				}
			} while (in_next[next_len - 1] == matchptr[next_len - 1]);

		R0_done:
			/* Consider rep1 matches. */
			matchptr = in_next - lzx_lru_queue_R1(QUEUE(cur_node));
			if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
				goto R1_done;
			if (matchptr[next_len - 1] != in_next[next_len - 1])
				goto R1_done;
			for (unsigned len = 2; len < next_len - 1; len++)
				if (matchptr[len] != in_next[len])
					goto R1_done;
			do {
				u32 cost = cur_node->cost +
					   c->costs.match_cost[1][
							next_len - LZX_MIN_MATCH_LEN];
				if (cost <= (cur_node + next_len)->cost) {
					(cur_node + next_len)->cost = cost;
					(cur_node + next_len)->item =
						(1 << OPTIMUM_OFFSET_SHIFT) | next_len;
				}
				if (unlikely(++next_len > max_len)) {
					cache_ptr = end_matches;
					goto done_matches;
				}
			} while (in_next[next_len - 1] == matchptr[next_len - 1]);

		R1_done:
			/* Consider rep2 matches. */
			matchptr = in_next - lzx_lru_queue_R2(QUEUE(cur_node));
			if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
				goto R2_done;
			if (matchptr[next_len - 1] != in_next[next_len - 1])
				goto R2_done;
			for (unsigned len = 2; len < next_len - 1; len++)
				if (matchptr[len] != in_next[len])
					goto R2_done;
			do {
				u32 cost = cur_node->cost +
					   c->costs.match_cost[2][
							next_len - LZX_MIN_MATCH_LEN];
				if (cost <= (cur_node + next_len)->cost) {
					(cur_node + next_len)->cost = cost;
					(cur_node + next_len)->item =
						(2 << OPTIMUM_OFFSET_SHIFT) | next_len;
				}
				if (unlikely(++next_len > max_len)) {
					cache_ptr = end_matches;
					goto done_matches;
				}
			} while (in_next[next_len - 1] == matchptr[next_len - 1]);

		R2_done:
			while (next_len > cache_ptr->length)
				if (++cache_ptr == end_matches)
					goto done_matches;
			/* Consider explicit offset matches. */
			for (;;) {
				u32 offset = cache_ptr->offset;
				u32 adjusted_offset = offset + LZX_OFFSET_ADJUSTMENT;
				unsigned offset_slot = lzx_get_offset_slot(c, adjusted_offset, is_16_bit);
				u32 base_cost = cur_node->cost;
				u32 cost;

#if CONSIDER_ALIGNED_COSTS
				if (offset >= LZX_MIN_ALIGNED_OFFSET)
					base_cost += c->costs.aligned[adjusted_offset &
								      LZX_ALIGNED_OFFSET_BITMASK];
#endif
				do {
					cost = base_cost +
					       c->costs.match_cost[offset_slot][
							next_len - LZX_MIN_MATCH_LEN];
					if (cost < (cur_node + next_len)->cost) {
						(cur_node + next_len)->cost = cost;
						(cur_node + next_len)->item =
							(adjusted_offset << OPTIMUM_OFFSET_SHIFT) | next_len;
					}
				} while (++next_len <= cache_ptr->length);

				if (++cache_ptr == end_matches) {
#if CONSIDER_GAP_MATCHES
					/* Also consider the longest explicit
					 * offset match as a "gap match": match
					 * + literal + rep0 match. */
					s32 remaining = (block_end - in_next) - (s32)next_len;
					if (likely(remaining >= 2)) {
						const u8 *strptr = in_next + next_len;
						const u8 *matchptr = strptr - offset;
						if (load_u16_unaligned(strptr) == load_u16_unaligned(matchptr)) {
							STATIC_ASSERT(ARRAY_LEN(queues) - LZX_MAX_MATCH_LEN - 2 >= 250);
							STATIC_ASSERT(ARRAY_LEN(queues) == ARRAY_LEN(matches_before_gap));
							unsigned limit = min(remaining,
									     min(ARRAY_LEN(queues) - LZX_MAX_MATCH_LEN - 2,
										 LZX_MAX_MATCH_LEN));
							unsigned rep0_len = lz_extend(strptr, matchptr, 2, limit);
							u8 lit = strptr[-1];
							cost += c->costs.main[lit] +
								c->costs.match_cost[0][rep0_len - LZX_MIN_MATCH_LEN];
							unsigned total_len = next_len + rep0_len;
							if (cost < (cur_node + total_len)->cost) {
								(cur_node + total_len)->cost = cost;
								(cur_node + total_len)->item =
									OPTIMUM_GAP_MATCH |
									((u32)lit << OPTIMUM_OFFSET_SHIFT) |
									rep0_len;
								MATCH_BEFORE_GAP(cur_node + total_len) =
									(adjusted_offset << OPTIMUM_OFFSET_SHIFT) |
									(next_len - 1);
							}
						}
					}
#endif /* CONSIDER_GAP_MATCHES */
					break;
				}
			}
		}

	done_matches:
1767 /* Consider coding a literal.
1769 * To avoid an extra branch, actually checking the preferability
1770 * of coding the literal is integrated into the queue update
1772 literal = *in_next++;
1773 cost = cur_node->cost + c->costs.main[literal];
1775 /* Advance to the next position. */
1778 /* The lowest-cost path to the current position is now known.
1779 * Finalize the recent offsets queue that results from taking
1780 * this lowest-cost path. */
1782 if (cost <= cur_node->cost) {
1783 /* Literal: queue remains unchanged. */
1784 cur_node->cost = cost;
1785 cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
1786 QUEUE(cur_node) = QUEUE(cur_node - 1);
1788 /* Match: queue update is needed. */
1789 unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
1790 #if CONSIDER_GAP_MATCHES
1791 s32 adjusted_offset = (s32)cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1792 STATIC_ASSERT(OPTIMUM_GAP_MATCH == 0x80000000); /* assuming sign extension */
1794 u32 adjusted_offset = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
1797 if (adjusted_offset >= LZX_NUM_RECENT_OFFSETS) {
1798 /* Explicit offset match: insert offset at front. */
1800 lzx_lru_queue_push(QUEUE(cur_node - len),
1801 adjusted_offset - LZX_OFFSET_ADJUSTMENT);
1803 #if CONSIDER_GAP_MATCHES
1804 else if (adjusted_offset < 0) {
1805 /* "Gap match": Explicit offset match, then a
1806 * literal, then rep0 match. Save the explicit
1807 * offset match information in the cost field of
1808 * the previous node, which isn't needed
1809 * anymore. Then insert the offset at the front
1811 u32 match_before_gap = MATCH_BEFORE_GAP(cur_node);
1812 (cur_node - 1)->cost = match_before_gap;
1814 lzx_lru_queue_push(QUEUE(cur_node - len - 1 -
1815 (match_before_gap & OPTIMUM_LEN_MASK)),
1816 (match_before_gap >> OPTIMUM_OFFSET_SHIFT) -
1817 LZX_OFFSET_ADJUSTMENT);
1821 /* Repeat offset match: swap offset to front. */
1823 lzx_lru_queue_swap(QUEUE(cur_node - len),
1827 } while (cur_node != end_node);
1829 /* Return the recent offsets queue at the end of the path. */
1830 return QUEUE(cur_node);
1834 * Given the costs for the main and length codewords (c->costs.main and
1835 * c->costs.len), initialize the match cost array (c->costs.match_cost) which
1836 * directly provides the cost of every possible (length, offset slot) pair.
1839 lzx_compute_match_costs(struct lzx_compressor *c)
1841 unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) /
1842 LZX_NUM_LEN_HEADERS;
1843 struct lzx_costs *costs = &c->costs;
1844 unsigned main_symbol = LZX_NUM_CHARS;
1846 for (unsigned offset_slot = 0; offset_slot < num_offset_slots;
1849 u32 extra_cost = lzx_extra_offset_bits[offset_slot] * BIT_COST;
1852 #if CONSIDER_ALIGNED_COSTS
1853 if (offset_slot >= LZX_MIN_ALIGNED_OFFSET_SLOT)
1854 extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * BIT_COST;
1857 for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++) {
1858 costs->match_cost[offset_slot][i] =
1859 costs->main[main_symbol++] + extra_cost;
1862 extra_cost += costs->main[main_symbol++];
1864 for (; i < LZX_NUM_LENS; i++) {
1865 costs->match_cost[offset_slot][i] =
1866 costs->len[i - LZX_NUM_PRIMARY_LENS] +
1873 * Fast approximation for log2f(x). This is not as accurate as the standard C
1874 * version. It does not need to be perfectly accurate because it is only used
1875 * for estimating symbol costs, which is very approximate anyway.
1885 /* Extract the exponent and subtract 127 to remove the bias. This gives
1886 * the integer part of the result. */
1887 float res = ((u.i >> 23) & 0xFF) - 127;
1889 /* Set the exponent to 0 (plus bias of 127). This transforms the number
1890 * to the range [1, 2) while retaining the same mantissa. */
1891 u.i = (u.i & ~(0xFF << 23)) | (127 << 23);
1894 * Approximate the log2 of the transformed number using a degree 2
1895 * interpolating polynomial for log2(x) over the interval [1, 2). Then
1896 * add this to the extracted exponent to produce the final approximation
1899 * The coefficients of the interpolating polynomial used here were found
1900 * using the script tools/log2_interpolation.r.
1902 return res - 1.653124006f + u.f * (1.9941812f - u.f * 0.3347490189f);
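/*
 * Illustrative sanity check (not in the original source): for x = 8.0f the
 * extracted exponent is 3 and the transformed value u.f is exactly 1.0f, so
 * the result is 3 - 1.653124006 + (1.9941812 - 0.3347490189) ~= 3.006,
 * close to the exact value log2(8) = 3.
 */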
1907 * Return the estimated cost of a symbol which has been estimated to have the
1908 * given probability.
1911 lzx_cost_for_probability(float prob)
1914 * The basic formula is:
1916 * entropy = -log2(probability)
1918 * Use this to get the cost in fractional bits. Then multiply by our
1919 * scaling factor of BIT_COST and truncate to a u32.
1921 * In addition, the minimum cost is BIT_COST (one bit) because the
1922 * entropy coding method will be Huffman codes.
1924 u32 cost = -log2f_fast(prob) * BIT_COST;
1925 return max(cost, BIT_COST);
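/*
 * Worked example (illustrative, assuming BIT_COST == 64 as asserted later in
 * this file): a symbol with estimated probability 1/8 has entropy
 * -log2(1/8) = 3 bits, so its scaled cost is 3 * 64 = 192.  A highly
 * probable symbol, say probability 0.9, computes to about 0.152 * 64 ~= 9
 * and is clamped up to the one-bit minimum of 64, since no Huffman codeword
 * can be shorter than one bit.
 */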
1929 * Mapping: number of used literals => heuristic probability of a literal times
1930 * 6870. Generated by running this R command:
1932 * cat(paste(round(6870*2^-((304+(0:256))/64)), collapse=", "))
1934 static const u8 literal_scaled_probs[257] = {
1935 255, 253, 250, 247, 244, 242, 239, 237, 234, 232, 229, 227, 224, 222,
1936 219, 217, 215, 212, 210, 208, 206, 203, 201, 199, 197, 195, 193, 191,
1937 189, 186, 184, 182, 181, 179, 177, 175, 173, 171, 169, 167, 166, 164,
1938 162, 160, 159, 157, 155, 153, 152, 150, 149, 147, 145, 144, 142, 141,
1939 139, 138, 136, 135, 133, 132, 130, 129, 128, 126, 125, 124, 122, 121,
1940 120, 118, 117, 116, 115, 113, 112, 111, 110, 109, 107, 106, 105, 104,
1941 103, 102, 101, 100, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86,
1942 86, 85, 84, 83, 82, 81, 80, 79, 78, 78, 77, 76, 75, 74, 73, 73, 72, 71,
1943 70, 70, 69, 68, 67, 67, 66, 65, 65, 64, 63, 62, 62, 61, 60, 60, 59, 59,
1944 58, 57, 57, 56, 55, 55, 54, 54, 53, 53, 52, 51, 51, 50, 50, 49, 49, 48,
1945 48, 47, 47, 46, 46, 45, 45, 44, 44, 43, 43, 42, 42, 41, 41, 40, 40, 40,
1946 39, 39, 38, 38, 38, 37, 37, 36, 36, 36, 35, 35, 34, 34, 34, 33, 33, 33,
1947 32, 32, 32, 31, 31, 31, 30, 30, 30, 29, 29, 29, 28, 28, 28, 27, 27, 27,
1948 27, 26, 26, 26, 25, 25, 25, 25, 24, 24, 24, 24, 23, 23, 23, 23, 22, 22,
1949 22, 22, 21, 21, 21, 21, 20, 20, 20, 20, 20, 19, 19, 19, 19, 19, 18, 18,
1950 18, 18, 18, 17, 17, 17, 17, 17, 16, 16, 16, 16
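/*
 * Example of how the table above is used (illustrative): with 64 distinct
 * literals in use, literal_scaled_probs[64] == 128, giving a heuristic
 * per-literal probability of 128/6870 ~= 0.0186, i.e. about
 * -log2(0.0186) ~= 5.75 bits per literal before mixing with the observed
 * frequencies in lzx_set_default_costs().
 */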
1954 * Mapping: length symbol => default cost of that symbol. This is derived from
1955 * sample data but has been slightly edited to add more bias towards the
1956 * shortest lengths, which are the most common.
1958 static const u16 lzx_default_len_costs[LZX_LENCODE_NUM_SYMBOLS] = {
1959 300, 310, 320, 330, 360, 396, 399, 416, 451, 448, 463, 466, 505, 492,
1960 503, 514, 547, 531, 566, 561, 589, 563, 592, 586, 623, 602, 639, 627,
1961 659, 643, 657, 650, 685, 662, 661, 672, 685, 686, 696, 680, 657, 682,
1962 666, 699, 674, 699, 679, 709, 688, 712, 692, 714, 694, 716, 698, 712,
1963 706, 727, 714, 727, 713, 723, 712, 718, 719, 719, 720, 735, 725, 735,
1964 728, 740, 727, 739, 727, 742, 716, 733, 733, 740, 738, 746, 737, 747,
1965 738, 745, 736, 748, 742, 749, 745, 749, 743, 748, 741, 752, 745, 752,
1966 747, 750, 747, 752, 748, 753, 750, 752, 753, 753, 749, 744, 752, 755,
1967 753, 756, 745, 748, 746, 745, 723, 757, 755, 758, 755, 758, 752, 757,
1968 754, 757, 755, 759, 755, 758, 753, 755, 755, 758, 757, 761, 755, 750,
1969 758, 759, 759, 760, 758, 751, 757, 757, 759, 759, 758, 759, 758, 761,
1970 750, 761, 758, 760, 759, 761, 758, 761, 760, 752, 759, 760, 759, 759,
1971 757, 762, 760, 761, 761, 748, 761, 760, 762, 763, 752, 762, 762, 763,
1972 762, 762, 763, 763, 762, 763, 762, 763, 762, 763, 763, 764, 763, 762,
1973 763, 762, 762, 762, 764, 764, 763, 764, 763, 763, 763, 762, 763, 763,
1974 762, 764, 764, 763, 762, 763, 763, 763, 763, 762, 764, 763, 762, 764,
1975 764, 763, 763, 765, 764, 764, 762, 763, 764, 765, 763, 764, 763, 764,
1976 762, 764, 764, 754, 763, 764, 763, 763, 762, 763, 584,
1979 /* Set default costs to bootstrap the iterative optimization algorithm. */
1981 lzx_set_default_costs(struct lzx_compressor *c)
1984 u32 num_literals = 0;
1985 u32 num_used_literals = 0;
1986 float inv_num_matches = 1.0f / c->freqs.main[LZX_NUM_CHARS];
1987 float inv_num_items;
1988 float prob_match = 1.0f;
1990 float base_literal_prob;
1992 /* Some numbers here have been hardcoded to assume a bit cost of 64. */
1993 STATIC_ASSERT(BIT_COST == 64);
1995 /* Estimate the number of literals that will be used. 'num_literals' is
1996 * the total number, whereas 'num_used_literals' is the number of
1997 * distinct symbols. */
1998 for (i = 0; i < LZX_NUM_CHARS; i++) {
1999 num_literals += c->freqs.main[i];
2000 num_used_literals += (c->freqs.main[i] != 0);
2003 /* Note: all match headers were tallied as symbol 'LZX_NUM_CHARS'. We
2004 * don't attempt to estimate which ones will be used. */
2006 inv_num_items = 1.0f / (num_literals + c->freqs.main[LZX_NUM_CHARS]);
2007 base_literal_prob = literal_scaled_probs[num_used_literals] *
2010 /* Literal costs. We use two different methods to compute the
2011 * probability of each literal and mix together their results. */
2012 for (i = 0; i < LZX_NUM_CHARS; i++) {
2013 u32 freq = c->freqs.main[i];
2015 float prob = 0.5f * ((freq * inv_num_items) +
2017 c->costs.main[i] = lzx_cost_for_probability(prob);
2020 c->costs.main[i] = 11 * BIT_COST;
2024 /* Match header costs. We just assume that all match headers are
2025 * equally probable, but we do take into account the relative cost of a
2026 * match header vs. a literal depending on how common matches are
2027 * expected to be vs. literals. */
2028 prob_match = max(prob_match, 0.15f);
2029 match_cost = lzx_cost_for_probability(prob_match / (c->num_main_syms -
2031 for (; i < c->num_main_syms; i++)
2032 c->costs.main[i] = match_cost;
2034 /* Length symbol costs. These are just set to fixed values which
2035 * reflect the fact that the smallest lengths are typically the most common,
2036 * and therefore are typically the cheapest. */
2037 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
2038 c->costs.len[i] = lzx_default_len_costs[i];
2040 #if CONSIDER_ALIGNED_COSTS
2041 /* Aligned offset symbol costs. These are derived from the estimated
2042 * probability of each aligned offset symbol. */
2043 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
2044 /* We intentionally tallied the frequencies in the wrong slots,
2045 * not accounting for LZX_OFFSET_ADJUSTMENT, since doing the
2046 * fixup here is faster: a constant 8 subtractions here vs. one
2047 * addition for every match. */
2048 unsigned j = (i - LZX_OFFSET_ADJUSTMENT) & LZX_ALIGNED_OFFSET_BITMASK;
2049 if (c->freqs.aligned[j] != 0) {
2050 float prob = c->freqs.aligned[j] * inv_num_matches;
2051 c->costs.aligned[i] = lzx_cost_for_probability(prob);
2053 c->costs.aligned[i] =
2054 (2 * LZX_NUM_ALIGNED_OFFSET_BITS) * BIT_COST;
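/*
 * Note (illustrative arithmetic): with LZX_NUM_ALIGNED_OFFSET_BITS == 3,
 * this default charges unseen aligned offset symbols 2 * 3 = 6 bits each,
 * twice what the low offset bits would cost in a verbatim block, so symbols
 * with no observed frequency are penalized rather than assumed cheap.
 */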
2060 /* Update the current cost model to reflect the computed Huffman codes. */
2062 lzx_set_costs_from_codes(struct lzx_compressor *c)
2065 const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
2067 for (i = 0; i < c->num_main_syms; i++) {
2068 c->costs.main[i] = (lens->main[i] ? lens->main[i] :
2069 MAIN_CODEWORD_LIMIT) * BIT_COST;
2072 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
2073 c->costs.len[i] = (lens->len[i] ? lens->len[i] :
2074 LENGTH_CODEWORD_LIMIT) * BIT_COST;
2077 #if CONSIDER_ALIGNED_COSTS
2078 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
2079 c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] :
2080 ALIGNED_CODEWORD_LIMIT) * BIT_COST;
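/*
 * Example (illustrative): a main symbol that received a 5-bit codeword in
 * the previous pass now costs 5 * BIT_COST, while a symbol that went unused
 * is charged as if it had a maximum-length (MAIN_CODEWORD_LIMIT-bit)
 * codeword, so later passes can still select it if it becomes worthwhile.
 */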
2086 * Choose a "near-optimal" literal/match sequence to use for the current block,
2087 * then flush the block. Because the cost of each Huffman symbol is unknown
2088 * until the Huffman codes have been built and the Huffman codes themselves
2089 * depend on the symbol frequencies, this uses an iterative optimization
2090 * algorithm to approximate an optimal solution. The first optimization pass
2091 * for the block uses default costs; additional passes use costs derived from
2092 * the Huffman codes computed in the previous pass.
2094 static inline struct lzx_lru_queue
2095 lzx_optimize_and_flush_block(struct lzx_compressor * const restrict c,
2096 struct lzx_output_bitstream * const restrict os,
2097 const u8 * const restrict block_begin,
2098 const u32 block_size,
2099 const struct lzx_lru_queue initial_queue,
2102 unsigned num_passes_remaining = c->num_optim_passes;
2103 struct lzx_lru_queue new_queue;
2106 lzx_set_default_costs(c);
2109 lzx_compute_match_costs(c);
2110 new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
2111 initial_queue, is_16_bit);
2113 if (--num_passes_remaining == 0)
2116 /* At least one optimization pass remains. Update the costs. */
2117 lzx_reset_symbol_frequencies(c);
2118 lzx_tally_item_list(c, block_size, is_16_bit);
2119 lzx_build_huffman_codes(c);
2120 lzx_set_costs_from_codes(c);
2123 /* Done optimizing. Generate the sequence list and flush the block. */
2124 lzx_reset_symbol_frequencies(c);
2125 seq_idx = lzx_record_item_list(c, block_size, is_16_bit);
2126 lzx_flush_block(c, os, block_begin, block_size, seq_idx);
2131 * This is the "near-optimal" LZX compressor.
2133 * For each block, it performs a relatively thorough graph search to find an
2134 * inexpensive (in terms of compressed size) way to output the block.
2136 * Note: there are actually many things this algorithm leaves on the table in
2137 * terms of compression ratio. So although it may be "near-optimal", it is
2138 * certainly not "optimal". The goal is not to produce the optimal compression
2139 * ratio, which for LZX is probably impossible within any practical amount of
2140 * time, but rather to produce a compression ratio significantly better than a
2141 * simpler "greedy" or "lazy" parse while still being relatively fast.
2144 lzx_compress_near_optimal(struct lzx_compressor * restrict c,
2145 const u8 * const restrict in_begin, size_t in_nbytes,
2146 struct lzx_output_bitstream * restrict os,
2149 const u8 * in_next = in_begin;
2150 const u8 * const in_end = in_begin + in_nbytes;
2151 u32 max_len = LZX_MAX_MATCH_LEN;
2152 u32 nice_len = min(c->nice_match_length, max_len);
2153 u32 next_hashes[2] = {0, 0};
2154 struct lzx_lru_queue queue = LZX_QUEUE_INITIALIZER;
2156 /* Initialize the matchfinder. */
2157 CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
2160 /* Starting a new block */
2162 const u8 * const in_block_begin = in_next;
2163 const u8 * const in_max_block_end =
2164 in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
2165 struct lz_match *cache_ptr = c->match_cache;
2166 const u8 *next_search_pos = in_next;
2167 const u8 *next_observation = in_next;
2168 const u8 *next_pause_point =
2169 min(in_next + min(MIN_BLOCK_SIZE,
2170 in_max_block_end - in_next),
2171 in_max_block_end - min(LZX_MAX_MATCH_LEN - 1,
2172 in_max_block_end - in_next));
2174 lzx_init_block_split_stats(&c->split_stats);
2175 lzx_reset_symbol_frequencies(c);
2177 if (in_next >= next_pause_point)
2181 * Run the input buffer through the matchfinder, caching the
2182 * matches, until we decide to end the block.
2184 * For a tighter matchfinding loop, we compute a "pause point",
2185 * which is the next position at which we may need to check
2186 * whether to end the block or to decrease max_len. We then
2187 * only do these extra checks upon reaching the pause point.
2189 resume_matchfinding:
2191 if (in_next >= next_search_pos) {
2192 /* Search for matches at this position. */
2193 struct lz_match *lz_matchptr;
2196 lz_matchptr = CALL_BT_MF(is_16_bit, c,
2197 bt_matchfinder_get_matches,
2202 c->max_search_depth,
2206 cache_ptr->length = lz_matchptr - (cache_ptr + 1);
2207 cache_ptr = lz_matchptr;
2209 /* Accumulate literal/match statistics for block
2210 * splitting and for generating the initial cost
2212 if (in_next >= next_observation) {
2213 best_len = cache_ptr[-1].length;
2214 if (best_len >= 3) {
2215 /* Match (len >= 3) */
2218 * Note: for performance reasons this has
2219 * been simplified significantly:
2221 * - We wait until later to account for
2222 * LZX_OFFSET_ADJUSTMENT.
2223 * - We don't account for repeat offsets.
2224 * - We don't account for different match headers.
2226 c->freqs.aligned[cache_ptr[-1].offset &
2227 LZX_ALIGNED_OFFSET_BITMASK]++;
2228 c->freqs.main[LZX_NUM_CHARS]++;
2230 lzx_observe_match(&c->split_stats, best_len);
2231 next_observation = in_next + best_len;
2234 c->freqs.main[*in_next]++;
2235 lzx_observe_literal(&c->split_stats, *in_next);
2236 next_observation = in_next + 1;
2241 * If there was a very long match found, then
2242 * don't cache any matches for the bytes covered
2243 * by that match. This avoids degenerate
2244 * behavior when compressing highly redundant
2245 * data, where the number of matches can be very
2248 * This heuristic doesn't actually hurt the
2249 * compression ratio *too* much. If there's a
2250 * long match, then the data must be highly
2251 * compressible, so it doesn't matter as much
2254 if (best_len >= nice_len)
2255 next_search_pos = in_next + best_len;
2257 /* Don't search for matches at this position. */
2258 CALL_BT_MF(is_16_bit, c,
2259 bt_matchfinder_skip_position,
2263 c->max_search_depth,
2265 cache_ptr->length = 0;
2268 } while (++in_next < next_pause_point &&
2269 likely(cache_ptr < &c->match_cache[CACHE_LENGTH]));
2273 /* Adjust max_len and nice_len if we're nearing the end of the
2274 * input buffer. In addition, if we are so close to the end of
2275 * the input buffer that there cannot be any more matches, then
2276 * just advance through the last few positions and record no
2278 if (unlikely(max_len > in_end - in_next)) {
2279 max_len = in_end - in_next;
2280 nice_len = min(max_len, nice_len);
2281 if (max_len < BT_MATCHFINDER_REQUIRED_NBYTES) {
2282 while (in_next != in_end) {
2283 cache_ptr->length = 0;
2290 /* End the block if the match cache may overflow. */
2291 if (unlikely(cache_ptr >= &c->match_cache[CACHE_LENGTH]))
2294 /* End the block if the soft maximum size has been reached. */
2295 if (in_next >= in_max_block_end)
2298 /* End the block if the block splitting algorithm thinks this is
2299 * a good place to do so. */
2300 if (c->split_stats.num_new_observations >=
2301 NUM_OBSERVATIONS_PER_BLOCK_CHECK &&
2302 in_max_block_end - in_next >= MIN_BLOCK_SIZE &&
2303 lzx_should_end_block(&c->split_stats))
2306 /* It's not time to end the block yet. Compute the next pause
2307 * point and resume matchfinding. */
2309 min(in_next + min(NUM_OBSERVATIONS_PER_BLOCK_CHECK * 2 -
2310 c->split_stats.num_new_observations,
2311 in_max_block_end - in_next),
2312 in_max_block_end - min(LZX_MAX_MATCH_LEN - 1,
2313 in_max_block_end - in_next));
2314 goto resume_matchfinding;
2317 /* We've decided on a block boundary and cached matches. Now
2318 * choose a match/literal sequence and flush the block. */
2319 queue = lzx_optimize_and_flush_block(c, os, in_block_begin,
2320 in_next - in_block_begin,
2322 } while (in_next != in_end);
2326 lzx_compress_near_optimal_16(struct lzx_compressor *c, const u8 *in,
2327 size_t in_nbytes, struct lzx_output_bitstream *os)
2329 lzx_compress_near_optimal(c, in, in_nbytes, os, true);
2333 lzx_compress_near_optimal_32(struct lzx_compressor *c, const u8 *in,
2334 size_t in_nbytes, struct lzx_output_bitstream *os)
2336 lzx_compress_near_optimal(c, in, in_nbytes, os, false);
2339 /******************************************************************************/
2340 /* Faster ("lazy") compression algorithm */
2341 /*----------------------------------------------------------------------------*/
2344 * Called when the compressor chooses to use a literal. This tallies the
2345 * Huffman symbol for the literal, increments the current literal run length,
2346 * and "observes" the literal for the block split statistics.
2349 lzx_choose_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
2351 lzx_observe_literal(&c->split_stats, literal);
2352 c->freqs.main[literal]++;
2357 * Called when the compressor chooses to use a match. This tallies the Huffman
2358 * symbol(s) for a match, saves the match data and the length of the preceding
2359 * literal run, updates the recent offsets queue, and "observes" the match for
2360 * the block split statistics.
2363 lzx_choose_match(struct lzx_compressor *c, unsigned length, u32 adjusted_offset,
2364 u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
2365 u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
2367 u32 litrunlen = *litrunlen_p;
2368 struct lzx_sequence *next_seq = *next_seq_p;
2369 unsigned offset_slot;
2372 lzx_observe_match(&c->split_stats, length);
2374 v = length - LZX_MIN_MATCH_LEN;
2376 /* Save the literal run length and adjusted length. */
2377 next_seq->litrunlen = litrunlen;
2378 next_seq->adjusted_length = v;
2380 /* Compute the length header, then tally the length symbol if needed. */
2381 if (v >= LZX_NUM_PRIMARY_LENS) {
2382 c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
2383 v = LZX_NUM_PRIMARY_LENS;
2386 /* Compute the offset slot. */
2387 offset_slot = lzx_get_offset_slot(c, adjusted_offset, is_16_bit);
2389 /* Compute the match header. */
2390 v += offset_slot * LZX_NUM_LEN_HEADERS;
2392 /* Save the adjusted offset and match header. */
2393 next_seq->adjusted_offset_and_match_hdr = (adjusted_offset << 9) | v;
2395 /* Tally the main symbol. */
2396 c->freqs.main[LZX_NUM_CHARS + v]++;
2398 /* Update the recent offsets queue. */
2399 if (adjusted_offset < LZX_NUM_RECENT_OFFSETS) {
2400 /* Repeat offset match. */
2401 swap(recent_offsets[0], recent_offsets[adjusted_offset]);
2403 /* Explicit offset match. */
2405 /* Tally the aligned offset symbol if needed. */
2406 if (adjusted_offset >= 16)
2407 c->freqs.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]++;
2409 recent_offsets[2] = recent_offsets[1];
2410 recent_offsets[1] = recent_offsets[0];
2411 recent_offsets[0] = adjusted_offset - LZX_OFFSET_ADJUSTMENT;
2414 /* Reset the literal run length and advance to the next sequence. */
2415 *next_seq_p = next_seq + 1;
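/*
 * Worked example (illustrative): a match of length 5 in offset slot 3 has
 * v = 5 - LZX_MIN_MATCH_LEN = 3, which is below LZX_NUM_PRIMARY_LENS (7),
 * so no length symbol is tallied.  The match header is then
 * 3 + 3 * LZX_NUM_LEN_HEADERS = 27, and the tallied main symbol is
 * LZX_NUM_CHARS + 27 = 283.  The 9-bit shift used above is safe because the
 * match header (length header plus offset slot times LZX_NUM_LEN_HEADERS)
 * always fits in 9 bits.
 */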
2420 * Called when the compressor ends a block. This finishes the last lzx_sequence,
2421 * which is just a literal run with no following match. This literal run might
2425 lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
2427 last_seq->litrunlen = litrunlen;
2429 /* Special value to mark last sequence */
2430 last_seq->adjusted_offset_and_match_hdr = 0x80000000;
2434 * Find the longest repeat offset match with the current position. If a match
2435 * is found, return its length and set *best_rep_idx_ret to the index of its
2436 * offset in @recent_offsets. Otherwise, return 0.
2438 * Don't bother with length 2 matches; consider matches of length >= 3 only.
2439 * Also assume that max_len >= 3.
2442 lzx_find_longest_repeat_offset_match(const u8 * const in_next,
2443 const u32 recent_offsets[],
2444 const unsigned max_len,
2445 unsigned *best_rep_idx_ret)
2447 STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3); /* loop is unrolled */
2449 const u32 seq3 = load_u24_unaligned(in_next);
2451 unsigned best_rep_len = 0;
2452 unsigned best_rep_idx = 0;
2455 /* Check for rep0 match (most recent offset) */
2456 matchptr = in_next - recent_offsets[0];
2457 if (load_u24_unaligned(matchptr) == seq3)
2458 best_rep_len = lz_extend(in_next, matchptr, 3, max_len);
2460 /* Check for rep1 match (second most recent offset) */
2461 matchptr = in_next - recent_offsets[1];
2462 if (load_u24_unaligned(matchptr) == seq3) {
2463 rep_len = lz_extend(in_next, matchptr, 3, max_len);
2464 if (rep_len > best_rep_len) {
2465 best_rep_len = rep_len;
2470 /* Check for rep2 match (third most recent offset) */
2471 matchptr = in_next - recent_offsets[2];
2472 if (load_u24_unaligned(matchptr) == seq3) {
2473 rep_len = lz_extend(in_next, matchptr, 3, max_len);
2474 if (rep_len > best_rep_len) {
2475 best_rep_len = rep_len;
2480 *best_rep_idx_ret = best_rep_idx;
2481 return best_rep_len;
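/*
 * Behavioral example (illustrative): if the recent offsets are
 * {100, 600, 3000} and the data matches 4 bytes at distance 100 but 7 bytes
 * at distance 600, this returns 7 with *best_rep_idx_ret == 1.  Ties go to
 * the more recent offset, since a later candidate must be strictly longer
 * to replace the current best.
 */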
2485 * Fast heuristic scoring for lazy parsing: how "good" is this match?
2486 * This is mainly determined by the length: longer matches are better.
2487 * However, we also give a bonus to close (small offset) matches and to repeat
2488 * offset matches, since those require fewer bits to encode.
2491 static inline unsigned
2492 lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
2494 unsigned score = len;
2496 if (adjusted_offset < 4096)
2498 if (adjusted_offset < 256)
2504 static inline unsigned
2505 lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
2511 * This is the "lazy" LZX compressor. The basic idea is that before it chooses
2512 * a match, it checks to see if there's a longer match at the next position. If
2513 * yes, it chooses a literal and continues to the next position. If no, it
2514 * chooses the match.
2516 * Some additional heuristics are used as well. Repeat offset matches are
2517 * considered favorably and sometimes are chosen immediately. In addition, long
2518 * matches (at least "nice_len" bytes) are chosen immediately as well. Finally,
2519 * when we decide whether a match is "better" than another, we take the offset
2520 * into consideration as well as the length.
2523 lzx_compress_lazy(struct lzx_compressor * restrict c,
2524 const u8 * const restrict in_begin, size_t in_nbytes,
2525 struct lzx_output_bitstream * restrict os, bool is_16_bit)
2527 const u8 * in_next = in_begin;
2528 const u8 * const in_end = in_begin + in_nbytes;
2529 unsigned max_len = LZX_MAX_MATCH_LEN;
2530 unsigned nice_len = min(c->nice_match_length, max_len);
2531 STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
2532 u32 recent_offsets[LZX_NUM_RECENT_OFFSETS] = {1, 1, 1};
2533 u32 next_hashes[2] = {0, 0};
2535 /* Initialize the matchfinder. */
2536 CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
2539 /* Starting a new block */
2541 const u8 * const in_block_begin = in_next;
2542 const u8 * const in_max_block_end =
2543 in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
2544 struct lzx_sequence *next_seq = c->chosen_sequences;
2548 u32 cur_adjusted_offset;
2552 u32 next_adjusted_offset;
2553 unsigned next_score;
2554 unsigned best_rep_len;
2555 unsigned best_rep_idx;
2559 lzx_reset_symbol_frequencies(c);
2560 lzx_init_block_split_stats(&c->split_stats);
2563 /* Adjust max_len and nice_len if we're nearing the end
2564 * of the input buffer. */
2565 if (unlikely(max_len > in_end - in_next)) {
2566 max_len = in_end - in_next;
2567 nice_len = min(max_len, nice_len);
2570 /* Find the longest match (subject to the
2571 * max_search_depth cutoff parameter) with the current
2572 * position. Don't bother with length 2 matches; only
2573 * look for matches of length >= 3. */
2574 cur_len = CALL_HC_MF(is_16_bit, c,
2575 hc_matchfinder_longest_match,
2581 c->max_search_depth,
2585 /* If there was no match found, or the only match found
2586 * was a distant short match, then choose a literal. */
2589 cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
2590 cur_offset != recent_offsets[0] &&
2591 cur_offset != recent_offsets[1] &&
2592 cur_offset != recent_offsets[2]))
2594 lzx_choose_literal(c, *in_next, &litrunlen);
2599 /* Heuristic: if this match has the most recent offset,
2600 * then go ahead and choose it as a rep0 match. */
2601 if (cur_offset == recent_offsets[0]) {
2603 skip_len = cur_len - 1;
2604 cur_adjusted_offset = 0;
2605 goto choose_cur_match;
2608 /* Compute the longest match's score as an explicit
2610 cur_adjusted_offset = cur_offset + LZX_OFFSET_ADJUSTMENT;
2611 cur_score = lzx_explicit_offset_match_score(cur_len, cur_adjusted_offset);
2613 /* Find the longest repeat offset match at this
2614 * position. If we find one and it's "better" than the
2615 * explicit offset match we found, then go ahead and
2616 * choose the repeat offset match immediately. */
2617 best_rep_len = lzx_find_longest_repeat_offset_match(in_next,
2623 if (best_rep_len != 0 &&
2624 (rep_score = lzx_repeat_offset_match_score(best_rep_len,
2625 best_rep_idx)) >= cur_score)
2627 cur_len = best_rep_len;
2628 cur_adjusted_offset = best_rep_idx;
2629 skip_len = best_rep_len - 1;
2630 goto choose_cur_match;
2635 * We have a match at the current position. If the
2636 * match is very long, then choose it immediately.
2637 * Otherwise, see if there's a better match at the next
2641 if (cur_len >= nice_len) {
2642 skip_len = cur_len - 1;
2643 goto choose_cur_match;
2646 if (unlikely(max_len > in_end - in_next)) {
2647 max_len = in_end - in_next;
2648 nice_len = min(max_len, nice_len);
2651 next_len = CALL_HC_MF(is_16_bit, c,
2652 hc_matchfinder_longest_match,
2658 c->max_search_depth / 2,
2662 if (next_len <= cur_len - 2) {
2663 /* No potentially better match was found. */
2665 skip_len = cur_len - 2;
2666 goto choose_cur_match;
2669 next_adjusted_offset = next_offset + LZX_OFFSET_ADJUSTMENT;
2670 next_score = lzx_explicit_offset_match_score(next_len, next_adjusted_offset);
2672 best_rep_len = lzx_find_longest_repeat_offset_match(in_next,
2678 if (best_rep_len != 0 &&
2679 (rep_score = lzx_repeat_offset_match_score(best_rep_len,
2680 best_rep_idx)) >= next_score)
2683 if (rep_score > cur_score) {
2684 /* The next match is better, and it's a
2685 * repeat offset match. */
2686 lzx_choose_literal(c, *(in_next - 2),
2688 cur_len = best_rep_len;
2689 cur_adjusted_offset = best_rep_idx;
2690 skip_len = cur_len - 1;
2691 goto choose_cur_match;
2694 if (next_score > cur_score) {
2695 /* The next match is better, and it's an
2696 * explicit offset match. */
2697 lzx_choose_literal(c, *(in_next - 2),
2700 cur_adjusted_offset = next_adjusted_offset;
2701 cur_score = next_score;
2702 goto have_cur_match;
2706 /* The original match was better; choose it. */
2707 skip_len = cur_len - 2;
2710 /* Choose a match and have the matchfinder skip over its
2711 * remaining bytes. */
2712 lzx_choose_match(c, cur_len, cur_adjusted_offset,
2713 recent_offsets, is_16_bit,
2714 &litrunlen, &next_seq);
2715 in_next = CALL_HC_MF(is_16_bit, c,
2716 hc_matchfinder_skip_positions,
2723 /* Keep going until it's time to end the block. */
2724 } while (in_next < in_max_block_end &&
2725 !(c->split_stats.num_new_observations >=
2726 NUM_OBSERVATIONS_PER_BLOCK_CHECK &&
2727 in_next - in_block_begin >= MIN_BLOCK_SIZE &&
2728 in_end - in_next >= MIN_BLOCK_SIZE &&
2729 lzx_should_end_block(&c->split_stats)));
2731 /* Flush the block. */
2732 lzx_finish_sequence(next_seq, litrunlen);
2733 lzx_flush_block(c, os, in_block_begin, in_next - in_block_begin, 0);
2735 /* Keep going until we've reached the end of the input buffer. */
2736 } while (in_next != in_end);
2740 lzx_compress_lazy_16(struct lzx_compressor *c, const u8 *in, size_t in_nbytes,
2741 struct lzx_output_bitstream *os)
2743 lzx_compress_lazy(c, in, in_nbytes, os, true);
2747 lzx_compress_lazy_32(struct lzx_compressor *c, const u8 *in, size_t in_nbytes,
2748 struct lzx_output_bitstream *os)
2750 lzx_compress_lazy(c, in, in_nbytes, os, false);
2753 /******************************************************************************/
2754 /* Compressor operations */
2755 /*----------------------------------------------------------------------------*/
2758 * Generate tables for mapping match offsets (actually, "adjusted" match
2759 * offsets) to offset slots.
2762 lzx_init_offset_slot_tabs(struct lzx_compressor *c)
2764 u32 adjusted_offset = 0;
2768 for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
2771 if (adjusted_offset >= lzx_offset_slot_base[slot + 1] +
2772 LZX_OFFSET_ADJUSTMENT)
2774 c->offset_slot_tab_1[adjusted_offset] = slot;
2777 /* slots [30, 49] */
2778 for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
2779 adjusted_offset += (u32)1 << 14)
2781 if (adjusted_offset >= lzx_offset_slot_base[slot + 1] +
2782 LZX_OFFSET_ADJUSTMENT)
2784 c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
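/*
 * Hypothetical sketch (not the file's actual helper) of how the two tables
 * built above are meant to be consulted: small adjusted offsets index the
 * byte-granularity table directly, while larger ones use the coarse
 * 2^14-granularity table.  The real lzx_get_offset_slot(), defined earlier
 * in this file, additionally takes an is_16_bit fast path.  The coarse
 * lookup works only because slot boundaries in that range fall on multiples
 * of 2^14, which the initialization loop above already relies on.
 */
static inline unsigned
lzx_offset_slot_lookup_sketch(const struct lzx_compressor *c,
			      u32 adjusted_offset)
{
	if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
		return c->offset_slot_tab_1[adjusted_offset];
	return c->offset_slot_tab_2[adjusted_offset >> 14];
}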
2789 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
2791 if (compression_level <= MAX_FAST_LEVEL) {
2792 if (lzx_is_16_bit(max_bufsize))
2793 return offsetof(struct lzx_compressor, hc_mf_16) +
2794 hc_matchfinder_size_16(max_bufsize);
2796 return offsetof(struct lzx_compressor, hc_mf_32) +
2797 hc_matchfinder_size_32(max_bufsize);
2799 if (lzx_is_16_bit(max_bufsize))
2800 return offsetof(struct lzx_compressor, bt_mf_16) +
2801 bt_matchfinder_size_16(max_bufsize);
2803 return offsetof(struct lzx_compressor, bt_mf_32) +
2804 bt_matchfinder_size_32(max_bufsize);
2808 /* Compute the amount of memory needed to allocate an LZX compressor. */
2810 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
2815 if (max_bufsize > LZX_MAX_WINDOW_SIZE)
2818 size += lzx_get_compressor_size(max_bufsize, compression_level);
2820 size += max_bufsize; /* account for in_buffer */
2824 /* Allocate an LZX compressor. */
2826 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
2827 bool destructive, void **c_ret)
2829 unsigned window_order;
2830 struct lzx_compressor *c;
2832 /* Validate the maximum buffer size and get the window order from it. */
2833 window_order = lzx_get_window_order(max_bufsize);
2834 if (window_order == 0)
2835 return WIMLIB_ERR_INVALID_PARAM;
2837 /* Allocate the compressor. */
2838 c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
2842 c->window_order = window_order;
2843 c->num_main_syms = lzx_get_num_main_syms(window_order);
2844 c->destructive = destructive;
2846 /* Allocate the buffer for preprocessed data if needed. */
2847 if (!c->destructive) {
2848 c->in_buffer = MALLOC(max_bufsize);
2853 if (compression_level <= MAX_FAST_LEVEL) {
2855 /* Fast compression: Use lazy parsing. */
2856 if (lzx_is_16_bit(max_bufsize))
2857 c->impl = lzx_compress_lazy_16;
2859 c->impl = lzx_compress_lazy_32;
2861 /* Scale max_search_depth and nice_match_length with the
2862 * compression level. */
2863 c->max_search_depth = (60 * compression_level) / 20;
2864 c->nice_match_length = (80 * compression_level) / 20;
2866 /* lzx_compress_lazy() needs max_search_depth >= 2 because it
2867 * halves the max_search_depth when attempting a lazy match, and
2868 * max_search_depth must be at least 1. */
2869 c->max_search_depth = max(c->max_search_depth, 2);
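/*
 * Example (illustrative): level 20 gives max_search_depth = (60*20)/20 = 60
 * and nice_match_length = (80*20)/20 = 80, while level 1 gives 3 and 4,
 * with the max() above ensuring the depth never drops below 2.
 */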
2872 /* Normal / high compression: Use near-optimal parsing. */
2873 if (lzx_is_16_bit(max_bufsize))
2874 c->impl = lzx_compress_near_optimal_16;
2876 c->impl = lzx_compress_near_optimal_32;
2878 /* Scale max_search_depth and nice_match_length with the
2879 * compression level. */
2880 c->max_search_depth = (24 * compression_level) / 50;
2881 c->nice_match_length = (48 * compression_level) / 50;
2883 /* Also scale num_optim_passes with the compression level. But
2884 * the more passes there are, the less they help --- so don't
2885 * add them linearly. */
2886 c->num_optim_passes = 1;
2887 c->num_optim_passes += (compression_level >= 45);
2888 c->num_optim_passes += (compression_level >= 70);
2889 c->num_optim_passes += (compression_level >= 100);
2890 c->num_optim_passes += (compression_level >= 150);
2891 c->num_optim_passes += (compression_level >= 200);
2892 c->num_optim_passes += (compression_level >= 300);
2894 /* max_search_depth must be at least 1. */
2895 c->max_search_depth = max(c->max_search_depth, 1);
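/*
 * Example (illustrative): level 50 exceeds MAX_FAST_LEVEL and so uses this
 * branch, giving max_search_depth = (24*50)/50 = 24,
 * nice_match_length = (48*50)/50 = 48, and num_optim_passes = 2, since only
 * the '>= 45' threshold is met.
 */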
2898 /* Prepare the offset => offset slot mapping. */
2899 lzx_init_offset_slot_tabs(c);
2907 return WIMLIB_ERR_NOMEM;
2910 /* Compress a buffer of data. */
2912 lzx_compress(const void *restrict in, size_t in_nbytes,
2913 void *restrict out, size_t out_nbytes_avail, void *restrict _c)
2915 struct lzx_compressor *c = _c;
2916 struct lzx_output_bitstream os;
2919 /* Don't bother trying to compress very small inputs. */
2923 /* If the compressor is in "destructive" mode, then we can directly
2924 * preprocess the input data. Otherwise, we need to copy it into an
2925 * internal buffer first. */
2926 if (!c->destructive) {
2927 memcpy(c->in_buffer, in, in_nbytes);
2931 /* Preprocess the input data. */
2932 lzx_preprocess((void *)in, in_nbytes);
2934 /* Initially, the previous Huffman codeword lengths are all zeroes. */
2936 memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
2938 /* Initialize the output bitstream. */
2939 lzx_init_output(&os, out, out_nbytes_avail);
2941 /* Call the compression level-specific compress() function. */
2942 (*c->impl)(c, in, in_nbytes, &os);
2944 /* Flush the output bitstream. */
2945 result = lzx_flush_output(&os);
2947 /* If the data did not compress to less than its original size and we
2948 * preprocessed the original buffer, then postprocess it to restore it
2949 * to its original state. */
2950 if (result == 0 && c->destructive)
2951 lzx_postprocess((void *)in, in_nbytes);
2953 /* Return the number of compressed bytes, or 0 if the input did not
2954 * compress to less than its original size. */
2958 /* Free an LZX compressor. */
2960 lzx_free_compressor(void *_c)
2962 struct lzx_compressor *c = _c;
2964 if (!c->destructive)
2969 const struct compressor_ops lzx_compressor_ops = {
2970 .get_needed_memory = lzx_get_needed_memory,
2971 .create_compressor = lzx_create_compressor,
2972 .compress = lzx_compress,
2973 .free_compressor = lzx_free_compressor,
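/*
 * Hypothetical usage sketch (not part of wimlib; real callers go through
 * the generic compressor interface that dispatches to these ops).  It
 * assumes the compressor_ops signatures implied above: create_compressor()
 * returns 0 on success, and compress() returns the compressed size or 0 if
 * the data did not compress.  Level 50 is just an example of a level above
 * MAX_FAST_LEVEL, i.e. one that selects the near-optimal algorithm.
 */
static inline size_t
lzx_compress_buffer_sketch(const void *in, size_t in_nbytes,
			   void *out, size_t out_nbytes_avail)
{
	void *c;
	size_t csize;

	if (lzx_compressor_ops.create_compressor(in_nbytes, 50, false, &c) != 0)
		return 0;
	csize = lzx_compressor_ops.compress(in, in_nbytes, out,
					    out_nbytes_avail, c);
	lzx_compressor_ops.free_compressor(c);
	return csize;
}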