4 * LZX compression routines
8 * Copyright (C) 2012, 2013 Eric Biggers
10 * This file is part of wimlib, a library for working with WIM files.
12 * wimlib is free software; you can redistribute it and/or modify it under the
13 * terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version.
17 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
18 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
19 * A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
22 * You should have received a copy of the GNU General Public License
23 * along with wimlib; if not, see http://www.gnu.org/licenses/.
28 * This file contains a compressor for the LZX compression format, as used in
29 * the WIM file format.
34 * First, the primary reference for the LZX compression format is the
35 * specification released by Microsoft.
37 * Second, the comments in lzx-decompress.c provide some more information about
38 * the LZX compression format, including errors in the Microsoft specification.
40 * Do note that LZX shares many similarities with DEFLATE, the algorithm used by
41 * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding,
42 * and certain other details are quite similar, such as the method for storing
43 * Huffman codes. However, some of the main differences are:
45 * - LZX preprocesses the data to attempt to make x86 machine code slightly more
46 * compressible before compressing it further.
47 * - LZX uses a "main" alphabet which combines literals and matches, with the
48 * match symbols containing a "length header" (giving all or part of the match
49 * length) and a "position slot" (giving, roughly speaking, the order of
50 * magnitude of the match offset).
51 * - LZX does not have static Huffman blocks; however, it does have two types of
52 * dynamic Huffman blocks ("aligned offset" and "verbatim").
53 * - LZX has a minimum match length of 2 rather than 3.
54 * - In LZX, match offsets 0 through 2 actually represent entries in an LRU
55 * queue of match offsets. This is very useful for certain types of files,
56 * such as binary files that have repeating records. (A sketch follows this list.)
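 *
 * To make the last point concrete, here is a minimal sketch (an illustration
 * only, not this file's exact code --- see lzx_get_position_slot() below for
 * the real logic) of how a match offset maps to a "position slot", assuming a
 * hypothetical helper real_position_slot() for the non-repeat case:
 *
 *	unsigned R[3] = { 1, 1, 1 };	// recent offsets (LZX_NUM_RECENT_OFFSETS == 3)
 *
 *	unsigned position_slot_for(unsigned offset)
 *	{
 *		for (unsigned i = 0; i < 3; i++)
 *			if (offset == R[i])
 *				return i;	// repeat match: slots 0 through 2
 *		// Not a repeat: push the offset onto the front of R[] and
 *		// compute the real slot (roughly the order of magnitude of
 *		// the offset).
 *		R[2] = R[1]; R[1] = R[0]; R[0] = offset;
 *		return real_position_slot(offset);
 *	}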
61 * There are actually two distinct overall algorithms implemented here. We
62 * shall refer to them as the "slow" algorithm and the "fast" algorithm. The
63 * "slow" algorithm spends more time compressing to achieve a higher compression
64 * ratio compared to the "fast" algorithm. More details are presented below.
69 * The "slow" algorithm to generate LZX-compressed data is roughly as follows:
71 * 1. Preprocess the input data to translate the targets of x86 call
72 * instructions to absolute offsets.
74 * 2. Build the suffix array and inverse suffix array for the input data. The
75 * suffix array contains the indices of all suffixes of the input data,
76 * sorted lexicographically by the corresponding suffixes. The "position" of
77 * a suffix is the index of that suffix in the original string, whereas the
78 * "rank" of a suffix is the index at which that suffix's position is found
79 * in the suffix array.
81 * 3. Build the longest common prefix array corresponding to the suffix array.
 *    (A worked example of steps 2 and 3 follows this list.)
83 * 4. For each suffix, find the highest lower-ranked suffix that has a lower
84 * position, the lowest higher-ranked suffix that has a lower position, and
85 * the length of the common prefix shared with each. This information is
86 * later used to link suffix ranks into a doubly-linked list for searching
 * the suffix array.
89 * 5. Set a default cost model for matches/literals.
91 * 6. Determine the lowest cost sequence of LZ77 matches ((offset, length)
92 * pairs) and literal bytes to divide the input into. Raw match-finding is
93 * done by searching the suffix array using a linked list to avoid
94 * considering any suffixes that start after the current position. Each run
95 * of the match-finder returns the approximate lowest-cost longest match as
96 * well as any shorter matches that have even lower approximate costs. Each
97 * such run also adds the suffix rank of the current position into the linked
98 * list being used to search the suffix array. Parsing, or match-choosing,
99 * is solved as a minimum-cost path problem using a forward "optimal parsing"
100 * algorithm based on the Deflate encoder from 7-Zip. This algorithm moves
101 * forward calculating the minimum cost to reach each byte until either a
102 * very long match is found or until a position is found at which no matches
 *    start or overlap. (A simplified sketch of this computation follows the
 *    list.)
105 * 7. Build the Huffman codes needed to output the matches/literals.
107 * 8. Up to a certain number of iterations, use the resulting Huffman codes to
108 * refine a cost model and go back to Step #6 to determine an improved
109 * sequence of matches and literals.
111 * 9. Output the resulting block using the match/literal sequences and the
112 * Huffman codes that were computed for the block.
114 * Note: the algorithm does not yet attempt to split the input into multiple LZX
115 * blocks, instead using a series of blocks of LZX_DIV_BLOCK_SIZE bytes.
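 *
 * As a worked example of the terms used in steps 2 and 3, consider the 4-byte
 * input "abab". Its suffixes in lexicographic order are "ab" (position 2),
 * "abab" (position 0), "b" (position 3), and "bab" (position 1), so:
 *
 *	SA  = { 2, 0, 3, 1 }	(rank -> position)
 *	ISA = { 1, 3, 0, 2 }	(position -> rank; ISA[SA[r]] == r)
 *	LCP = { -, 2, 0, 1 }	(LCP[i] = length of the longest common prefix
 *				 of the suffixes at SA[i - 1] and SA[i];
 *				 LCP[0] is undefined)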
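 *
 * The minimum-cost path computation of step 6 can be sketched as the
 * following forward pass (heavily simplified: it ignores the recent-offset
 * queue and the flushing heuristics, and uses hypothetical helpers relax(),
 * literal_cost(), match_cost(), and get_matches()):
 *
 *	cost[0] = 0;			// cost[1..n] start at infinity
 *	for (unsigned pos = 0; pos < n; pos++) {
 *		// Edge for a literal to the next position.
 *		relax(pos + 1, cost[pos] + literal_cost(window[pos]));
 *		// Edges for every usable length of every match found here.
 *		unsigned num = get_matches(pos, matches);
 *		for (unsigned j = 0; j < num; j++)
 *			for (unsigned l = 2; l <= matches[j].len; l++)
 *				relax(pos + l,
 *				      cost[pos] + match_cost(l, matches[j].offset));
 *	}
 *
 * where relax(i, c) sets cost[i] = min(cost[i], c) and records the
 * match/literal choice that achieved it.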
120 * The fast algorithm (and the only one available in wimlib v1.5.1 and earlier)
121 * spends much less time on the main bottlenecks of the compression process ---
122 * that is, the match finding and match choosing. Matches are found and chosen
123 * with hash chains using a greedy parse with one position of look-ahead. No
124 * block splitting is done; only compressing the full input into an aligned
125 * offset block is considered.
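 *
 * As a rough sketch of "greedy parse with one position of look-ahead" (an
 * illustration only, with hypothetical helpers longest_match(),
 * output_match(), and output_literal()):
 *
 *	unsigned len = longest_match(0);
 *	for (unsigned pos = 0; pos < n; ) {
 *		unsigned next_len = longest_match(pos + 1);
 *		if (len >= LZX_MIN_MATCH_LEN && len >= next_len) {
 *			output_match(pos, len);		// take the match now
 *			pos += len;
 *			len = longest_match(pos);
 *		} else {
 *			output_literal(window[pos]);	// defer; next match is longer
 *			pos++;
 *			len = next_len;
 *		}
 *	}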
130 * The old API (retained for backward compatibility) consists of just one function:
133 * wimlib_lzx_compress()
135 * The new compressor has more potential parameters and needs more memory, so
136 * the new API ties up memory allocations and compression parameters into a
 * single context:
139 * wimlib_lzx_alloc_context()
140 * wimlib_lzx_compress2()
141 * wimlib_lzx_free_context()
142 * wimlib_lzx_set_default_params()
144 * Both wimlib_lzx_compress() and wimlib_lzx_compress2() are designed to
145 * compress an in-memory buffer of up to the window size, which can be any power
146 * of two between 2^15 and 2^21, inclusive. However, by default, the WIM
147 * format uses 2^15, and this is seemingly the only value that is compatible
148 * with WIMGAPI. In any case, the window is not a true "sliding window" since
149 * no data is ever "slid out" of the window. This is needed for the WIM format,
150 * which is designed such that chunks may be randomly accessed.
152 * Both wimlib_lzx_compress() and wimlib_lzx_compress2() return 0 if the data
153 * could not be compressed to less than the size of the uncompressed data.
154 * Again, this is suitable for the WIM format, which stores such data chunks
 * uncompressed.
157 * The functions in this LZX compression API are exported from the library,
158 * although, with the possible exception of wimlib_lzx_set_default_params(),
159 * they are exported only in case other programs happen to have uses for them
160 * other than the WIM reading/writing already handled through the rest of the
 * library.
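 *
 * A typical use of the new API might look as follows --- a hedged sketch
 * only, since the exact prototypes are declared in wimlib.h and are not
 * repeated here:
 *
 *	struct wimlib_lzx_context *ctx = NULL;
 *	unsigned csize;
 *
 *	wimlib_lzx_alloc_context(NULL, &ctx);	// NULL: assume default params
 *	csize = wimlib_lzx_compress2(udata, usize, cdata, ctx);
 *	if (csize == 0) {
 *		// data did not compress; store the chunk uncompressed
 *	}
 *	wimlib_lzx_free_context(ctx);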
165 * Acknowledgments to several open-source projects and research papers that made
166 * it possible to implement this code:
168 * - divsufsort (author: Yuta Mori), for the suffix array construction code,
169 * located in a separate directory (divsufsort/).
171 * - "Linear-Time Longest-Common-Prefix Computation in Suffix Arrays and Its
172 * Applications" (Kasai et al. 2001), for the LCP array computation.
174 * - "LPF computation revisited" (Crochemore et al. 2009) for the prev and next
175 * array computations.
177 * - 7-Zip (author: Igor Pavlov), for the algorithm for forward optimal parsing.
180 * - zlib (author: Jean-loup Gailly and Mark Adler), for the hash table
181 * match-finding algorithm (used in lz77.c).
183 * - lzx-compress (author: Matthew T. Russotto), on which some parts of this
184 * code were originally based.
192 #include "wimlib/compressor_ops.h"
193 #include "wimlib/compress_common.h"
194 #include "wimlib/endianness.h"
195 #include "wimlib/error.h"
196 #include "wimlib/lzx.h"
197 #include "wimlib/util.h"
202 #ifdef ENABLE_LZX_DEBUG
203 # include "wimlib/decompress_common.h"
206 #include "divsufsort/divsufsort.h"
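/* Type used to represent the cost, in bits, of a compressed block. */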
208 typedef u32 block_cost_t;
209 #define INFINITE_BLOCK_COST ((block_cost_t)~0U)
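/* Number of window positions the match-choosing algorithm may buffer before
 * flushing its chosen matches/literals (see the match-choosing code below). */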
211 #define LZX_OPTIM_ARRAY_SIZE 4096
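/* Size, in bytes, of the fixed-size blocks the input is divided into. */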
213 #define LZX_DIV_BLOCK_SIZE 32768
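/* Maximum number of matches cached per position (presumably used to size the
 * cached_matches array). */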
215 #define LZX_MAX_CACHE_PER_POS 10
217 /* Codewords for the LZX main, length, and aligned offset Huffman codes */
218 struct lzx_codewords {
219 u16 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
220 u16 len[LZX_LENCODE_NUM_SYMBOLS];
221 u16 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
224 /* Codeword lengths (in bits) for the LZX main, length, and aligned offset
 * Huffman codes.
227 * A 0 length means the corresponding symbol has zero frequency and thus no
 * codeword. */
230 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
231 u8 len[LZX_LENCODE_NUM_SYMBOLS];
232 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
235 /* Costs for the LZX main, length, and aligned offset Huffman symbols.
237 * If a codeword has zero frequency, it must still be assigned some nonzero cost
238 * --- generally a high cost, since even if it gets used in the next iteration,
239 * it probably will not be used very many times. */
241 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
242 u8 len[LZX_LENCODE_NUM_SYMBOLS];
243 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
246 /* The LZX main, length, and aligned offset Huffman codes */
248 struct lzx_codewords codewords;
249 struct lzx_lens lens;
252 /* Tables for tallying symbol frequencies in the three LZX alphabets */
254 input_idx_t main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
255 input_idx_t len[LZX_LENCODE_NUM_SYMBOLS];
256 input_idx_t aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
259 /* LZX intermediate match/literal format */
263 * 31 1 if a match, 0 if a literal.
265 * 30-25 position slot. This can be at most 50, so it will fit in 6 bits.
268 * 8-24 position footer. This is the offset of the real formatted
269 * offset from the position base. This can be at most 17 bits
270 * (since lzx_extra_bits[LZX_MAX_POSITION_SLOTS - 1] is 17).
272 * 0-7 length of match, minus 2. This can be at most
273 * (LZX_MAX_MATCH_LEN - 2) == 255, so it will fit in 8 bits. */
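
/* For illustration, the packing implied by the bit layout above --- this is
 * what lzx_tally_match() effectively produces and what lzx_write_match()
 * takes apart:
 *
 *	data = 0x80000000 |		// bit 31: this is a match
 *	       (position_slot << 25) |	// bits 30-25
 *	       (position_footer << 8) |	// bits 24-8
 *	       (match_len - 2);		// bits 7-0
 *
 *	match_len_minus_2 = data & 0xff;
 *	position_footer   = (data >> 8) & 0x1ffff;
 *	position_slot     = (data >> 25) & 0x3f;
 */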
277 /* Raw LZ match/literal format: just a length and offset.
279 * The length is the number of bytes of the match, and the offset is the number
280 * of bytes back in the input the match is from the current position.
282 * If @len < LZX_MIN_MATCH_LEN, then it's really just a literal byte and
 * @offset is meaningless. */
289 /* Specification for an LZX block. */
290 struct lzx_block_spec {
292 /* One of the LZX_BLOCKTYPE_* constants indicating which type of block
 * this is. */
296 /* 0-based position in the window at which this block starts. */
297 input_idx_t window_pos;
299 /* The number of bytes of uncompressed data this block represents. */
300 input_idx_t block_size;
302 /* The position in the 'chosen_matches' array in the `struct
303 * lzx_compressor' at which the match/literal specifications for
304 * this block begin. */
305 input_idx_t chosen_matches_start_pos;
307 /* The number of match/literal specifications for this block. */
308 input_idx_t num_chosen_matches;
310 /* Huffman codes for this block. */
311 struct lzx_codes codes;
315 * An array of these structures is used during the match-choosing algorithm.
316 * They correspond to consecutive positions in the window and are used to keep
317 * track of the cost to reach each position, and the match/literal choices that
318 * need to be chosen to reach that position.
321 /* The approximate minimum cost, in bits, found so far to reach this
322 * position in the window. */
325 /* The union here is just for clarity, since the fields are used in two
326 * slightly different ways. Initially, the @prev structure is filled in
327 * first, and links go from later in the window to earlier in the
328 * window. Later, the @next structure is filled in and links go from
329 * earlier in the window to later in the window. */
332 /* Position of the start of the match or literal that
333 * was taken to get to this position in the approximate
334 * minimum-cost parse. */
337 /* Offset (as in an LZ (length, offset) pair) of the
338 * match or literal that was taken to get to this
339 * position in the approximate minimum-cost parse. */
340 input_idx_t match_offset;
343 /* Position at which the match or literal starting at
344 * this position ends in the minimum-cost parse. */
347 /* Offset (as in an LZ (length, offset) pair) of the
348 * match or literal starting at this position in the
349 * approximate minimum-cost parse. */
350 input_idx_t match_offset;
354 /* The match offset LRU queue that will exist when the approximate
355 * minimum-cost path to reach this position is taken. */
356 struct lzx_lru_queue queue;
359 /* Suffix array link */
361 /* Rank of highest ranked suffix that has rank lower than the suffix
362 * corresponding to this structure and either has a lower position
363 * (initially) or has a position lower than the highest position at
364 * which matches have been searched for so far, or -1 if there is no such
 * suffix. */
368 /* Rank of lowest ranked suffix that has rank greater than the suffix
369 * corresponding to this structure and either has a lower position
370 * (initially) or has a position lower than the highest position at which
371 * matches have been searched for so far, or -1 if there is no such
 * suffix. */
375 /* Length of longest common prefix between the suffix corresponding to
376 * this structure and the suffix with rank @prev, or 0 if @prev is -1.
380 /* Length of longest common prefix between the suffix corresponding to
381 * this structure and the suffix with rank @next, or 0 if @next is -1.
386 /* State of the LZX compressor. */
387 struct lzx_compressor {
389 /* The parameters that were used to create the compressor. */
390 struct wimlib_lzx_compressor_params params;
392 /* The buffer of data to be compressed.
394 * 0xe8 byte preprocessing is done directly on the data here before
395 * further compression.
397 * Note that this compressor does *not* use a real sliding window!!!!
398 * It's not needed in the WIM format, since every chunk is compressed
399 * independently. This is by design, to allow random access to the chunks.
402 * We reserve a few extra bytes to potentially allow reading off the end
403 * of the array in the match-finding code for optimization purposes.
407 /* Number of bytes of data to be compressed, which is the number of
408 * bytes of data in @window that are actually valid. */
409 input_idx_t window_size;
411 /* Allocated size of the @window. */
412 input_idx_t max_window_size;
414 /* Number of symbols in the main alphabet (depends on the
415 * @max_window_size since it determines the maximum allowed offset). */
416 unsigned num_main_syms;
418 /* The current match offset LRU queue. */
419 struct lzx_lru_queue queue;
421 /* Space for the sequences of matches/literals that were chosen for each
 * block. */
423 struct lzx_match *chosen_matches;
425 /* Information about the LZX blocks the preprocessed input was divided
 * into. */
427 struct lzx_block_spec *block_specs;
429 /* Number of LZX blocks the input was divided into; a.k.a. the number of
430 * elements of @block_specs that are valid. */
433 /* This is simply filled in with zeroes and used to avoid special-casing
434 * the output of the first compressed Huffman code, which conceptually
435 * has a delta taken from a code with all symbols having zero-length
 * codewords. */
437 struct lzx_codes zero_codes;
439 /* The current cost model. */
440 struct lzx_costs costs;
442 /* Fast algorithm only: Array of hash table links. */
443 input_idx_t *prev_tab;
445 /* Suffix array for window.
446 * This is a mapping from suffix rank to suffix position. */
449 /* Inverse suffix array for window.
450 * This is a mapping from suffix position to suffix rank.
451 * If 0 <= r < window_size, then ISA[SA[r]] == r. */
454 /* Longest common prefix array corresponding to the suffix array SA.
455 * LCP[i] is the length of the longest common prefix between the
456 * suffixes with positions SA[i - 1] and SA[i]. LCP[0] is undefined.
460 /* Suffix array links.
462 * During a linear scan of the input string to find matches, this array
463 * is used to keep track of which rank suffixes in the suffix array appear
464 * before the current position. Instead of searching in the original
465 * suffix array, scans for matches at a given position traverse a linked
466 * list containing only suffixes that appear before that position. */
467 struct salink *salink;
469 /* Position in window of next match to return. */
470 input_idx_t match_window_pos;
472 /* The match-finder shall ensure that matches do not extend beyond this
473 * position in the input. */
474 input_idx_t match_window_end;
476 /* Matches found by the match-finder are cached in the following array
477 * to achieve a slight speedup when the same matches are needed on
478 * subsequent passes. This is suboptimal because different matches may
479 * be preferred with different cost models, but seems to be a worthwhile
 * speedup. */
481 struct raw_match *cached_matches;
482 unsigned cached_matches_pos;
485 /* Slow algorithm only: Temporary space used for the match-choosing
 * algorithm.
488 * The size of this array must be at least LZX_MAX_MATCH_LEN but
489 * otherwise is arbitrary. More space simply allows the match-choosing
490 * algorithm to potentially find better matches (depending on the input,
 * cost model, etc.). */
492 struct lzx_optimal *optimum;
494 /* Slow algorithm only: Variables used by the match-choosing algorithm.
496 * When matches have been chosen, optimum_cur_idx is set to the position
497 * in the window of the next match/literal to return and optimum_end_idx
498 * is set to the position in the window at the end of the last
499 * match/literal to return. */
504 /* Returns the LZX position slot that corresponds to a given match offset,
505 * taking into account the recent offset queue and updating it if the offset
 * is found in it. */
508 lzx_get_position_slot(unsigned offset, struct lzx_lru_queue *queue)
510 unsigned position_slot;
512 /* See if the offset was recently used. */
513 for (unsigned i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
514 if (offset == queue->R[i]) {
517 /* Bring the repeat offset to the front of the
518 * queue. Note: this is, in fact, not a real
519 * LRU queue because repeat matches are simply
520 * swapped to the front. */
521 swap(queue->R[0], queue->R[i]);
523 /* The resulting position slot is simply the first index
524 * at which the offset was found in the queue. */
529 /* The offset was not recently used; look up its real position slot. */
530 position_slot = lzx_get_position_slot_raw(offset + LZX_OFFSET_OFFSET);
532 /* Bring the new offset to the front of the queue. */
533 for (unsigned i = LZX_NUM_RECENT_OFFSETS - 1; i > 0; i--)
534 queue->R[i] = queue->R[i - 1];
535 queue->R[0] = offset;
537 return position_slot;
540 /* Build the main, length, and aligned offset Huffman codes used in LZX.
542 * This takes as input the frequency tables for each code and produces as output
543 * a set of tables that map symbols to codewords and codeword lengths. */
545 lzx_make_huffman_codes(const struct lzx_freqs *freqs,
546 struct lzx_codes *codes,
547 unsigned num_main_syms)
549 make_canonical_huffman_code(num_main_syms,
550 LZX_MAX_MAIN_CODEWORD_LEN,
553 codes->codewords.main);
555 make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
556 LZX_MAX_LEN_CODEWORD_LEN,
559 codes->codewords.len);
561 make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
562 LZX_MAX_ALIGNED_CODEWORD_LEN,
565 codes->codewords.aligned);
569 * Output an LZX match.
571 * @out: The bitstream to write the match to.
572 * @block_type: The type of the LZX block (LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM)
574 * @codes: Pointer to a structure that contains the codewords for the
575 * main, length, and aligned offset Huffman codes.
578 lzx_write_match(struct output_bitstream *out, int block_type,
579 struct lzx_match match, const struct lzx_codes *codes)
581 /* low 8 bits are the match length minus 2 */
582 unsigned match_len_minus_2 = match.data & 0xff;
583 /* Next 17 bits are the position footer */
584 unsigned position_footer = (match.data >> 8) & 0x1ffff; /* 17 bits */
585 /* Next 6 bits are the position slot. */
586 unsigned position_slot = (match.data >> 25) & 0x3f; /* 6 bits */
589 unsigned main_symbol;
590 unsigned num_extra_bits;
591 unsigned verbatim_bits;
592 unsigned aligned_bits;
594 /* If the match length is less than MIN_MATCH_LEN (= 2) +
595 * NUM_PRIMARY_LENS (= 7), the length header contains
596 * the match length minus MIN_MATCH_LEN, and there is no length footer.
599 * Otherwise, the length header contains
600 * NUM_PRIMARY_LENS, and the length footer contains
601 * the match length minus NUM_PRIMARY_LENS minus MIN_MATCH_LEN. */
603 if (match_len_minus_2 < LZX_NUM_PRIMARY_LENS) {
604 len_header = match_len_minus_2;
605 /* No length footer --- mark it with a special value. */
607 len_footer = (unsigned)(-1);
609 len_header = LZX_NUM_PRIMARY_LENS;
610 len_footer = match_len_minus_2 - LZX_NUM_PRIMARY_LENS;
613 /* Combine the position slot with the length header into a single symbol
614 * that will be encoded with the main code.
616 * The actual main symbol is offset by LZX_NUM_CHARS because values
617 * under LZX_NUM_CHARS are used to indicate a literal byte rather than a
 * match. */
619 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
621 /* Output main symbol. */
622 bitstream_put_bits(out, codes->codewords.main[main_symbol],
623 codes->lens.main[main_symbol]);
625 /* If there is a length footer, output it using the
626 * length Huffman code. */
627 if (len_footer != (unsigned)(-1)) {
628 bitstream_put_bits(out, codes->codewords.len[len_footer],
629 codes->lens.len[len_footer]);
632 num_extra_bits = lzx_get_num_extra_bits(position_slot);
634 /* For aligned offset blocks with at least 3 extra bits, output the
635 * verbatim bits literally, then the aligned bits encoded using the
636 * aligned offset code. Otherwise, only the verbatim bits need to be
 * output. */
638 if ((block_type == LZX_BLOCKTYPE_ALIGNED) && (num_extra_bits >= 3)) {
640 verbatim_bits = position_footer >> 3;
641 bitstream_put_bits(out, verbatim_bits,
644 aligned_bits = (position_footer & 7);
645 bitstream_put_bits(out,
646 codes->codewords.aligned[aligned_bits],
647 codes->lens.aligned[aligned_bits]);
649 /* The verbatim bits are the same as the position
650 * footer in this case. */
651 bitstream_put_bits(out, position_footer, num_extra_bits);
656 lzx_build_precode(const u8 lens[restrict],
657 const u8 prev_lens[restrict],
658 const unsigned num_syms,
659 input_idx_t precode_freqs[restrict LZX_PRECODE_NUM_SYMBOLS],
660 u8 output_syms[restrict num_syms],
661 u8 precode_lens[restrict LZX_PRECODE_NUM_SYMBOLS],
662 u16 precode_codewords[restrict LZX_PRECODE_NUM_SYMBOLS],
663 unsigned *num_additional_bits_ret)
665 memset(precode_freqs, 0,
666 LZX_PRECODE_NUM_SYMBOLS * sizeof(precode_freqs[0]));
668 /* Since the code word lengths use a form of RLE encoding, the goal here
669 * is to find each run of identical lengths when going through them in
670 * symbol order (including runs of length 1). For each run, as many
671 * lengths are encoded using RLE as possible, and the rest are output
 * literally. (A worked example follows this function.)
674 * output_syms[] will be filled in with the length symbols that will be
675 * output, including RLE codes, not yet encoded using the precode.
677 * cur_run_len keeps track of how many code word lengths are in the
678 * current run of identical lengths. */
679 unsigned output_syms_idx = 0;
680 unsigned cur_run_len = 1;
681 unsigned num_additional_bits = 0;
682 for (unsigned i = 1; i <= num_syms; i++) {
684 if (i != num_syms && lens[i] == lens[i - 1]) {
685 /* Still in a run--- keep going. */
690 /* Run ended! Check if it is a run of zeroes or a run of
 * nonzero lengths. */
693 /* The symbol that was repeated in the run--- not to be confused
694 * with the length *of* the run (cur_run_len) */
695 unsigned len_in_run = lens[i - 1];
697 if (len_in_run == 0) {
698 /* A run of 0's. Encode it in as few length
699 * codes as we can. */
701 /* The magic length 18 indicates a run of 20 + n zeroes,
702 * where n is an uncompressed literal 5-bit integer that
703 * follows the magic length. */
704 while (cur_run_len >= 20) {
705 unsigned additional_bits;
707 additional_bits = min(cur_run_len - 20, 0x1f);
708 num_additional_bits += 5;
710 output_syms[output_syms_idx++] = 18;
711 output_syms[output_syms_idx++] = additional_bits;
712 cur_run_len -= 20 + additional_bits;
715 /* The magic length 17 indicates a run of 4 + n zeroes,
716 * where n is an uncompressed literal 4-bit integer that
717 * follows the magic length. */
718 while (cur_run_len >= 4) {
719 unsigned additional_bits;
721 additional_bits = min(cur_run_len - 4, 0xf);
722 num_additional_bits += 4;
724 output_syms[output_syms_idx++] = 17;
725 output_syms[output_syms_idx++] = additional_bits;
726 cur_run_len -= 4 + additional_bits;
731 /* A run of nonzero lengths. */
733 /* The magic length 19 indicates a run of 4 + n
734 * nonzeroes, where n is a literal bit that follows the
735 * magic length, and where the value of the lengths in
736 * the run is given by an extra length symbol, encoded
737 * with the precode, that follows the literal bit.
739 * The extra length symbol is encoded as a difference
740 * from the length of the codeword for the first symbol
741 * in the run in the previous code.
743 while (cur_run_len >= 4) {
744 unsigned additional_bits;
747 additional_bits = (cur_run_len > 4);
748 num_additional_bits += 1;
749 delta = (signed char)prev_lens[i - cur_run_len] -
750 (signed char)len_in_run;
754 precode_freqs[(unsigned char)delta]++;
755 output_syms[output_syms_idx++] = 19;
756 output_syms[output_syms_idx++] = additional_bits;
757 output_syms[output_syms_idx++] = delta;
758 cur_run_len -= 4 + additional_bits;
762 /* Any remaining lengths in the run are output without RLE,
763 * as a difference from the length of that codeword in the
 * previous code. */
765 while (cur_run_len > 0) {
768 delta = (signed char)prev_lens[i - cur_run_len] -
769 (signed char)len_in_run;
773 precode_freqs[(unsigned char)delta]++;
774 output_syms[output_syms_idx++] = delta;
781 /* Build the precode from the frequencies of the length symbols. */
783 make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
784 LZX_MAX_PRE_CODEWORD_LEN,
785 precode_freqs, precode_lens,
788 *num_additional_bits_ret = num_additional_bits;
790 return output_syms_idx;
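
/* A worked example of the run-length rules above (a sketch based only on the
 * rules stated in lzx_build_precode()): a run of 25 zero lengths is emitted
 * as the pair (18, 5), since 20 + 5 = 25; a run of 6 zero lengths as (17, 2),
 * since 4 + 2 = 6; and a run of 5 identical nonzero lengths as the triple
 * (19, 1, delta), since 4 + 1 = 5, where delta encodes the lengths' value
 * relative to the previous code. */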
794 * Writes a compressed Huffman code to the output, preceded by the precode
 * for it.
797 * The Huffman code is represented in the output as a series of path lengths
798 * from which the canonical Huffman code can be reconstructed. The path lengths
799 * themselves are compressed using a separate Huffman code, the precode, which
800 * consists of LZX_PRECODE_NUM_SYMBOLS (= 20) symbols that cover all possible
801 * code lengths, plus extra codes for repeated lengths. The path lengths of the
802 * precode precede the path lengths of the larger code and are uncompressed,
803 * consisting of 20 entries of 4 bits each.
805 * @out: Bitstream to write the code to.
806 * @lens: The code lengths for the Huffman code, indexed by symbol.
807 * @prev_lens: Code lengths for this Huffman code, indexed by symbol,
808 * in the *previous block*, or all zeroes if this is the
 * first block.
810 * @num_syms: The number of symbols in the code.
813 lzx_write_compressed_code(struct output_bitstream *out,
814 const u8 lens[restrict],
815 const u8 prev_lens[restrict],
818 input_idx_t precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
819 u8 output_syms[num_syms];
820 u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
821 u16 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
823 unsigned num_output_syms;
827 num_output_syms = lzx_build_precode(lens,
836 /* Write the lengths of the precode codes to the output. */
837 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
838 bitstream_put_bits(out, precode_lens[i],
839 LZX_PRECODE_ELEMENT_SIZE);
841 /* Write the length symbols, encoded with the precode, to the output. */
843 for (i = 0; i < num_output_syms; ) {
844 precode_sym = output_syms[i++];
846 bitstream_put_bits(out, precode_codewords[precode_sym],
847 precode_lens[precode_sym]);
848 switch (precode_sym) {
850 bitstream_put_bits(out, output_syms[i++], 4);
853 bitstream_put_bits(out, output_syms[i++], 5);
856 bitstream_put_bits(out, output_syms[i++], 1);
857 bitstream_put_bits(out,
858 precode_codewords[output_syms[i]],
859 precode_lens[output_syms[i]]);
869 * Writes all compressed matches and literal bytes in an LZX block to the
 * output bitstream.
873 * The output bitstream.
875 * The type of the block (LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM).
877 * The array of matches/literals that will be output (length @match_count).
879 * Number of matches/literals to be output.
881 * Pointer to a structure that contains the codewords for the main, length,
882 * and aligned offset Huffman codes.
885 lzx_write_matches_and_literals(struct output_bitstream *ostream,
887 const struct lzx_match match_tab[],
888 unsigned match_count,
889 const struct lzx_codes *codes)
891 for (unsigned i = 0; i < match_count; i++) {
892 struct lzx_match match = match_tab[i];
894 /* High bit of the match indicates whether the match is an
895 * actual match (1) or a literal uncompressed byte (0) */
896 if (match.data & 0x80000000) {
898 lzx_write_match(ostream, block_type,
902 bitstream_put_bits(ostream,
903 codes->codewords.main[match.data],
904 codes->lens.main[match.data]);
910 lzx_assert_codes_valid(const struct lzx_codes * codes, unsigned num_main_syms)
912 #ifdef ENABLE_LZX_DEBUG
915 for (i = 0; i < num_main_syms; i++)
916 LZX_ASSERT(codes->lens.main[i] <= LZX_MAX_MAIN_CODEWORD_LEN);
918 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
919 LZX_ASSERT(codes->lens.len[i] <= LZX_MAX_LEN_CODEWORD_LEN);
921 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
922 LZX_ASSERT(codes->lens.aligned[i] <= LZX_MAX_ALIGNED_CODEWORD_LEN);
924 const unsigned tablebits = 10;
925 u16 decode_table[(1 << tablebits) +
926 (2 * max(num_main_syms, LZX_LENCODE_NUM_SYMBOLS))]
927 _aligned_attribute(DECODE_TABLE_ALIGNMENT);
928 LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
930 min(tablebits, LZX_MAINCODE_TABLEBITS),
932 LZX_MAX_MAIN_CODEWORD_LEN));
933 LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
934 LZX_LENCODE_NUM_SYMBOLS,
935 min(tablebits, LZX_LENCODE_TABLEBITS),
937 LZX_MAX_LEN_CODEWORD_LEN));
938 LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
939 LZX_ALIGNEDCODE_NUM_SYMBOLS,
940 min(tablebits, LZX_ALIGNEDCODE_TABLEBITS),
942 LZX_MAX_ALIGNED_CODEWORD_LEN));
943 #endif /* ENABLE_LZX_DEBUG */
946 /* Write an LZX aligned offset or verbatim block to the output. */
948 lzx_write_compressed_block(int block_type,
950 unsigned max_window_size,
951 unsigned num_main_syms,
952 struct lzx_match * chosen_matches,
953 unsigned num_chosen_matches,
954 const struct lzx_codes * codes,
955 const struct lzx_codes * prev_codes,
956 struct output_bitstream * ostream)
960 LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
961 block_type == LZX_BLOCKTYPE_VERBATIM);
962 lzx_assert_codes_valid(codes, num_main_syms);
964 /* The first three bits indicate the type of block and are one of the
965 * LZX_BLOCKTYPE_* constants. */
966 bitstream_put_bits(ostream, block_type, 3);
968 /* Output the block size.
970 * The original LZX format seemed to always encode the block size in 3
971 * bytes. However, the implementation in WIMGAPI, as used in WIM files,
972 * uses the first bit to indicate whether the block is the default size
973 * (32768) or a different size given explicitly by the next 16 bits.
975 * By default, this compressor uses a window size of 32768 and therefore
976 * follows the WIMGAPI behavior. However, this compressor also supports
977 * window sizes greater than 32768 bytes, which do not appear to be
978 * supported by WIMGAPI. In such cases, we retain the default size bit
979 * to mean a size of 32768 bytes but output a non-default block size in 24
980 * bits rather than 16. The compatibility of this behavior is unknown
981 * because WIMs created with chunk size greater than 32768 can seemingly
982 * only be opened by wimlib anyway. */
983 if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
984 bitstream_put_bits(ostream, 1, 1);
986 bitstream_put_bits(ostream, 0, 1);
988 if (max_window_size >= 65536)
989 bitstream_put_bits(ostream, block_size >> 16, 8);
991 bitstream_put_bits(ostream, block_size, 16);
994 /* Write out lengths of the main code. Note that the LZX specification
995 * incorrectly states that the aligned offset code comes after the
996 * length code, but in fact it is the very first code to be written
997 * (before the main code). */
998 if (block_type == LZX_BLOCKTYPE_ALIGNED)
999 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1000 bitstream_put_bits(ostream, codes->lens.aligned[i],
1001 LZX_ALIGNEDCODE_ELEMENT_SIZE);
1003 LZX_DEBUG("Writing main code...");
1005 /* Write the precode and lengths for the first LZX_NUM_CHARS symbols in
1006 * the main code, which are the codewords for literal bytes. */
1007 lzx_write_compressed_code(ostream,
1009 prev_codes->lens.main,
1012 /* Write the precode and lengths for the rest of the main code, which
1013 * are the codewords for match headers. */
1014 lzx_write_compressed_code(ostream,
1015 codes->lens.main + LZX_NUM_CHARS,
1016 prev_codes->lens.main + LZX_NUM_CHARS,
1017 num_main_syms - LZX_NUM_CHARS);
1019 LZX_DEBUG("Writing length code...");
1021 /* Write the precode and lengths for the length code. */
1022 lzx_write_compressed_code(ostream,
1024 prev_codes->lens.len,
1025 LZX_LENCODE_NUM_SYMBOLS);
1027 LZX_DEBUG("Writing matches and literals...");
1029 /* Write the actual matches and literals. */
1030 lzx_write_matches_and_literals(ostream, block_type,
1031 chosen_matches, num_chosen_matches,
1034 LZX_DEBUG("Done writing block.");
1037 /* Write out the LZX blocks that were computed. */
1039 lzx_write_all_blocks(struct lzx_compressor *ctx, struct output_bitstream *ostream)
1042 const struct lzx_codes *prev_codes = &ctx->zero_codes;
1043 for (unsigned i = 0; i < ctx->num_blocks; i++) {
1044 const struct lzx_block_spec *spec = &ctx->block_specs[i];
1046 LZX_DEBUG("Writing block %u/%u (type=%d, size=%u, num_chosen_matches=%u)...",
1047 i + 1, ctx->num_blocks,
1048 spec->block_type, spec->block_size,
1049 spec->num_chosen_matches);
1051 lzx_write_compressed_block(spec->block_type,
1053 ctx->max_window_size,
1055 &ctx->chosen_matches[spec->chosen_matches_start_pos],
1056 spec->num_chosen_matches,
1061 prev_codes = &spec->codes;
1065 /* Constructs an LZX match from a literal byte and updates the main code
 * symbol frequencies. */
1068 lzx_tally_literal(u8 lit, struct lzx_freqs *freqs)
1074 /* Constructs an LZX match from an offset and a length, and updates the LRU
1075 * queue and the frequency of symbols in the main, length, and aligned offset
1076 * alphabets. The return value is a 32-bit number that provides the match in an
1077 * intermediate representation documented in `struct lzx_match' above. */
1079 lzx_tally_match(unsigned match_len, unsigned match_offset,
1080 struct lzx_freqs *freqs, struct lzx_lru_queue *queue)
1082 unsigned position_slot;
1083 unsigned position_footer;
1085 unsigned main_symbol;
1086 unsigned len_footer;
1087 unsigned adjusted_match_len;
1089 LZX_ASSERT(match_len >= LZX_MIN_MATCH_LEN && match_len <= LZX_MAX_MATCH_LEN);
1091 /* The match offset shall be encoded as a position slot (itself encoded
1092 * as part of the main symbol) and a position footer. */
1093 position_slot = lzx_get_position_slot(match_offset, queue);
1094 position_footer = (match_offset + LZX_OFFSET_OFFSET) &
1095 ((1U << lzx_get_num_extra_bits(position_slot)) - 1);
1097 /* The match length shall be encoded as a length header (itself encoded
1098 * as part of the main symbol) and an optional length footer. */
1099 adjusted_match_len = match_len - LZX_MIN_MATCH_LEN;
1100 if (adjusted_match_len < LZX_NUM_PRIMARY_LENS) {
1101 /* No length footer needed. */
1102 len_header = adjusted_match_len;
1104 /* Length footer needed. It will be encoded using the length
1106 len_header = LZX_NUM_PRIMARY_LENS;
1107 len_footer = adjusted_match_len - LZX_NUM_PRIMARY_LENS;
1108 freqs->len[len_footer]++;
1111 /* Account for the main symbol. */
1112 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
1114 freqs->main[main_symbol]++;
1116 /* In an aligned offset block, 3 bits of the position footer are output
1117 * as an aligned offset symbol. Account for this, although we may
1118 * ultimately decide to output the block as verbatim. */
1120 /* The following check is equivalent to:
1122 * if (lzx_extra_bits[position_slot] >= 3)
1124 * Note that this correctly excludes position slots that correspond to
1125 * recent offsets. */
1126 if (position_slot >= 8)
1127 freqs->aligned[position_footer & 7]++;
1129 /* Pack the position slot, position footer, and match length into an
1130 * intermediate representation. See `struct lzx_match' for details.
1132 LZX_ASSERT(LZX_MAX_POSITION_SLOTS <= 64);
1133 LZX_ASSERT(lzx_get_num_extra_bits(LZX_MAX_POSITION_SLOTS - 1) <= 17);
1134 LZX_ASSERT(LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1 <= 256);
1136 LZX_ASSERT(position_slot <= (1U << (31 - 25)) - 1);
1137 LZX_ASSERT(position_footer <= (1U << (25 - 8)) - 1);
1138 LZX_ASSERT(adjusted_match_len <= (1U << (8 - 0)) - 1);
1140 (position_slot << 25) |
1141 (position_footer << 8) |
1142 (adjusted_match_len);
1145 struct lzx_record_ctx {
1146 struct lzx_freqs freqs;
1147 struct lzx_lru_queue queue;
1148 struct lzx_match *matches;
1152 lzx_record_match(unsigned len, unsigned offset, void *_ctx)
1154 struct lzx_record_ctx *ctx = _ctx;
1156 (ctx->matches++)->data = lzx_tally_match(len, offset, &ctx->freqs, &ctx->queue);
1160 lzx_record_literal(u8 lit, void *_ctx)
1162 struct lzx_record_ctx *ctx = _ctx;
1164 (ctx->matches++)->data = lzx_tally_literal(lit, &ctx->freqs);
1167 /* Returns the cost, in bits, to output a literal byte using the specified
 * cost model. */
1170 lzx_literal_cost(u8 c, const struct lzx_costs * costs)
1172 return costs->main[c];
1175 /* Given a (length, offset) pair that could be turned into a valid LZX match as
1176 * well as costs for the codewords in the main, length, and aligned Huffman
1177 * codes, return the approximate number of bits it will take to represent this
1178 * match in the compressed output. Take into account the match offset LRU
1179 * queue and optionally update it. */
1181 lzx_match_cost(unsigned length, unsigned offset, const struct lzx_costs *costs,
1182 struct lzx_lru_queue *queue)
1184 unsigned position_slot;
1185 unsigned len_header, main_symbol;
1188 position_slot = lzx_get_position_slot(offset, queue);
1190 len_header = min(length - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
1191 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
1193 /* Account for main symbol. */
1194 cost += costs->main[main_symbol];
1196 /* Account for extra position information. */
1197 unsigned num_extra_bits = lzx_get_num_extra_bits(position_slot);
1198 if (num_extra_bits >= 3) {
1199 cost += num_extra_bits - 3;
1200 cost += costs->aligned[(offset + LZX_OFFSET_OFFSET) & 7];
1202 cost += num_extra_bits;
1205 /* Account for extra length information. */
1206 if (len_header == LZX_NUM_PRIMARY_LENS)
1207 cost += costs->len[length - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
1213 /* Fast heuristic cost evaluation to use in the inner loop of the match-finder.
1214 * Unlike lzx_match_cost() which does a true cost evaluation, this simply
1215 * prioritizes matches based on their offset. */
1217 lzx_match_cost_fast(unsigned offset, const struct lzx_lru_queue *queue)
1219 /* It seems well worth it to take the time to give priority to recently
 * used offsets. */
1221 for (unsigned i = 0; i < LZX_NUM_RECENT_OFFSETS; i++)
1222 if (offset == queue->R[i])
1225 BUILD_BUG_ON(LZX_MAX_WINDOW_SIZE >= (block_cost_t)~0U);
1229 /* Set the cost model @ctx->costs from the Huffman codeword lengths
 * specified in @lens.
1232 * The cost model and codeword lengths are almost the same thing, but the
1233 * Huffman codewords with length 0 correspond to symbols with zero frequency
1234 * that still need to be assigned actual costs. The specific values assigned
1235 * are arbitrary, but they should be fairly high (near the maximum codeword
1236 * length) to take into account the fact that uses of these symbols are expected
 * to be rare. */
1239 lzx_set_costs(struct lzx_compressor * ctx, const struct lzx_lens * lens)
1242 unsigned num_main_syms = ctx->num_main_syms;
1245 for (i = 0; i < num_main_syms; i++) {
1246 ctx->costs.main[i] = lens->main[i];
1247 if (ctx->costs.main[i] == 0)
1248 ctx->costs.main[i] = ctx->params.alg_params.slow.main_nostat_cost;
1252 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
1253 ctx->costs.len[i] = lens->len[i];
1254 if (ctx->costs.len[i] == 0)
1255 ctx->costs.len[i] = ctx->params.alg_params.slow.len_nostat_cost;
1258 /* Aligned offset code */
1259 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1260 ctx->costs.aligned[i] = lens->aligned[i];
1261 if (ctx->costs.aligned[i] == 0)
1262 ctx->costs.aligned[i] = ctx->params.alg_params.slow.aligned_nostat_cost;
1266 /* Advance the suffix array match-finder to the next position. */
1268 lzx_lz_update_salink(input_idx_t i,
1269 const input_idx_t SA[restrict],
1270 const input_idx_t ISA[restrict],
1271 struct salink link[restrict])
1273 /* r = Rank of the suffix at the current position. */
1274 const input_idx_t r = ISA[i];
1276 /* next = rank of LOWEST ranked suffix that is ranked HIGHER than the
1277 * current suffix AND has a LOWER position, or -1 if none exists. */
1278 const input_idx_t next = link[r].next;
1280 /* prev = rank of HIGHEST ranked suffix that is ranked LOWER than the
1281 * current suffix AND has a LOWER position, or -1 if none exists. */
1282 const input_idx_t prev = link[r].prev;
1284 /* Link the suffix at the current position into the linked list that
1285 * contains all suffixes in the suffix array that appear at or
1286 * before the current position, sorted by rank.
1288 * Save the values of all fields we overwrite so that rollback is
 * possible. */
1290 if (next != (input_idx_t)~0U) {
1292 link[next].prev = r;
1293 link[next].lcpprev = link[r].lcpnext;
1296 if (prev != (input_idx_t)~0U) {
1298 link[prev].next = r;
1299 link[prev].lcpnext = link[r].lcpprev;
1304 * Use the suffix array match-finder to retrieve a list of LZ matches at the
 * current position.
1307 * [in] @i Current position in the window.
1308 * [in] @SA Suffix array for the window.
1309 * [in] @ISA Inverse suffix array for the window.
1310 * [inout] @link Suffix array links used internally by the match-finder.
1311 * [out] @matches The (length, offset) pairs of the resulting matches will
1312 * be written here, sorted in decreasing order by
1313 * length. All returned lengths will be unique.
1314 * [in] @queue Recently used match offsets, used when evaluating the
 * cost of candidate matches.
1316 * [in] @min_match_len Minimum match length to return.
1317 * [in] @max_matches_to_consider Maximum number of matches to consider at
 * this position.
1319 * [in] @max_matches_to_return Maximum number of matches to return.
1321 * The return value is the number of matches found and written to @matches.
1324 lzx_lz_get_matches(const input_idx_t i,
1325 const input_idx_t SA[const restrict],
1326 const input_idx_t ISA[const restrict],
1327 struct salink link[const restrict],
1328 struct raw_match matches[const restrict],
1329 const struct lzx_lru_queue * const restrict queue,
1330 const unsigned min_match_len,
1331 const u32 max_matches_to_consider,
1332 const u32 max_matches_to_return)
1334 /* r = Rank of the suffix at the current position. */
1335 const input_idx_t r = ISA[i];
1337 /* Prepare for searching the current position. */
1338 lzx_lz_update_salink(i, SA, ISA, link);
1340 /* L = rank of next suffix to the left;
1341 * R = rank of next suffix to the right;
1342 * lenL = length of match between current position and the suffix with rank L;
1343 * lenR = length of match between current position and the suffix with rank R.
1345 * This is left and right relative to the rank of the current suffix.
1346 * Since the suffixes in the suffix array are sorted, the longest
1347 * matches are immediately to the left and right (using the linked list
1348 * to ignore all suffixes that occur later in the window). The match
1349 * length decreases the farther left and right we go. We shall keep the
1350 * length on both sides in sync in order to choose the lowest-cost match
 * of each length. */
1353 input_idx_t L = link[r].prev;
1354 input_idx_t R = link[r].next;
1355 input_idx_t lenL = link[r].lcpprev;
1356 input_idx_t lenR = link[r].lcpnext;
1358 /* nmatches = number of matches found so far. */
1359 unsigned nmatches = 0;
1361 /* best_cost = cost of lowest-cost match found so far.
1363 * We keep track of this so that we can ignore shorter matches that do
1364 * not have lower costs than longer matches already found.
1366 block_cost_t best_cost = INFINITE_BLOCK_COST;
1368 /* count_remaining = maximum number of possible matches remaining to be
 * considered. */
1370 u32 count_remaining = max_matches_to_consider;
1372 /* pending = match currently being considered for a specific length. */
1373 struct raw_match pending;
1374 block_cost_t pending_cost;
1376 while (lenL >= min_match_len || lenR >= min_match_len)
1379 pending_cost = INFINITE_BLOCK_COST;
1383 if (lenL >= min_match_len && lenL >= lenR) {
1386 if (--count_remaining == 0)
1387 goto out_save_pending;
1389 input_idx_t offset = i - SA[L];
1391 /* Save match if it has smaller cost. */
1392 cost = lzx_match_cost_fast(offset, queue);
1393 if (cost < pending_cost) {
1394 pending.offset = offset;
1395 pending_cost = cost;
1398 if (link[L].lcpprev < lenL) {
1399 /* Match length decreased. */
1401 lenL = link[L].lcpprev;
1403 /* Save the pending match unless the
1404 * right side still may have matches of
1405 * this length to be scanned, or if a
1406 * previous (longer) match had lower cost. */
1408 if (pending.len > lenR) {
1409 if (pending_cost < best_cost) {
1410 best_cost = pending_cost;
1411 matches[nmatches++] = pending;
1412 if (nmatches == max_matches_to_return)
1416 pending_cost = INFINITE_BLOCK_COST;
1418 if (lenL < min_match_len || lenL < lenR)
1428 if (lenR >= min_match_len && lenR > lenL) {
1431 if (--count_remaining == 0)
1432 goto out_save_pending;
1434 input_idx_t offset = i - SA[R];
1436 /* Save match if it has smaller cost. */
1437 cost = lzx_match_cost_fast(offset, queue);
1438 if (cost < pending_cost) {
1439 pending.offset = offset;
1440 pending_cost = cost;
1443 if (link[R].lcpnext < lenR) {
1444 /* Match length decreased. */
1446 lenR = link[R].lcpnext;
1448 /* Save the pending match unless a
1449 * previous (longer) match had lower cost. */
1451 if (pending_cost < best_cost) {
1452 matches[nmatches++] = pending;
1453 best_cost = pending_cost;
1454 if (nmatches == max_matches_to_return)
1458 if (lenR < min_match_len || lenR <= lenL)
1462 pending_cost = INFINITE_BLOCK_COST;
1471 if (pending_cost != INFINITE_BLOCK_COST)
1472 matches[nmatches++] = pending;
1479 /* Tell the match-finder to skip the specified number of bytes (@n) in the
 * input. */
1482 lzx_lz_skip_bytes(struct lzx_compressor *ctx, unsigned n)
1484 LZX_ASSERT(n <= ctx->match_window_end - ctx->match_window_pos);
1485 if (ctx->matches_cached) {
1486 ctx->match_window_pos += n;
1488 ctx->cached_matches_pos +=
1489 ctx->cached_matches[ctx->cached_matches_pos].len + 1;
1493 ctx->cached_matches[ctx->cached_matches_pos++].len = 0;
1494 lzx_lz_update_salink(ctx->match_window_pos++, ctx->SA,
1495 ctx->ISA, ctx->salink);
1500 /* Retrieve a list of matches available at the next position in the input.
1502 * The matches are written to ctx->matches in decreasing order of length, and
1503 * the return value is the number of matches found. */
1505 lzx_lz_get_matches_caching(struct lzx_compressor *ctx,
1506 const struct lzx_lru_queue *queue,
1507 struct raw_match **matches_ret)
1509 unsigned num_matches;
1510 struct raw_match *matches;
1512 LZX_ASSERT(ctx->match_window_pos <= ctx->match_window_end);
1514 matches = &ctx->cached_matches[ctx->cached_matches_pos + 1];
1516 if (ctx->matches_cached) {
1517 num_matches = matches[-1].len;
1519 unsigned min_match_len = LZX_MIN_MATCH_LEN;
1520 if (!ctx->params.alg_params.slow.use_len2_matches)
1521 min_match_len = max(min_match_len, 3);
1522 const u32 max_search_depth = ctx->params.alg_params.slow.max_search_depth;
1523 const u32 max_matches_per_pos = ctx->params.alg_params.slow.max_matches_per_pos;
1525 if (unlikely(max_search_depth == 0 || max_matches_per_pos == 0))
1528 num_matches = lzx_lz_get_matches(ctx->match_window_pos,
1536 max_matches_per_pos);
1537 matches[-1].len = num_matches;
1539 ctx->cached_matches_pos += num_matches + 1;
1540 *matches_ret = matches;
1542 /* Cap the length of returned matches to the number of bytes remaining,
1543 * if it is not the whole window. */
1544 if (ctx->match_window_end < ctx->window_size) {
1545 unsigned maxlen = ctx->match_window_end - ctx->match_window_pos;
1546 for (unsigned i = 0; i < num_matches; i++)
1547 if (matches[i].len > maxlen)
1548 matches[i].len = maxlen;
1551 fprintf(stderr, "Pos %u/%u: %u matches\n",
1552 ctx->match_window_pos, ctx->match_window_end, num_matches);
1553 for (unsigned i = 0; i < num_matches; i++)
1554 fprintf(stderr, "\tLen %u Offset %u\n", matches[i].len, matches[i].offset);
1557 #ifdef ENABLE_LZX_DEBUG
1558 for (unsigned i = 0; i < num_matches; i++) {
1559 LZX_ASSERT(matches[i].len >= LZX_MIN_MATCH_LEN);
1560 LZX_ASSERT(matches[i].len <= LZX_MAX_MATCH_LEN);
1561 LZX_ASSERT(matches[i].len <= ctx->match_window_end - ctx->match_window_pos);
1562 LZX_ASSERT(matches[i].offset > 0);
1563 LZX_ASSERT(matches[i].offset <= ctx->match_window_pos);
1564 LZX_ASSERT(!memcmp(&ctx->window[ctx->match_window_pos],
1565 &ctx->window[ctx->match_window_pos - matches[i].offset],
1570 ctx->match_window_pos++;
1575 * Reverse the linked list of near-optimal matches so that they can be returned
1576 * in forwards order.
1578 * Returns the first match in the list.
1580 static struct raw_match
1581 lzx_lz_reverse_near_optimal_match_list(struct lzx_compressor *ctx,
1584 unsigned prev_link, saved_prev_link;
1585 unsigned prev_match_offset, saved_prev_match_offset;
1587 ctx->optimum_end_idx = cur_pos;
1589 saved_prev_link = ctx->optimum[cur_pos].prev.link;
1590 saved_prev_match_offset = ctx->optimum[cur_pos].prev.match_offset;
1593 prev_link = saved_prev_link;
1594 prev_match_offset = saved_prev_match_offset;
1596 saved_prev_link = ctx->optimum[prev_link].prev.link;
1597 saved_prev_match_offset = ctx->optimum[prev_link].prev.match_offset;
1599 ctx->optimum[prev_link].next.link = cur_pos;
1600 ctx->optimum[prev_link].next.match_offset = prev_match_offset;
1602 cur_pos = prev_link;
1603 } while (cur_pos != 0);
1605 ctx->optimum_cur_idx = ctx->optimum[0].next.link;
1607 return (struct raw_match)
1608 { .len = ctx->optimum_cur_idx,
1609 .offset = ctx->optimum[0].next.match_offset,
1614 * lzx_lz_get_near_optimal_match() -
1616 * Choose the optimal match or literal to use at the next position in the input.
1618 * Unlike a greedy parser that always takes the longest match, or even a
1619 * parser with one match/literal look-ahead like zlib, the algorithm used here
1620 * may look ahead many matches/literals to determine the optimal match/literal to
1621 * output next. The motivation is that the compression ratio is improved if the
1622 * compressor can do things like use a shorter-than-possible match in order to
1623 * allow a longer match later, and also take into account the Huffman code cost
1624 * model rather than simply assuming that longer is better.
1626 * Still, this is not truly an optimal parser because very long matches are
1627 * taken immediately, and the raw match-finder takes some shortcuts. This is
1628 * done to avoid considering many different alternatives that are unlikely to
1629 * be significantly better.
1631 * This algorithm is based on that used in 7-Zip's DEFLATE encoder.
1633 * Each call to this function does one of two things:
1635 * 1. Build a near-optimal sequence of matches/literals, up to some point, that
1636 * will be returned by subsequent calls to this function, then return the
 * first one.
1641 * 2. Return the next match/literal previously computed by a call to this
 * function.
1644 * This function relies on the following state in the compressor context:
1646 * ctx->window (read-only: preprocessed data being compressed)
1647 * ctx->costs (read-only: cost model to use)
1648 * ctx->optimum (internal state; leave uninitialized)
1649 * ctx->optimum_cur_idx (must set to 0 before first call)
1650 * ctx->optimum_end_idx (must set to 0 before first call)
1652 * Plus any state used by the raw match-finder.
1654 * The return value is a (length, offset) pair specifying the match or literal
1655 * chosen. For literals, the length is less than LZX_MIN_MATCH_LEN and the
1656 * offset is meaningless.
1658 static struct raw_match
1659 lzx_lz_get_near_optimal_match(struct lzx_compressor * ctx)
1661 unsigned num_possible_matches;
1662 struct raw_match *possible_matches;
1663 struct raw_match match;
1664 unsigned longest_match_len;
1666 if (ctx->optimum_cur_idx != ctx->optimum_end_idx) {
1667 /* Case 2: Return the next match/literal already found. */
1668 match.len = ctx->optimum[ctx->optimum_cur_idx].next.link -
1669 ctx->optimum_cur_idx;
1670 match.offset = ctx->optimum[ctx->optimum_cur_idx].next.match_offset;
1672 ctx->optimum_cur_idx = ctx->optimum[ctx->optimum_cur_idx].next.link;
1676 /* Case 1: Compute a new list of matches/literals to return. */
1678 ctx->optimum_cur_idx = 0;
1679 ctx->optimum_end_idx = 0;
1681 /* Get matches at this position. */
1682 num_possible_matches = lzx_lz_get_matches_caching(ctx, &ctx->queue, &possible_matches);
1684 /* If no matches found, return literal. */
1685 if (num_possible_matches == 0)
1686 return (struct raw_match){ .len = 0 };
1688 /* The matches that were found are sorted in decreasing order by length.
1689 * Get the length of the longest one. */
1690 longest_match_len = possible_matches[0].len;
1692 /* Greedy heuristic: if the longest match that was found is greater
1693 * than the number of fast bytes, return it immediately; don't bother
1694 * doing more work. */
1695 if (longest_match_len > ctx->params.alg_params.slow.num_fast_bytes) {
1696 lzx_lz_skip_bytes(ctx, longest_match_len - 1);
1697 return possible_matches[0];
1700 /* Calculate the cost to reach the next position by outputting a
 * literal. */
1702 ctx->optimum[0].queue = ctx->queue;
1703 ctx->optimum[1].queue = ctx->optimum[0].queue;
1704 ctx->optimum[1].cost = lzx_literal_cost(ctx->window[ctx->match_window_pos],
1706 ctx->optimum[1].prev.link = 0;
1708 /* Calculate the cost to reach any position up to and including that
1709 * reached by the longest match, using the shortest (i.e. closest) match
1710 * that reaches each position. */
1711 BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2);
1712 for (unsigned len = LZX_MIN_MATCH_LEN, match_idx = num_possible_matches - 1;
1713 len <= longest_match_len; len++) {
1715 LZX_ASSERT(match_idx < num_possible_matches);
1717 ctx->optimum[len].queue = ctx->optimum[0].queue;
1718 ctx->optimum[len].prev.link = 0;
1719 ctx->optimum[len].prev.match_offset = possible_matches[match_idx].offset;
1720 ctx->optimum[len].cost = lzx_match_cost(len,
1721 possible_matches[match_idx].offset,
1723 &ctx->optimum[len].queue);
1724 if (len == possible_matches[match_idx].len)
1728 unsigned cur_pos = 0;
1730 /* len_end: greatest index forward at which costs have been calculated
 * so far. */
1732 unsigned len_end = longest_match_len;
1735 /* Advance to next position. */
1738 if (cur_pos == len_end || cur_pos == LZX_OPTIM_ARRAY_SIZE)
1739 return lzx_lz_reverse_near_optimal_match_list(ctx, cur_pos);
1741 /* Retrieve the number of matches available at this position. */
1742 num_possible_matches = lzx_lz_get_matches_caching(ctx, &ctx->optimum[cur_pos].queue,
1745 unsigned new_len = 0;
1747 if (num_possible_matches != 0) {
1748 new_len = possible_matches[0].len;
1750 /* Greedy heuristic: if we found a match greater than
1751 * the number of fast bytes, stop immediately. */
1752 if (new_len > ctx->params.alg_params.slow.num_fast_bytes) {
1754 /* Build the list of matches to return and get
 * the first one. */
1756 match = lzx_lz_reverse_near_optimal_match_list(ctx, cur_pos);
1758 /* Append the long match to the end of the list. */
1759 ctx->optimum[cur_pos].next.match_offset =
1760 possible_matches[0].offset;
1761 ctx->optimum[cur_pos].next.link = cur_pos + new_len;
1762 ctx->optimum_end_idx = cur_pos + new_len;
1764 /* Skip over the remaining bytes of the long match. */
1765 lzx_lz_skip_bytes(ctx, new_len - 1);
1767 /* Return first match in the list */
		/* Consider proceeding with a literal byte.  */
		block_cost_t cur_cost = ctx->optimum[cur_pos].cost;
		block_cost_t cur_plus_literal_cost = cur_cost +
			lzx_literal_cost(ctx->window[ctx->match_window_pos - 1],
					 &ctx->costs);
		if (cur_plus_literal_cost < ctx->optimum[cur_pos + 1].cost) {
			ctx->optimum[cur_pos + 1].cost = cur_plus_literal_cost;
			ctx->optimum[cur_pos + 1].prev.link = cur_pos;
			ctx->optimum[cur_pos + 1].queue = ctx->optimum[cur_pos].queue;
		}

		if (num_possible_matches == 0)
			continue;

		/* Consider proceeding with a match.  */

		while (len_end < cur_pos + new_len)
			ctx->optimum[++len_end].cost = INFINITE_BLOCK_COST;

		for (unsigned len = LZX_MIN_MATCH_LEN, match_idx = num_possible_matches - 1;
		     len <= new_len; len++) {
			LZX_ASSERT(match_idx < num_possible_matches);
			struct lzx_lru_queue q = ctx->optimum[cur_pos].queue;
			block_cost_t cost = cur_cost + lzx_match_cost(len,
								      possible_matches[match_idx].offset,
								      &ctx->costs,
								      &q);

			if (cost < ctx->optimum[cur_pos + len].cost) {
				ctx->optimum[cur_pos + len].cost = cost;
				ctx->optimum[cur_pos + len].prev.link = cur_pos;
				ctx->optimum[cur_pos + len].prev.match_offset =
						possible_matches[match_idx].offset;
				ctx->optimum[cur_pos + len].queue = q;
			}

			if (len == possible_matches[match_idx].len)
				match_idx--;
		}
	}
}
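/* Note on the loop above: it is shortest-path relaxation over a DAG whose
 * vertices are window positions.  A literal relaxes the edge to cur_pos + 1;
 * a match of length L relaxes the edge to cur_pos + L.  Since every edge
 * points forward, a single left-to-right sweep is sufficient.  */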
/*
 * Set default symbol costs.
 */
static void
lzx_set_default_costs(struct lzx_costs * costs, unsigned num_main_syms)
{
	unsigned i;

	/* Literal symbols  */
	for (i = 0; i < LZX_NUM_CHARS; i++)
		costs->main[i] = 8;

	/* Match header symbols  */
	for (; i < num_main_syms; i++)
		costs->main[i] = 10;

	/* Length symbols  */
	for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
		costs->len[i] = 8;

	/* Aligned offset symbols  */
	for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
		costs->aligned[i] = 3;
}
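/* These defaults are rough a-priori guesses at the Huffman codeword lengths,
 * in bits, to use before any symbol statistics exist.  Once a parsing pass
 * has run, lzx_set_costs() replaces them with the codeword lengths of the
 * codes actually built for the block.  */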
/* Given the frequencies of symbols in a compressed block and the corresponding
 * Huffman codes, return LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM if an
 * aligned offset or verbatim block, respectively, will take fewer bits to
 * output.  */
static int
lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
			       const struct lzx_codes * codes)
{
	unsigned aligned_cost = 0;
	unsigned verbatim_cost = 0;

	/* Verbatim blocks have a constant 3 bits per position footer.  Aligned
	 * offset blocks have an aligned offset symbol per position footer, plus
	 * an extra 24 bits to output the lengths necessary to reconstruct the
	 * aligned offset code itself.  */
	for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
		verbatim_cost += 3 * freqs->aligned[i];
		aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
	}
	aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
	if (aligned_cost < verbatim_cost)
		return LZX_BLOCKTYPE_ALIGNED;
	else
		return LZX_BLOCKTYPE_VERBATIM;
}
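/* Worked example with illustrative numbers: the aligned offset code has 8
 * symbols whose lengths take 3 bits each to transmit, giving the fixed
 * 8 * 3 = 24 bit overhead.  If a block contains 1000 position footers and the
 * aligned code averages 2.5 bits per footer, the aligned encoding costs about
 * 2500 + 24 = 2524 bits versus 3 * 1000 = 3000 bits verbatim, so
 * LZX_BLOCKTYPE_ALIGNED is chosen; with only 20 footers, the aligned cost
 * 50 + 24 = 74 exceeds the verbatim cost 3 * 20 = 60, so verbatim wins.  */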
/* Find a near-optimal sequence of matches/literals with which to output the
 * specified LZX block, then set its type to that which has the minimum cost to
 * output.  */
static void
lzx_optimize_block(struct lzx_compressor *ctx, struct lzx_block_spec *spec,
		   unsigned num_passes)
{
	const struct lzx_lru_queue orig_queue = ctx->queue;
	struct lzx_freqs freqs;

	unsigned orig_window_pos = spec->window_pos;
	unsigned orig_cached_pos = ctx->cached_matches_pos;

	LZX_ASSERT(ctx->match_window_pos == spec->window_pos);

	ctx->match_window_end = spec->window_pos + spec->block_size;
	spec->chosen_matches_start_pos = spec->window_pos;

	LZX_ASSERT(num_passes >= 1);

	/* The first optimal parsing pass is done using the cost model already
	 * set in ctx->costs.  Each later pass is done using a cost model
	 * computed from the previous pass.  */
	for (unsigned pass = 0; pass < num_passes; pass++) {

		ctx->match_window_pos = orig_window_pos;
		ctx->cached_matches_pos = orig_cached_pos;
		ctx->queue = orig_queue;
		spec->num_chosen_matches = 0;
		memset(&freqs, 0, sizeof(freqs));

		for (unsigned i = spec->window_pos; i < spec->window_pos + spec->block_size; ) {
			struct raw_match raw_match;
			struct lzx_match lzx_match;

			raw_match = lzx_lz_get_near_optimal_match(ctx);
			if (raw_match.len >= LZX_MIN_MATCH_LEN) {
				lzx_match.data = lzx_tally_match(raw_match.len, raw_match.offset,
								 &freqs, &ctx->queue);
				i += raw_match.len;
			} else {
				lzx_match.data = lzx_tally_literal(ctx->window[i], &freqs);
				i += 1;
			}
			ctx->chosen_matches[spec->chosen_matches_start_pos +
						spec->num_chosen_matches++] = lzx_match;
		}

		lzx_make_huffman_codes(&freqs, &spec->codes,
				       ctx->num_main_syms);
		if (pass < num_passes - 1)
			lzx_set_costs(ctx, &spec->codes.lens);
		ctx->matches_cached = true;
	}
	spec->block_type = lzx_choose_verbatim_or_aligned(&freqs, &spec->codes);
	ctx->matches_cached = false;
}
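/* This is the standard iterative-refinement approach to optimal parsing:
 * pass 1 parses under the existing cost model; the symbol frequencies from
 * that parse yield real Huffman codes, whose codeword lengths become the cost
 * model for pass 2, and so on.  The slow-mode defaults later in this file use
 * num_optim_passes = 2.  */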
static void
lzx_optimize_blocks(struct lzx_compressor *ctx)
{
	lzx_lru_queue_init(&ctx->queue);
	ctx->optimum_cur_idx = 0;
	ctx->optimum_end_idx = 0;

	const unsigned num_passes = ctx->params.alg_params.slow.num_optim_passes;

	for (unsigned i = 0; i < ctx->num_blocks; i++)
		lzx_optimize_block(ctx, &ctx->block_specs[i], num_passes);
}
/* Initialize the suffix array match-finder for the specified input.  */
static void
lzx_lz_init_matchfinder(const u8 T[const restrict],
			const input_idx_t n,
			input_idx_t SA[const restrict],
			input_idx_t ISA[const restrict],
			input_idx_t LCP[const restrict],
			struct salink link[const restrict],
			const unsigned max_match_len)
{
	/* Compute SA (Suffix Array).  */
	{
		/* ISA and link are used as temporary space.  */
		BUILD_BUG_ON(LZX_MIN_WINDOW_SIZE * sizeof(ISA[0]) < 256 * sizeof(saidx_t));
		BUILD_BUG_ON(LZX_MIN_WINDOW_SIZE * 2 * sizeof(link[0]) < 256 * 256 * sizeof(saidx_t));

		if (sizeof(input_idx_t) == sizeof(saidx_t)) {
			divsufsort(T, (saidx_t*)SA, n, (saidx_t*)ISA, (saidx_t*)link);
		} else {
			saidx_t sa[n];
			divsufsort(T, sa, n, (saidx_t*)ISA, (saidx_t*)link);
			for (input_idx_t i = 0; i < n; i++)
				SA[i] = sa[i];
		}
	}
#ifdef ENABLE_LZX_DEBUG
	/* Verify suffix array.  */
	{
		bool found[n];
		ZERO_ARRAY(found);
		for (input_idx_t r = 0; r < n; r++) {
			input_idx_t i = SA[r];
			LZX_ASSERT(i < n);
			LZX_ASSERT(!found[i]);
			found[i] = true;
		}
	}
	for (input_idx_t r = 0; r < n - 1; r++) {
		input_idx_t i1 = SA[r];
		input_idx_t i2 = SA[r + 1];

		input_idx_t n1 = n - i1;
		input_idx_t n2 = n - i2;

		LZX_ASSERT(memcmp(&T[i1], &T[i2], min(n1, n2)) <= 0);
	}
	LZX_DEBUG("Verified SA (len %u)", n);
#endif /* ENABLE_LZX_DEBUG */
	/* Compute ISA (Inverse Suffix Array)  */
	for (input_idx_t r = 0; r < n; r++)
		ISA[SA[r]] = r;

	/* Compute LCP (longest common prefix) array.
	 *
	 * Algorithm adapted from Kasai et al. 2001: "Linear-Time
	 * Longest-Common-Prefix Computation in Suffix Arrays and Its
	 * Applications".  */
	{
		input_idx_t h = 0;
		for (input_idx_t i = 0; i < n; i++) {
			input_idx_t r = ISA[i];
			if (r > 0) {
				input_idx_t j = SA[r - 1];
				input_idx_t lim = min(n - i, n - j);

				while (h < lim && T[i + h] == T[j + h])
					h++;
				LCP[r] = h;
				if (h > 0)
					h--;
			}
		}
	}

#ifdef ENABLE_LZX_DEBUG
	/* Verify LCP array.  */
	for (input_idx_t r = 0; r < n - 1; r++) {
		LZX_ASSERT(ISA[SA[r]] == r);
		LZX_ASSERT(ISA[SA[r + 1]] == r + 1);

		input_idx_t i1 = SA[r];
		input_idx_t i2 = SA[r + 1];
		input_idx_t lcp = LCP[r + 1];

		input_idx_t n1 = n - i1;
		input_idx_t n2 = n - i2;

		LZX_ASSERT(lcp <= min(n1, n2));

		LZX_ASSERT(memcmp(&T[i1], &T[i2], lcp) == 0);
		if (lcp < min(n1, n2))
			LZX_ASSERT(T[i1 + lcp] != T[i2 + lcp]);
	}
#endif /* ENABLE_LZX_DEBUG */
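	/* Example on a small input: for T = "banana" (n = 6), the
	 * lexicographically sorted suffixes are "a", "ana", "anana", "banana",
	 * "na", "nana", so SA = { 5, 3, 1, 0, 4, 2 }, and
	 * LCP = { -, 1, 3, 0, 0, 2 }: LCP[r] is the length of the longest
	 * common prefix between the suffixes ranked r - 1 and r (LCP[0] is
	 * unused).  */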
	/* Compute salink.next and salink.lcpnext.
	 *
	 * Algorithm adapted from Crochemore et al. 2009:
	 * "LPF computation revisited".
	 *
	 * Note: we cap lcpnext to the maximum match length so that the
	 * match-finder need not worry about it later.  */
	link[n - 1].next = (input_idx_t)~0U;
	link[n - 1].prev = (input_idx_t)~0U;
	link[n - 1].lcpnext = 0;
	link[n - 1].lcpprev = 0;
	for (input_idx_t r = n - 2; r != (input_idx_t)~0U; r--) {
		input_idx_t t = r + 1;
		input_idx_t l = LCP[t];
		while (t != (input_idx_t)~0 && SA[t] > SA[r]) {
			l = min(l, link[t].lcpnext);
			t = link[t].next;
		}
		link[r].next = t;
		link[r].lcpnext = min(l, max_match_len);
		LZX_ASSERT(t == (input_idx_t)~0U || l <= n - SA[t]);
		LZX_ASSERT(l <= n - SA[r]);
		LZX_ASSERT(memcmp(&T[SA[r]], &T[SA[t]], l) == 0);
	}
	/* Compute salink.prev and salink.lcpprev.
	 *
	 * Algorithm adapted from Crochemore et al. 2009:
	 * "LPF computation revisited".
	 *
	 * Note: we cap lcpprev to the maximum match length so that the
	 * match-finder need not worry about it later.  */
	link[0].prev = (input_idx_t)~0;
	link[0].next = (input_idx_t)~0;
	link[0].lcpprev = 0;
	link[0].lcpnext = 0;
	for (input_idx_t r = 1; r < n; r++) {
		input_idx_t t = r - 1;
		input_idx_t l = LCP[r];
		while (t != (input_idx_t)~0 && SA[t] > SA[r]) {
			l = min(l, link[t].lcpprev);
			t = link[t].prev;
		}
		link[r].prev = t;
		link[r].lcpprev = min(l, max_match_len);
		LZX_ASSERT(t == (input_idx_t)~0 || l <= n - SA[t]);
		LZX_ASSERT(l <= n - SA[r]);
		LZX_ASSERT(memcmp(&T[SA[r]], &T[SA[t]], l) == 0);
	}
}
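/* After this setup, following link[r].prev / link[r].next from the rank of
 * the current suffix walks to suffixes at strictly earlier window positions,
 * i.e. candidate match sources, while lcpprev / lcpnext bound the number of
 * bytes that can match in each direction.  This is what lets the match-finder
 * report matches at each position cheaply.  */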
/* Prepare the input window into one or more LZX blocks ready to be output.  */
static void
lzx_prepare_blocks(struct lzx_compressor * ctx)
{
	/* Initialize the match-finder.  */
	lzx_lz_init_matchfinder(ctx->window, ctx->window_size,
				ctx->SA, ctx->ISA, ctx->LCP, ctx->salink,
				LZX_MAX_MATCH_LEN);
	ctx->cached_matches_pos = 0;
	ctx->matches_cached = false;
	ctx->match_window_pos = 0;

	/* Set up a default cost model.  */
	lzx_set_default_costs(&ctx->costs, ctx->num_main_syms);

	ctx->num_blocks = DIV_ROUND_UP(ctx->window_size, LZX_DIV_BLOCK_SIZE);
	for (unsigned i = 0; i < ctx->num_blocks; i++) {
		unsigned pos = LZX_DIV_BLOCK_SIZE * i;
		ctx->block_specs[i].window_pos = pos;
		ctx->block_specs[i].block_size = min(ctx->window_size - pos, LZX_DIV_BLOCK_SIZE);
	}

	/* Determine sequence of matches/literals to output for each block.  */
	lzx_optimize_blocks(ctx);
}
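/* For example, assuming LZX_DIV_BLOCK_SIZE is 32768 (its value in this
 * codebase), a 100000-byte window is split into
 * DIV_ROUND_UP(100000, 32768) = 4 blocks, of sizes 32768, 32768, 32768, and
 * 100000 - 3 * 32768 = 1696 bytes.  */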
/*
 * This is the fast version of lzx_prepare_blocks().  This version "quickly"
 * prepares a single compressed block containing the entire input.  See the
 * description of the "Fast algorithm" at the beginning of this file for more
 * information.
 *
 * Input ---  the preprocessed data:
 *
 *	ctx->window[]
 *	ctx->window_size
 *
 * Output --- the block specification and the corresponding match/literal data:
 *
 *	ctx->block_specs[]
 *	ctx->num_blocks
 *	ctx->chosen_matches[]
 */
static void
lzx_prepare_block_fast(struct lzx_compressor * ctx)
{
	struct lzx_record_ctx record_ctx;
	struct lzx_block_spec *spec;

	/* Parameters to hash chain LZ match finder
	 * (lazy with 1 match lookahead)  */
	static const struct lz_params lzx_lz_params = {
		/* Although LZX_MIN_MATCH_LEN == 2, length 2 matches typically
		 * aren't worth choosing when using greedy or lazy parsing.  */
		.min_match      = 3,
		.max_match      = LZX_MAX_MATCH_LEN,
		.max_offset     = LZX_MAX_WINDOW_SIZE,
		.good_match     = LZX_MAX_MATCH_LEN,
		.nice_match     = LZX_MAX_MATCH_LEN,
		.max_chain_len  = LZX_MAX_MATCH_LEN,
		.max_lazy_match = LZX_MAX_MATCH_LEN,
		.too_far        = LZX_MAX_MATCH_LEN,
	};

	/* Initialize symbol frequencies and match offset LRU queue.  */
	memset(&record_ctx.freqs, 0, sizeof(struct lzx_freqs));
	lzx_lru_queue_init(&record_ctx.queue);
	record_ctx.matches = ctx->chosen_matches;

	/* Determine series of matches/literals to output.  */
	lz_analyze_block(ctx->window,
			 ctx->window_size,
			 lzx_record_match,
			 lzx_record_literal,
			 &record_ctx,
			 &lzx_lz_params,
			 ctx->prev_tab);

	/* Set up block specification.  */
	spec = &ctx->block_specs[0];
	spec->block_type = LZX_BLOCKTYPE_ALIGNED;
	spec->window_pos = 0;
	spec->block_size = ctx->window_size;
	spec->num_chosen_matches = (record_ctx.matches - ctx->chosen_matches);
	spec->chosen_matches_start_pos = 0;
	lzx_make_huffman_codes(&record_ctx.freqs, &spec->codes,
			       ctx->num_main_syms);
	ctx->num_blocks = 1;
}
static inline void
do_call_insn_translation(u32 *call_insn_target, int input_pos,
			 s32 file_size)
{
	s32 rel_offset;
	s32 abs_offset;
	rel_offset = le32_to_cpu(*call_insn_target);
	if (rel_offset >= -input_pos && rel_offset < file_size) {
		if (rel_offset < file_size - input_pos) {
			/* "good translation" */
			abs_offset = rel_offset + input_pos;
		} else {
			/* "compensating translation" */
			abs_offset = rel_offset - file_size;
		}
		*call_insn_target = cpu_to_le32(abs_offset);
	}
}
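/* Worked example: suppose an 0xe8 call begins at input position 0x1000 and
 * its stored operand is the relative displacement 0x20.  Since
 * 0x20 < file_size - 0x1000 (for the fixed file_size used below), this is a
 * "good translation" and the operand becomes the absolute offset
 * 0x20 + 0x1000 = 0x1020.  The transformation is exactly inverted during
 * decompression; its benefit is that multiple calls to the same target end up
 * with identical operand bytes, which the LZ matching then compresses well.  */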
/* This is the reverse of undo_call_insn_preprocessing() in lzx-decompress.c.
 * See the comment above that function for more information.  */
static void
do_call_insn_preprocessing(u8 data[], int size)
{
	for (int i = 0; i < size - 10; i++) {
		if (data[i] == 0xe8) {
			do_call_insn_translation((u32*)&data[i + 1], i,
						 LZX_WIM_MAGIC_FILESIZE);
			i += 4;
		}
	}
}
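/* Note: the scan stops 10 bytes short of the end, which keeps the 4-byte
 * operand read at data[i + 1] safely in bounds, and `i += 4` skips the
 * operand just translated so its bytes are not themselves scanned for 0xe8.
 * The decompressor's undo_call_insn_preprocessing() must use the same bounds
 * for the transformation to be reversible.  */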
static size_t
lzx_compress(const void *uncompressed_data, size_t uncompressed_size,
	     void *compressed_data, size_t compressed_size_avail, void *_ctx)
{
	struct lzx_compressor *ctx = _ctx;
	struct output_bitstream ostream;
	size_t compressed_size;

	if (uncompressed_size < 100) {
		LZX_DEBUG("Too small to bother compressing.");
		return 0;
	}

	if (uncompressed_size > ctx->max_window_size) {
		LZX_DEBUG("Can't compress %zu bytes using window of %u bytes!",
			  uncompressed_size, ctx->max_window_size);
		return 0;
	}

	LZX_DEBUG("Attempting to compress %zu bytes...",
		  uncompressed_size);

	/* The input data must be preprocessed.  To avoid changing the original
	 * input, copy it to a temporary buffer.  */
	memcpy(ctx->window, uncompressed_data, uncompressed_size);
	ctx->window_size = uncompressed_size;

	/* This line is unnecessary; it just avoids inconsequential accesses of
	 * uninitialized memory that would show up in memory-checking tools such
	 * as valgrind.  */
	memset(&ctx->window[ctx->window_size], 0, 12);

	LZX_DEBUG("Preprocessing data...");

	/* Before doing any actual compression, do the call instruction (0xe8
	 * byte) translation on the uncompressed data.  */
	do_call_insn_preprocessing(ctx->window, ctx->window_size);

	LZX_DEBUG("Preparing blocks...");

	/* Prepare the compressed data.  */
	if (ctx->params.algorithm == WIMLIB_LZX_ALGORITHM_FAST)
		lzx_prepare_block_fast(ctx);
	else
		lzx_prepare_blocks(ctx);

	LZX_DEBUG("Writing compressed blocks...");

	/* Generate the compressed data.  */
	init_output_bitstream(&ostream, compressed_data, compressed_size_avail);
	lzx_write_all_blocks(ctx, &ostream);

	LZX_DEBUG("Flushing bitstream...");
	compressed_size = flush_output_bitstream(&ostream);
	if (compressed_size == ~(input_idx_t)0) {
		LZX_DEBUG("Data did not compress to %zu bytes or less!",
			  compressed_size_avail);
		return 0;
	}

	LZX_DEBUG("Done: compressed %zu => %zu bytes.",
		  uncompressed_size, compressed_size);

	/* Verify that we really get the same thing back when decompressing.
	 * Although this could be disabled by default in all cases, it only
	 * takes around 2-3% of the running time of the slow algorithm to do the
	 * verification.  */
	if (ctx->params.algorithm == WIMLIB_LZX_ALGORITHM_SLOW
#if defined(ENABLE_LZX_DEBUG) || defined(ENABLE_VERIFY_COMPRESSION)
	    || 1
#endif
	    )
	{
		struct wimlib_decompressor *decompressor;

		if (0 == wimlib_create_decompressor(WIMLIB_COMPRESSION_TYPE_LZX,
						    ctx->max_window_size,
						    NULL,
						    &decompressor))
		{
			int ret;

			ret = wimlib_decompress(compressed_data,
						compressed_size,
						ctx->window,
						uncompressed_size,
						decompressor);
			wimlib_free_decompressor(decompressor);

			if (ret) {
				ERROR("Failed to decompress data we "
				      "compressed using LZX algorithm");
				wimlib_assert(0);
				return 0;
			}
			if (memcmp(uncompressed_data, ctx->window, uncompressed_size)) {
				ERROR("Data we compressed using LZX algorithm "
				      "didn't decompress to original");
				wimlib_assert(0);
				return 0;
			}
		} else {
			WARNING("Failed to create decompressor for "
				"data verification!");
		}
	}
	return compressed_size;
}
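/* Usage sketch (hypothetical caller; `in`, `in_size`, and `out` are
 * placeholder buffers, and error handling is elided):
 *
 *	void *ctx;
 *	if (0 == lzx_create_compressor(32768, NULL, &ctx)) {
 *		size_t csize = lzx_compress(in, in_size, out, in_size - 1, ctx);
 *		if (csize == 0) {
 *			// Incompressible (or tiny) data: store it raw instead.
 *		}
 *		lzx_free_compressor(ctx);
 *	}
 *
 * Passing in_size - 1 as the space available forces a return of 0 unless
 * compression actually saves at least one byte.  */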
static bool
lzx_params_valid(const struct wimlib_lzx_compressor_params *params)
{
	/* Validate parameters.  */
	if (params->hdr.size != sizeof(struct wimlib_lzx_compressor_params)) {
		LZX_DEBUG("Invalid parameter structure size!");
		return false;
	}

	if (params->algorithm != WIMLIB_LZX_ALGORITHM_SLOW &&
	    params->algorithm != WIMLIB_LZX_ALGORITHM_FAST) {
		LZX_DEBUG("Invalid algorithm.");
		return false;
	}

	if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
		if (params->alg_params.slow.num_optim_passes < 1) {
			LZX_DEBUG("Invalid number of optimization passes!");
			return false;
		}
		if (params->alg_params.slow.main_nostat_cost < 1 ||
		    params->alg_params.slow.main_nostat_cost > 16) {
			LZX_DEBUG("Invalid main_nostat_cost!");
			return false;
		}
		if (params->alg_params.slow.len_nostat_cost < 1 ||
		    params->alg_params.slow.len_nostat_cost > 16) {
			LZX_DEBUG("Invalid len_nostat_cost!");
			return false;
		}
		if (params->alg_params.slow.aligned_nostat_cost < 1 ||
		    params->alg_params.slow.aligned_nostat_cost > 8) {
			LZX_DEBUG("Invalid aligned_nostat_cost!");
			return false;
		}
	}
	return true;
}
static void
lzx_free_compressor(void *_ctx)
{
	struct lzx_compressor *ctx = _ctx;

	if (ctx) {
		FREE(ctx->chosen_matches);
		FREE(ctx->cached_matches);
		FREE(ctx->optimum);
		FREE(ctx->salink);
		FREE(ctx->SA);
		FREE(ctx->block_specs);
		FREE(ctx->prev_tab);
		FREE(ctx->window);
		FREE(ctx);
	}
}

static int
lzx_create_compressor(size_t window_size,
		      const struct wimlib_compressor_params_header *_params,
		      void **ctx_ret)
{
	const struct wimlib_lzx_compressor_params *params =
		(const struct wimlib_lzx_compressor_params*)_params;
	struct lzx_compressor *ctx;

	LZX_DEBUG("Allocating LZX context...");

	if (!lzx_window_size_valid(window_size))
		return WIMLIB_ERR_INVALID_PARAM;

	static const struct wimlib_lzx_compressor_params fast_default = {
		.hdr = {
			.size = sizeof(struct wimlib_lzx_compressor_params),
		},
		.algorithm = WIMLIB_LZX_ALGORITHM_FAST,
		.use_defaults = 0,
		.alg_params = {
			.fast = {
			},
		},
	};
	static const struct wimlib_lzx_compressor_params slow_default = {
		.hdr = {
			.size = sizeof(struct wimlib_lzx_compressor_params),
		},
		.algorithm = WIMLIB_LZX_ALGORITHM_SLOW,
		.use_defaults = 0,
		.alg_params = {
			.slow = {
				.use_len2_matches = 1,
				.num_fast_bytes = 32,
				.num_optim_passes = 2,
				.max_search_depth = 50,
				.max_matches_per_pos = 3,
				.main_nostat_cost = 15,
				.len_nostat_cost = 15,
				.aligned_nostat_cost = 7,
			},
		},
	};

	if (params) {
		if (!lzx_params_valid(params))
			return WIMLIB_ERR_INVALID_PARAM;
	} else {
		LZX_DEBUG("Using default algorithm and parameters.");
		params = &slow_default;
	}

	if (params->use_defaults) {
		if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW)
			params = &slow_default;
		else
			params = &fast_default;
	}

	LZX_DEBUG("Allocating memory.");

	ctx = CALLOC(1, sizeof(struct lzx_compressor));
	if (ctx == NULL)
		goto oom;

	ctx->num_main_syms = lzx_get_num_main_syms(window_size);
	ctx->max_window_size = window_size;
	ctx->window = MALLOC(window_size + 12);
	if (ctx->window == NULL)
		goto oom;

	if (params->algorithm == WIMLIB_LZX_ALGORITHM_FAST) {
		ctx->prev_tab = MALLOC(window_size * sizeof(ctx->prev_tab[0]));
		if (ctx->prev_tab == NULL)
			goto oom;
	}

	size_t block_specs_length = DIV_ROUND_UP(window_size, LZX_DIV_BLOCK_SIZE);
	ctx->block_specs = MALLOC(block_specs_length * sizeof(ctx->block_specs[0]));
	if (ctx->block_specs == NULL)
		goto oom;

	if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
		ctx->SA = MALLOC(3U * window_size * sizeof(ctx->SA[0]));
		if (ctx->SA == NULL)
			goto oom;
		ctx->ISA = ctx->SA + window_size;
		ctx->LCP = ctx->ISA + window_size;

		ctx->salink = MALLOC(window_size * sizeof(ctx->salink[0]));
		if (ctx->salink == NULL)
			goto oom;
	}

	if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
		ctx->optimum = MALLOC((LZX_OPTIM_ARRAY_SIZE + LZX_MAX_MATCH_LEN) *
				      sizeof(ctx->optimum[0]));
		if (ctx->optimum == NULL)
			goto oom;
	}

	if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
		u32 cache_per_pos;

		cache_per_pos = params->alg_params.slow.max_matches_per_pos;
		if (cache_per_pos > LZX_MAX_CACHE_PER_POS)
			cache_per_pos = LZX_MAX_CACHE_PER_POS;

		ctx->cached_matches = MALLOC(window_size * (cache_per_pos + 1) *
					     sizeof(ctx->cached_matches[0]));
		if (ctx->cached_matches == NULL)
			goto oom;
	}

	ctx->chosen_matches = MALLOC(window_size * sizeof(ctx->chosen_matches[0]));
	if (ctx->chosen_matches == NULL)
		goto oom;

	memcpy(&ctx->params, params, sizeof(struct wimlib_lzx_compressor_params));
	memset(&ctx->zero_codes, 0, sizeof(ctx->zero_codes));

	LZX_DEBUG("Successfully allocated new LZX context.");

	*ctx_ret = ctx;
	return 0;

oom:
	lzx_free_compressor(ctx);
	return WIMLIB_ERR_NOMEM;
}
const struct compressor_ops lzx_compressor_ops = {
	.create_compressor  = lzx_create_compressor,
	.compress           = lzx_compress,
	.free_compressor    = lzx_free_compressor,
};