4 * LZX compression routines
8 * Copyright (C) 2012, 2013 Eric Biggers
10 * This file is part of wimlib, a library for working with WIM files.
12 * wimlib is free software; you can redistribute it and/or modify it under the
13 * terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 3 of the License, or (at your option)
17 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
18 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
19 * A PARTICULAR PURPOSE. See the GNU General Public License for more
22 * You should have received a copy of the GNU General Public License
23 * along with wimlib; if not, see http://www.gnu.org/licenses/.
28 * This file contains a compressor for the LZX compression format, as used in
29 * the WIM file format.
34 * First, the primary reference for the LZX compression format is the
35 * specification released by Microsoft.
37 * Second, the comments in lzx-decompress.c provide some more information about
38 * the LZX compression format, including errors in the Microsoft specification.
40 * Do note that LZX shares many similarities with DEFLATE, the algorithm used by
41 * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding,
42 * and certain other details are quite similar, such as the method for storing
43 * Huffman codes. However, some of the main differences are:
45 * - LZX preprocesses the data to attempt to make x86 machine code slightly more
46 * compressible before attempting to compress it further.
47 * - LZX uses a "main" alphabet which combines literals and matches, with the
48 * match symbols containing a "length header" (giving all or part of the match
49 * length) and a "position slot" (giving, roughly speaking, the order of
50 * magnitude of the match offset).
51 * - LZX does not have static Huffman blocks; however it does have two types of
52 * dynamic Huffman blocks ("aligned offset" and "verbatim").
53 * - LZX has a minimum match length of 2 rather than 3.
54 * - In LZX, match offsets 0 through 2 actually represent entries in an LRU
55 * queue of match offsets. This is very useful for certain types of files,
56 * such as binary files that have repeating records.
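 *
 *   A minimal sketch (hypothetical names; not wimlib's actual code) of how
 *   such a 3-entry recent offset queue behaves when a match offset is coded:
 *
 *       static unsigned R[3] = {1, 1, 1};   // recent offsets, most recent first
 *
 *       static unsigned
 *       code_offset(unsigned offset)
 *       {
 *               for (unsigned i = 0; i < 3; i++) {
 *                       if (offset == R[i]) {
 *                               // Repeat offset: swap to front, code just i.
 *                               unsigned tmp = R[0];
 *                               R[0] = R[i];
 *                               R[i] = tmp;
 *                               return i;
 *                       }
 *               }
 *               // Not a repeat offset: push it onto the front of the queue and
 *               // code its "formatted offset" (values 0 through 2 are reserved
 *               // for the queue entries).
 *               R[2] = R[1];
 *               R[1] = R[0];
 *               R[0] = offset;
 *               return offset + 2;
 *       }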
61 * There are actually two distinct overall algorithms implemented here. We
62 * shall refer to them as the "slow" algorithm and the "fast" algorithm. The
63 * "slow" algorithm spends more time compressing to achieve a higher compression
64 * ratio compared to the "fast" algorithm. More details are presented below.
69 * The "slow" algorithm to generate LZX-compressed data is roughly as follows:
71 * 1. Preprocess the input data to translate the targets of x86 call
72 * instructions to absolute offsets.
74 * 2. Build the suffix array and inverse suffix array for the input data. The
75 * suffix array contains the indices of all suffixes of the input data,
76 * sorted lexicographically by the corresponding suffixes. The "position" of
77 * a suffix is the index of that suffix in the original string, whereas the
78 * "rank" of a suffix is the index at which that suffix's position is found
79 * in the suffix array. (A small worked example follows this list.)
81 * 3. Build the longest common prefix array corresponding to the suffix array.
83 * 4. For each suffix, find the highest lower ranked suffix that has a lower
84 * position, the lowest higher ranked suffix that has a lower position, and
85 * the length of the common prefix shared with each. This information is
86 * later used to link suffix ranks into a doubly-linked list for searching
89 * 5. Set a default cost model for matches/literals.
91 * 6. Determine the lowest cost sequence of LZ77 matches ((offset, length)
92 * pairs) and literal bytes to divide the input into. Raw match-finding is
93 * done by searching the suffix array using a linked list to avoid
94 * considering any suffixes that start after the current position. Each run
95 * of the match-finder returns the approximate lowest-cost longest match as
96 * well as any shorter matches that have even lower approximate costs. Each
97 * such run also adds the suffix rank of the current position into the linked
98 * list being used to search the suffix array. Parsing, or match-choosing,
99 * is solved as a minimum-cost path problem using a forward "optimal parsing"
100 * algorithm based on the Deflate encoder from 7-Zip. This algorithm moves
101 * forward calculating the minimum cost to reach each byte until either a
102 * very long match is found or until a position is found at which no matches
105 * 7. Build the Huffman codes needed to output the matches/literals.
107 * 8. Up to a certain number of iterations, use the resulting Huffman codes to
108 * refine a cost model and go back to Step #6 to determine an improved
109 * sequence of matches and literals.
111 * 9. Output the resulting block using the match/literal sequences and the
112 * Huffman codes that were computed for the block.
114 * Note: the algorithm does not yet attempt to split the input into multiple LZX
115 * blocks, instead using a series of blocks of LZX_DIV_BLOCK_SIZE bytes.
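 *
 * As a small worked example of the terms used in steps 2-4 above, consider
 * the 4-byte input "abab".  Its suffixes, sorted lexicographically, are
 * "ab" (position 2), "abab" (position 0), "b" (position 3), and "bab"
 * (position 1), giving:
 *
 *     SA  = [2, 0, 3, 1]     (rank -> position)
 *     ISA = [1, 3, 0, 2]     (position -> rank; ISA[SA[r]] == r)
 *     LCP = [-, 2, 0, 1]     (LCP[r] = length of the common prefix of the
 *                             suffixes with ranks r - 1 and r)
 *
 * The match-finder, positioned at some rank, walks outward to adjacent ranks
 * (skipping suffixes whose positions lie after the current position) and
 * takes the minimum of the LCP values crossed as the match length.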
120 * The fast algorithm (and the only one available in wimlib v1.5.1 and earlier)
121 * spends much less time on the main bottlenecks of the compression process ---
122 * that is, the match finding and match choosing. Matches are found and chosen
123 * with hash chains using a greedy parse with one position of look-ahead. No
124 * block splitting is done; only compressing the full input into an aligned
125 * offset block is considered.
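 *
 * A rough sketch (hypothetical helper names; not wimlib's actual code) of a
 * greedy parse with one position of look-ahead:
 *
 *     unsigned pos = 0;
 *     while (pos < window_size) {
 *             unsigned offset, next_offset;
 *             unsigned len = find_longest_match(pos, &offset);
 *             if (len >= LZX_MIN_MATCH_LEN &&
 *                 find_longest_match(pos + 1, &next_offset) <= len) {
 *                     output_match(len, offset);    // keep the current match
 *                     pos += len;
 *             } else {
 *                     output_literal(window[pos]);  // defer to the next position
 *                     pos++;
 *             }
 *     }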
130 * The old API (retained for backward compatibility) consists of just one
133 * wimlib_lzx_compress()
135 * The new compressor has more potential parameters and needs more memory, so
136 * the new API ties up memory allocations and compression parameters into a
139 * wimlib_lzx_alloc_context()
140 * wimlib_lzx_compress2()
141 * wimlib_lzx_free_context()
142 * wimlib_lzx_set_default_params()
144 * Both wimlib_lzx_compress() and wimlib_lzx_compress2() are designed to
145 * compress an in-memory buffer of up to the window size, which can be any power
146 * of two between 2^15 and 2^21 inclusive. However, by default, the WIM
147 * format uses 2^15, and this is seemingly the only value that is compatible
148 * with WIMGAPI. In any case, the window is not a true "sliding window" since
149 * no data is ever "slid out" of the window. This is needed for the WIM format,
150 * which is designed such that chunks may be randomly accessed.
152 * Both wimlib_lzx_compress() and wimlib_lzx_compress2() return 0 if the data
153 * could not be compressed to less than the size of the uncompressed data.
154 * Again, this is suitable for the WIM format, which stores such data chunks
157 * The functions in this LZX compression API are exported from the library,
158 * although, with the possible exception of wimlib_lzx_set_default_params(), this
159 * is only in case other programs happen to have uses for them other than the WIM
160 * reading/writing already handled through the rest of the library.
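 *
 * A rough usage sketch of the new API (parameter lists are abbreviated and
 * partly assumed here; see wimlib.h for the exact prototypes):
 *
 *     struct wimlib_lzx_context *ctx;
 *
 *     wimlib_lzx_alloc_context(..., &ctx);
 *     for (each chunk of the stream) {
 *             unsigned csize = wimlib_lzx_compress2(chunk, chunk_size,
 *                                                   cbuf, ctx);
 *             if (csize == 0)
 *                     ... store the chunk uncompressed ...
 *             else
 *                     ... store the csize compressed bytes ...
 *     }
 *     wimlib_lzx_free_context(ctx);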
165 * Acknowledgments to several open-source projects and research papers that made
166 * it possible to implement this code:
168 * - divsufsort (author: Yuta Mori), for the suffix array construction code,
169 * located in a separate directory (divsufsort/).
171 * - "Linear-Time Longest-Common-Prefix Computation in Suffix Arrays and Its
172 * Applications" (Kasai et al. 2001), for the LCP array computation.
174 * - "LPF computation revisited" (Crochemore et al. 2009) for the prev and next
175 * array computations.
177 * - 7-Zip (author: Igor Pavlov) for the algorithm for forward optimal parsing
180 * - zlib (author: Jean-loup Gailly and Mark Adler), for the hash table
181 * match-finding algorithm (used in lz77.c).
183 * - lzx-compress (author: Matthew T. Russotto), on which some parts of this
184 * code were originally based.
192 #include "wimlib/compress.h"
193 #include "wimlib/error.h"
194 #include "wimlib/lzx.h"
195 #include "wimlib/util.h"
200 #ifdef ENABLE_LZX_DEBUG
201 # include "wimlib/decompress.h"
204 #include "divsufsort/divsufsort.h"
206 typedef u32 block_cost_t;
207 #define INFINITE_BLOCK_COST ((block_cost_t)~0U)
209 #define LZX_OPTIM_ARRAY_SIZE 4096
211 #define LZX_DIV_BLOCK_SIZE 32768
213 #define LZX_MAX_CACHE_PER_POS 10
215 /* Codewords for the LZX main, length, and aligned offset Huffman codes */
216 struct lzx_codewords {
217 u16 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
218 u16 len[LZX_LENCODE_NUM_SYMBOLS];
219 u16 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
222 /* Codeword lengths (in bits) for the LZX main, length, and aligned offset
225 * A 0 length means the codeword has zero frequency.
228 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
229 u8 len[LZX_LENCODE_NUM_SYMBOLS];
230 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
233 /* Costs for the LZX main, length, and aligned offset Huffman symbols.
235 * If a codeword has zero frequency, it must still be assigned some nonzero cost
236 * --- generally a high cost, since even if it gets used in the next iteration,
237 * it probably will not be used very many times. */
239 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
240 u8 len[LZX_LENCODE_NUM_SYMBOLS];
241 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
244 /* The LZX main, length, and aligned offset Huffman codes */
246 struct lzx_codewords codewords;
247 struct lzx_lens lens;
250 /* Tables for tallying symbol frequencies in the three LZX alphabets */
252 freq_t main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
253 freq_t len[LZX_LENCODE_NUM_SYMBOLS];
254 freq_t aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
257 /* LZX intermediate match/literal format */
261 * 31 1 if a match, 0 if a literal.
263 * 30-25 position slot. This can be at most 50, so it will fit in 6
266 * 24-8 position footer. This is the offset of the real formatted
267 * offset from the position base. This can be at most 17 bits
268 * (since lzx_extra_bits[LZX_MAX_POSITION_SLOTS - 1] is 17).
270 * 7-0 length of match, minus 2. This can be at most
271 * (LZX_MAX_MATCH_LEN - 2) == 255, so it will fit in 8 bits. */
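
/* Illustrative sketch (hypothetical helper names, not used elsewhere in this
 * file) of packing and unpacking the intermediate representation described
 * above.  Bit 31 distinguishes matches from literals; literals simply store
 * the literal byte (a main symbol below LZX_NUM_CHARS) directly. */
static inline u32
lzx_match_pack_example(unsigned position_slot, unsigned position_footer,
		       unsigned adjusted_len)
{
	return 0x80000000 |
	       (position_slot << 25) |
	       (position_footer << 8) |
	       adjusted_len;
}

static inline void
lzx_match_unpack_example(u32 data, unsigned *position_slot,
			 unsigned *position_footer, unsigned *adjusted_len)
{
	*position_slot = (data >> 25) & 0x3f;      /* 6 bits            */
	*position_footer = (data >> 8) & 0x1ffff;  /* 17 bits           */
	*adjusted_len = data & 0xff;               /* match length - 2  */
}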
275 /* Raw LZ match/literal format: just a length and offset.
277 * The length is the number of bytes of the match, and the offset is the number
278 * of bytes back in the input the match is from the current position.
280 * If @len < LZX_MIN_MATCH_LEN, then it's really just a literal byte and @offset is
287 /* Specification for an LZX block. */
288 struct lzx_block_spec {
290 /* One of the LZX_BLOCKTYPE_* constants indicating which type of this
294 /* 0-based position in the window at which this block starts. */
295 input_idx_t window_pos;
297 /* The number of bytes of uncompressed data this block represents. */
298 input_idx_t block_size;
300 /* The position in the 'chosen_matches' array in the `struct
301 * lzx_compressor' at which the match/literal specifications for
302 * this block begin. */
303 input_idx_t chosen_matches_start_pos;
305 /* The number of match/literal specifications for this block. */
306 input_idx_t num_chosen_matches;
308 /* Huffman codes for this block. */
309 struct lzx_codes codes;
313 * An array of these structures is used during the match-choosing algorithm.
314 * They correspond to consecutive positions in the window and are used to keep
315 * track of the cost to reach each position, and the match/literal choices
316 * needed to reach that position.
319 /* The approximate minimum cost, in bits, to reach this position in the
320 * window which has been found so far. */
323 /* The union here is just for clarity, since the fields are used in two
324 * slightly different ways. Initially, the @prev structure is filled in
325 * first, and links go from later in the window to earlier in the
326 * window. Later, the @next structure is filled in and links go from
327 * earlier in the window to later in the window. */
330 /* Position of the start of the match or literal that
331 * was taken to get to this position in the approximate
332 * minimum-cost parse. */
335 /* Offset (as in an LZ (length, offset) pair) of the
336 * match or literal that was taken to get to this
337 * position in the approximate minimum-cost parse. */
338 input_idx_t match_offset;
341 /* Position at which the match or literal starting at
342 * this position ends in the minimum-cost parse. */
345 /* Offset (as in an LZ (length, offset) pair) of the
346 * match or literal starting at this position in the
347 * approximate minimum-cost parse. */
348 input_idx_t match_offset;
352 /* The match offset LRU queue that will exist when the approximate
353 * minimum-cost path to reach this position is taken. */
354 struct lzx_lru_queue queue;
357 /* Suffix array link */
359 /* Rank of highest ranked suffix that has rank lower than the suffix
360 * corresponding to this structure and either has a lower position
361 * (initially) or has a position lower than the highest position at
362 * which matches have been searched for so far, or -1 if there is no
366 /* Rank of lowest ranked suffix that has rank greater than the suffix
367 * corresponding to this structure and either has a lower position
368 * (initially) or has a position lower than the highest position at which
369 * matches have been searched for so far, or -1 if there is no such
373 /* Length of longest common prefix between the suffix corresponding to
374 * this structure and the suffix with rank @prev, or 0 if @prev is -1.
378 /* Length of longest common prefix between the suffix corresponding to
379 * this structure and the suffix with rank @next, or 0 if @next is -1.
384 /* State of the LZX compressor. */
385 struct lzx_compressor {
387 /* The parameters that were used to create the compressor. */
388 struct wimlib_lzx_params params;
390 /* The buffer of data to be compressed.
392 * 0xe8 byte preprocessing is done directly on the data here before
393 * further compression.
395 * Note that this compressor does *not* use a real sliding window!!!!
396 * It's not needed in the WIM format, since every chunk is compressed
397 * independently. This is by design, to allow random access to the
400 * We reserve a few extra bytes to potentially allow reading off the end
401 * of the array in the match-finding code for optimization purposes.
405 /* Number of bytes of data to be compressed, which is the number of
406 * bytes of data in @window that are actually valid. */
407 input_idx_t window_size;
409 /* Allocated size of the @window. */
410 input_idx_t max_window_size;
412 /* Number of symbols in the main alphabet (depends on the
413 * @max_window_size since it determines the maximum allowed offset). */
414 unsigned num_main_syms;
416 /* The current match offset LRU queue. */
417 struct lzx_lru_queue queue;
419 /* Space for the sequences of matches/literals that were chosen for each
421 struct lzx_match *chosen_matches;
423 /* Information about the LZX blocks the preprocessed input was divided
425 struct lzx_block_spec *block_specs;
427 /* Number of LZX blocks the input was divided into; a.k.a. the number of
428 * elements of @block_specs that are valid. */
431 /* This is simply filled in with zeroes and used to avoid special-casing
432 * the output of the first compressed Huffman code, which conceptually
433 * has a delta taken from a code with all symbols having zero-length
435 struct lzx_codes zero_codes;
437 /* The current cost model. */
438 struct lzx_costs costs;
440 /* Fast algorithm only: Array of hash table links. */
441 input_idx_t *prev_tab;
443 /* Suffix array for window.
444 * This is a mapping from suffix rank to suffix position. */
447 /* Inverse suffix array for window.
448 * This is a mapping from suffix position to suffix rank.
449 * If 0 <= r < window_size, then ISA[SA[r]] == r. */
452 /* Longest common prefix array corresponding to the suffix array SA.
453 * LCP[i] is the length of the longest common prefix between the
454 * suffixes with positions SA[i - 1] and SA[i]. LCP[0] is undefined.
458 /* Suffix array links.
460 * During a linear scan of the input string to find matches, this array
461 * is used to keep track of which suffixes in the suffix array appear
462 * before the current position. Instead of searching in the original
463 * suffix array, scans for matches at a given position traverse a linked
464 * list containing only suffixes that appear before that position. */
465 struct salink *salink;
467 /* Position in window of next match to return. */
468 input_idx_t match_window_pos;
470 /* The match-finder shall ensure the length of matches does not exceed
471 * this position in the input. */
472 input_idx_t match_window_end;
474 /* Matches found by the match-finder are cached in the following array
475 * to achieve a slight speedup when the same matches are needed on
476 * subsequent passes. This is suboptimal because different matches may
477 * be preferred with different cost models, but seems to be a worthwhile
479 struct raw_match *cached_matches;
480 unsigned cached_matches_pos;
483 /* Slow algorithm only: Temporary space used for match-choosing
486 * The size of this array must be at least LZX_MAX_MATCH_LEN but
487 * otherwise is arbitrary. More space simply allows the match-choosing
488 * algorithm to potentially find better matches (depending on the input,
490 struct lzx_optimal *optimum;
492 /* Slow algorithm only: Variables used by the match-choosing algorithm.
494 * When matches have been chosen, optimum_cur_idx is set to the position
495 * in the window of the next match/literal to return and optimum_end_idx
496 * is set to the position in the window at the end of the last
497 * match/literal to return. */
502 /* Returns the LZX position slot that corresponds to a given match offset,
503 * taking into account the recent offset queue and updating it if the offset is
506 lzx_get_position_slot(unsigned offset, struct lzx_lru_queue *queue)
508 unsigned position_slot;
510 /* See if the offset was recently used. */
511 for (unsigned i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
512 if (offset == queue->R[i]) {
515 /* Bring the repeat offset to the front of the
516 * queue. Note: this is, in fact, not a real
517 * LRU queue because repeat matches are simply
518 * swapped to the front. */
519 swap(queue->R[0], queue->R[i]);
521 /* The resulting position slot is simply the first index
522 * at which the offset was found in the queue. */
527 /* The offset was not recently used; look up its real position slot. */
528 position_slot = lzx_get_position_slot_raw(offset + LZX_OFFSET_OFFSET);
530 /* Bring the new offset to the front of the queue. */
531 for (unsigned i = LZX_NUM_RECENT_OFFSETS - 1; i > 0; i--)
532 queue->R[i] = queue->R[i - 1];
533 queue->R[0] = offset;
535 return position_slot;
538 /* Build the main, length, and aligned offset Huffman codes used in LZX.
540 * This takes as input the frequency tables for each code and produces as output
541 * a set of tables that map symbols to codewords and codeword lengths. */
543 lzx_make_huffman_codes(const struct lzx_freqs *freqs,
544 struct lzx_codes *codes,
545 unsigned num_main_syms)
547 make_canonical_huffman_code(num_main_syms,
548 LZX_MAX_MAIN_CODEWORD_LEN,
551 codes->codewords.main);
553 make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
554 LZX_MAX_LEN_CODEWORD_LEN,
557 codes->codewords.len);
559 make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
560 LZX_MAX_ALIGNED_CODEWORD_LEN,
563 codes->codewords.aligned);
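
/* Worked example (illustrative; not tied to any particular LZX alphabet):
 * given symbol frequencies {A: 5, B: 2, C: 1, D: 1}, Huffman's algorithm
 * produces codeword lengths {A: 1, B: 2, C: 3, D: 3}.  The canonical code
 * then assigns codewords in order of (length, symbol): A = 0, B = 10,
 * C = 110, D = 111.  Only the lengths need to be stored in the compressed
 * output; the decompressor rebuilds the identical canonical codewords from
 * them. */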
567 * Output an LZX match.
569 * @out: The bitstream to write the match to.
570 * @block_type: The type of the LZX block (LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM)
572 * @codes: Pointer to a structure that contains the codewords for the
573 * main, length, and aligned offset Huffman codes.
576 lzx_write_match(struct output_bitstream *out, int block_type,
577 struct lzx_match match, const struct lzx_codes *codes)
579 /* low 8 bits are the match length minus 2 */
580 unsigned match_len_minus_2 = match.data & 0xff;
581 /* Next 17 bits are the position footer */
582 unsigned position_footer = (match.data >> 8) & 0x1ffff; /* 17 bits */
583 /* Next 6 bits are the position slot. */
584 unsigned position_slot = (match.data >> 25) & 0x3f; /* 6 bits */
587 unsigned main_symbol;
588 unsigned num_extra_bits;
589 unsigned verbatim_bits;
590 unsigned aligned_bits;
592 /* If the match length is less than MIN_MATCH_LEN (= 2) +
593 * NUM_PRIMARY_LENS (= 7), the length header contains
594 * the match length minus MIN_MATCH_LEN, and there is no
597 * Otherwise, the length header contains
598 * NUM_PRIMARY_LENS, and the length footer contains
599 * the match length minus NUM_PRIMARY_LENS minus
601 if (match_len_minus_2 < LZX_NUM_PRIMARY_LENS) {
602 len_header = match_len_minus_2;
603 /* No length footer-- mark it with a special
605 len_footer = (unsigned)(-1);
607 len_header = LZX_NUM_PRIMARY_LENS;
608 len_footer = match_len_minus_2 - LZX_NUM_PRIMARY_LENS;
611 /* Combine the position slot with the length header into a single symbol
612 * that will be encoded with the main code.
614 * The actual main symbol is offset by LZX_NUM_CHARS because values
615 * under LZX_NUM_CHARS are used to indicate a literal byte rather than a
617 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
619 /* Output main symbol. */
620 bitstream_put_bits(out, codes->codewords.main[main_symbol],
621 codes->lens.main[main_symbol]);
623 /* If there is a length footer, output it using the
624 * length Huffman code. */
625 if (len_footer != (unsigned)(-1)) {
626 bitstream_put_bits(out, codes->codewords.len[len_footer],
627 codes->lens.len[len_footer]);
630 num_extra_bits = lzx_get_num_extra_bits(position_slot);
632 /* For aligned offset blocks with at least 3 extra bits, output the
633 * verbatim bits literally, then the aligned bits encoded using the
634 * aligned offset code. Otherwise, only the verbatim bits need to be
636 if ((block_type == LZX_BLOCKTYPE_ALIGNED) && (num_extra_bits >= 3)) {
638 verbatim_bits = position_footer >> 3;
639 bitstream_put_bits(out, verbatim_bits,
642 aligned_bits = (position_footer & 7);
643 bitstream_put_bits(out,
644 codes->codewords.aligned[aligned_bits],
645 codes->lens.aligned[aligned_bits]);
647 /* verbatim bits is the same as the position
648 * footer, in this case. */
649 bitstream_put_bits(out, position_footer, num_extra_bits);
654 lzx_build_precode(const u8 lens[restrict],
655 const u8 prev_lens[restrict],
656 const unsigned num_syms,
657 freq_t precode_freqs[restrict LZX_PRECODE_NUM_SYMBOLS],
658 u8 output_syms[restrict num_syms],
659 u8 precode_lens[restrict LZX_PRECODE_NUM_SYMBOLS],
660 u16 precode_codewords[restrict LZX_PRECODE_NUM_SYMBOLS],
661 unsigned *num_additional_bits_ret)
663 memset(precode_freqs, 0,
664 LZX_PRECODE_NUM_SYMBOLS * sizeof(precode_freqs[0]));
666 /* Since the code word lengths use a form of RLE encoding, the goal here
667 * is to find each run of identical lengths when going through them in
668 * symbol order (including runs of length 1). For each run, as many
669 * lengths are encoded using RLE as possible, and the rest are output
672 * output_syms[] will be filled in with the length symbols that will be
673 * output, including RLE codes, not yet encoded using the precode.
675 * cur_run_len keeps track of how many code word lengths are in the
676 * current run of identical lengths. */
677 unsigned output_syms_idx = 0;
678 unsigned cur_run_len = 1;
679 unsigned num_additional_bits = 0;
680 for (unsigned i = 1; i <= num_syms; i++) {
682 if (i != num_syms && lens[i] == lens[i - 1]) {
683 /* Still in a run--- keep going. */
688 /* Run ended! Check if it is a run of zeroes or a run of
691 /* The symbol that was repeated in the run--- not to be confused
692 * with the length *of* the run (cur_run_len) */
693 unsigned len_in_run = lens[i - 1];
695 if (len_in_run == 0) {
696 /* A run of 0's. Encode it in as few length
697 * codes as we can. */
699 /* The magic length 18 indicates a run of 20 + n zeroes,
700 * where n is an uncompressed literal 5-bit integer that
701 * follows the magic length. */
702 while (cur_run_len >= 20) {
703 unsigned additional_bits;
705 additional_bits = min(cur_run_len - 20, 0x1f);
706 num_additional_bits += 5;
708 output_syms[output_syms_idx++] = 18;
709 output_syms[output_syms_idx++] = additional_bits;
710 cur_run_len -= 20 + additional_bits;
713 /* The magic length 17 indicates a run of 4 + n zeroes,
714 * where n is an uncompressed literal 4-bit integer that
715 * follows the magic length. */
716 while (cur_run_len >= 4) {
717 unsigned additional_bits;
719 additional_bits = min(cur_run_len - 4, 0xf);
720 num_additional_bits += 4;
722 output_syms[output_syms_idx++] = 17;
723 output_syms[output_syms_idx++] = additional_bits;
724 cur_run_len -= 4 + additional_bits;
729 /* A run of nonzero lengths. */
731 /* The magic length 19 indicates a run of 4 + n
732 * nonzeroes, where n is a literal bit that follows the
733 * magic length, and where the value of the lengths in
734 * the run is given by an extra length symbol, encoded
735 * with the precode, that follows the literal bit.
737 * The extra length symbol is encoded as a difference
738 * from the length of the codeword for the first symbol
739 * in the run in the previous code.
741 while (cur_run_len >= 4) {
742 unsigned additional_bits;
745 additional_bits = (cur_run_len > 4);
746 num_additional_bits += 1;
747 delta = (signed char)prev_lens[i - cur_run_len] -
748 (signed char)len_in_run;
752 precode_freqs[(unsigned char)delta]++;
753 output_syms[output_syms_idx++] = 19;
754 output_syms[output_syms_idx++] = additional_bits;
755 output_syms[output_syms_idx++] = delta;
756 cur_run_len -= 4 + additional_bits;
760 /* Any remaining lengths in the run are outputted without RLE,
761 * as a difference from the length of that codeword in the
763 while (cur_run_len > 0) {
766 delta = (signed char)prev_lens[i - cur_run_len] -
767 (signed char)len_in_run;
771 precode_freqs[(unsigned char)delta]++;
772 output_syms[output_syms_idx++] = delta;
779 /* Build the precode from the frequencies of the length symbols. */
781 make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
782 LZX_MAX_PRE_CODEWORD_LEN,
783 precode_freqs, precode_lens,
786 *num_additional_bits_ret = num_additional_bits;
788 return output_syms_idx;
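
/* Worked example of the run-length scheme above (illustrative): a run of 25
 * zero lengths is emitted as precode symbol 18 followed by a 5-bit value of 5
 * (20 + 5 = 25); a run of 7 zero lengths is emitted as symbol 17 followed by
 * a 4-bit value of 3 (4 + 3 = 7).  A run of identical nonzero lengths uses
 * symbol 19, one extra bit selecting a run of 4 or 5, and then the length
 * itself, encoded with the precode as a difference from the corresponding
 * length in the previous code. */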
792 * Writes a compressed Huffman code to the output, preceded by the precode for
795 * The Huffman code is represented in the output as a series of path lengths
796 * from which the canonical Huffman code can be reconstructed. The path lengths
797 * themselves are compressed using a separate Huffman code, the precode, which
798 * consists of LZX_PRECODE_NUM_SYMBOLS (= 20) symbols that cover all possible
799 * code lengths, plus extra codes for repeated lengths. The path lengths of the
800 * precode precede the path lengths of the larger code and are uncompressed,
801 * consisting of 20 entries of 4 bits each.
803 * @out: Bitstream to write the code to.
804 * @lens: The code lengths for the Huffman code, indexed by symbol.
805 * @prev_lens: Code lengths for this Huffman code, indexed by symbol,
806 * in the *previous block*, or all zeroes if this is the
808 * @num_syms: The number of symbols in the code.
811 lzx_write_compressed_code(struct output_bitstream *out,
812 const u8 lens[restrict],
813 const u8 prev_lens[restrict],
816 freq_t precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
817 u8 output_syms[num_syms];
818 u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
819 u16 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
821 unsigned num_output_syms;
825 num_output_syms = lzx_build_precode(lens,
834 /* Write the lengths of the precode codes to the output. */
835 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
836 bitstream_put_bits(out, precode_lens[i],
837 LZX_PRECODE_ELEMENT_SIZE);
839 /* Write the length symbols, encoded with the precode, to the output. */
841 for (i = 0; i < num_output_syms; ) {
842 precode_sym = output_syms[i++];
844 bitstream_put_bits(out, precode_codewords[precode_sym],
845 precode_lens[precode_sym]);
846 switch (precode_sym) {
848 bitstream_put_bits(out, output_syms[i++], 4);
851 bitstream_put_bits(out, output_syms[i++], 5);
854 bitstream_put_bits(out, output_syms[i++], 1);
855 bitstream_put_bits(out,
856 precode_codewords[output_syms[i]],
857 precode_lens[output_syms[i]]);
867 * Writes all compressed matches and literal bytes in an LZX block to the
871 * The output bitstream.
873 * The type of the block (LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM).
875 * The array of matches/literals that will be output (length @match_count).
877 * Number of matches/literals to be output.
879 * Pointer to a structure that contains the codewords for the main, length,
880 * and aligned offset Huffman codes.
883 lzx_write_matches_and_literals(struct output_bitstream *ostream,
885 const struct lzx_match match_tab[],
886 unsigned match_count,
887 const struct lzx_codes *codes)
889 for (unsigned i = 0; i < match_count; i++) {
890 struct lzx_match match = match_tab[i];
892 /* High bit of the match indicates whether the match is an
893 * actual match (1) or a literal uncompressed byte (0) */
894 if (match.data & 0x80000000) {
896 lzx_write_match(ostream, block_type,
900 bitstream_put_bits(ostream,
901 codes->codewords.main[match.data],
902 codes->lens.main[match.data]);
908 lzx_assert_codes_valid(const struct lzx_codes * codes, unsigned num_main_syms)
910 #ifdef ENABLE_LZX_DEBUG
913 for (i = 0; i < num_main_syms; i++)
914 LZX_ASSERT(codes->lens.main[i] <= LZX_MAX_MAIN_CODEWORD_LEN);
916 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
917 LZX_ASSERT(codes->lens.len[i] <= LZX_MAX_LEN_CODEWORD_LEN);
919 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
920 LZX_ASSERT(codes->lens.aligned[i] <= LZX_MAX_ALIGNED_CODEWORD_LEN);
922 const unsigned tablebits = 10;
923 u16 decode_table[(1 << tablebits) +
924 (2 * max(num_main_syms, LZX_LENCODE_NUM_SYMBOLS))]
925 _aligned_attribute(DECODE_TABLE_ALIGNMENT);
926 LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
928 min(tablebits, LZX_MAINCODE_TABLEBITS),
930 LZX_MAX_MAIN_CODEWORD_LEN));
931 LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
932 LZX_LENCODE_NUM_SYMBOLS,
933 min(tablebits, LZX_LENCODE_TABLEBITS),
935 LZX_MAX_LEN_CODEWORD_LEN));
936 LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
937 LZX_ALIGNEDCODE_NUM_SYMBOLS,
938 min(tablebits, LZX_ALIGNEDCODE_TABLEBITS),
940 LZX_MAX_ALIGNED_CODEWORD_LEN));
941 #endif /* ENABLE_LZX_DEBUG */
944 /* Write an LZX aligned offset or verbatim block to the output. */
946 lzx_write_compressed_block(int block_type,
948 unsigned max_window_size,
949 unsigned num_main_syms,
950 struct lzx_match * chosen_matches,
951 unsigned num_chosen_matches,
952 const struct lzx_codes * codes,
953 const struct lzx_codes * prev_codes,
954 struct output_bitstream * ostream)
958 LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
959 block_type == LZX_BLOCKTYPE_VERBATIM);
960 lzx_assert_codes_valid(codes, num_main_syms);
962 /* The first three bits indicate the type of block and are one of the
963 * LZX_BLOCKTYPE_* constants. */
964 bitstream_put_bits(ostream, block_type, 3);
966 /* Output the block size.
968 * The original LZX format seemed to always encode the block size in 3
969 * bytes. However, the implementation in WIMGAPI, as used in WIM files,
970 * uses the first bit to indicate whether the block is the default size
971 * (32768) or a different size given explicitly by the next 16 bits.
973 * By default, this compressor uses a window size of 32768 and therefore
974 * follows the WIMGAPI behavior. However, this compressor also supports
975 * window sizes greater than 32768 bytes, which do not appear to be
976 * supported by WIMGAPI. In such cases, we retain the default size bit
977 * to mean a size of 32768 bytes but output non-default block size in 24
978 * bits rather than 16. The compatibility of this behavior is unknown
979 * because WIMs created with chunk size greater than 32768 can seemingly
980 * only be opened by wimlib anyway. */
981 if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
982 bitstream_put_bits(ostream, 1, 1);
984 bitstream_put_bits(ostream, 0, 1);
986 if (max_window_size >= 65536)
987 bitstream_put_bits(ostream, (block_size >> 16) & 0xff, 8);
989 bitstream_put_bits(ostream, block_size & 0xffff, 16);
992 /* Write out lengths of the main code. Note that the LZX specification
993 * incorrectly states that the aligned offset code comes after the
994 * length code, but in fact it is the very first code to be written
995 * (before the main code). */
996 if (block_type == LZX_BLOCKTYPE_ALIGNED)
997 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
998 bitstream_put_bits(ostream, codes->lens.aligned[i],
999 LZX_ALIGNEDCODE_ELEMENT_SIZE);
1001 LZX_DEBUG("Writing main code...");
1003 /* Write the precode and lengths for the first LZX_NUM_CHARS symbols in
1004 * the main code, which are the codewords for literal bytes. */
1005 lzx_write_compressed_code(ostream,
1007 prev_codes->lens.main,
1010 /* Write the precode and lengths for the rest of the main code, which
1011 * are the codewords for match headers. */
1012 lzx_write_compressed_code(ostream,
1013 codes->lens.main + LZX_NUM_CHARS,
1014 prev_codes->lens.main + LZX_NUM_CHARS,
1015 num_main_syms - LZX_NUM_CHARS);
1017 LZX_DEBUG("Writing length code...");
1019 /* Write the precode and lengths for the length code. */
1020 lzx_write_compressed_code(ostream,
1022 prev_codes->lens.len,
1023 LZX_LENCODE_NUM_SYMBOLS);
1025 LZX_DEBUG("Writing matches and literals...");
1027 /* Write the actual matches and literals. */
1028 lzx_write_matches_and_literals(ostream, block_type,
1029 chosen_matches, num_chosen_matches,
1032 LZX_DEBUG("Done writing block.");
1035 /* Write out the LZX blocks that were computed. */
1037 lzx_write_all_blocks(struct lzx_compressor *ctx, struct output_bitstream *ostream)
1040 const struct lzx_codes *prev_codes = &ctx->zero_codes;
1041 for (unsigned i = 0; i < ctx->num_blocks; i++) {
1042 const struct lzx_block_spec *spec = &ctx->block_specs[i];
1044 LZX_DEBUG("Writing block %u/%u (type=%d, size=%u, num_chosen_matches=%u)...",
1045 i + 1, ctx->num_blocks,
1046 spec->block_type, spec->block_size,
1047 spec->num_chosen_matches);
1049 lzx_write_compressed_block(spec->block_type,
1051 ctx->max_window_size,
1053 &ctx->chosen_matches[spec->chosen_matches_start_pos],
1054 spec->num_chosen_matches,
1059 prev_codes = &spec->codes;
1063 /* Constructs an LZX match from a literal byte and updates the main code symbol
1066 lzx_tally_literal(u8 lit, struct lzx_freqs *freqs)
1072 /* Constructs an LZX match from an offset and a length, and updates the LRU
1073 * queue and the frequency of symbols in the main, length, and aligned offset
1074 * alphabets. The return value is a 32-bit number that provides the match in an
1075 * intermediate representation documented in `struct lzx_match' above. */
1077 lzx_tally_match(unsigned match_len, unsigned match_offset,
1078 struct lzx_freqs *freqs, struct lzx_lru_queue *queue)
1080 unsigned position_slot;
1081 unsigned position_footer;
1083 unsigned main_symbol;
1084 unsigned len_footer;
1085 unsigned adjusted_match_len;
1087 LZX_ASSERT(match_len >= LZX_MIN_MATCH_LEN && match_len <= LZX_MAX_MATCH_LEN);
1089 /* The match offset shall be encoded as a position slot (itself encoded
1090 * as part of the main symbol) and a position footer. */
1091 position_slot = lzx_get_position_slot(match_offset, queue);
1092 position_footer = (match_offset + LZX_OFFSET_OFFSET) &
1093 ((1U << lzx_get_num_extra_bits(position_slot)) - 1);
1095 /* The match length shall be encoded as a length header (itself encoded
1096 * as part of the main symbol) and an optional length footer. */
1097 adjusted_match_len = match_len - LZX_MIN_MATCH_LEN;
1098 if (adjusted_match_len < LZX_NUM_PRIMARY_LENS) {
1099 /* No length footer needed. */
1100 len_header = adjusted_match_len;
1102 /* Length footer needed. It will be encoded using the length
1104 len_header = LZX_NUM_PRIMARY_LENS;
1105 len_footer = adjusted_match_len - LZX_NUM_PRIMARY_LENS;
1106 freqs->len[len_footer]++;
1109 /* Account for the main symbol. */
1110 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
1112 freqs->main[main_symbol]++;
1114 /* In an aligned offset block, 3 bits of the position footer are output
1115 * as an aligned offset symbol. Account for this, although we may
1116 * ultimately decide to output the block as verbatim. */
1118 /* The following check is equivalent to:
1120 * if (lzx_extra_bits[position_slot] >= 3)
1122 * Note that this correctly excludes position slots that correspond to
1123 * recent offsets. */
1124 if (position_slot >= 8)
1125 freqs->aligned[position_footer & 7]++;
1127 /* Pack the position slot, position footer, and match length into an
1128 * intermediate representation. See `struct lzx_match' for details.
1130 LZX_ASSERT(LZX_MAX_POSITION_SLOTS <= 64);
1131 LZX_ASSERT(lzx_get_num_extra_bits(LZX_MAX_POSITION_SLOTS - 1) <= 17);
1132 LZX_ASSERT(LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1 <= 256);
1134 LZX_ASSERT(position_slot <= (1U << (31 - 25)) - 1);
1135 LZX_ASSERT(position_footer <= (1U << (25 - 8)) - 1);
1136 LZX_ASSERT(adjusted_match_len <= (1U << (8 - 0)) - 1);
1138 (position_slot << 25) |
1139 (position_footer << 8) |
1140 (adjusted_match_len);
1143 struct lzx_record_ctx {
1144 struct lzx_freqs freqs;
1145 struct lzx_lru_queue queue;
1146 struct lzx_match *matches;
1150 lzx_record_match(unsigned len, unsigned offset, void *_ctx)
1152 struct lzx_record_ctx *ctx = _ctx;
1154 (ctx->matches++)->data = lzx_tally_match(len, offset, &ctx->freqs, &ctx->queue);
1158 lzx_record_literal(u8 lit, void *_ctx)
1160 struct lzx_record_ctx *ctx = _ctx;
1162 (ctx->matches++)->data = lzx_tally_literal(lit, &ctx->freqs);
1165 /* Returns the cost, in bits, to output a literal byte using the specified cost
1168 lzx_literal_cost(u8 c, const struct lzx_costs * costs)
1170 return costs->main[c];
1173 /* Given a (length, offset) pair that could be turned into a valid LZX match as
1174 * well as costs for the codewords in the main, length, and aligned Huffman
1175 * codes, return the approximate number of bits it will take to represent this
1176 * match in the compressed output. Take into account the match offset LRU
1177 * queue and optionally update it. */
1179 lzx_match_cost(unsigned length, unsigned offset, const struct lzx_costs *costs,
1180 struct lzx_lru_queue *queue)
1182 unsigned position_slot;
1183 unsigned len_header, main_symbol;
1186 position_slot = lzx_get_position_slot(offset, queue);
1188 len_header = min(length - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
1189 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
1191 /* Account for main symbol. */
1192 cost += costs->main[main_symbol];
1194 /* Account for extra position information. */
1195 unsigned num_extra_bits = lzx_get_num_extra_bits(position_slot);
1196 if (num_extra_bits >= 3) {
1197 cost += num_extra_bits - 3;
1198 cost += costs->aligned[(offset + LZX_OFFSET_OFFSET) & 7];
1200 cost += num_extra_bits;
1203 /* Account for extra length information. */
1204 if (len_header == LZX_NUM_PRIMARY_LENS)
1205 cost += costs->len[length - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
1211 /* Fast heuristic cost evaluation to use in the inner loop of the match-finder.
1212 * Unlike lzx_match_cost() which does a true cost evaluation, this simply
1213 * prioritizes matches based on their offset. */
1215 lzx_match_cost_fast(unsigned offset, const struct lzx_lru_queue *queue)
1217 /* It seems well worth it to take the time to give priority to recently
1219 for (unsigned i = 0; i < LZX_NUM_RECENT_OFFSETS; i++)
1220 if (offset == queue->R[i])
1223 BUILD_BUG_ON(LZX_MAX_WINDOW_SIZE >= (block_cost_t)~0U);
1227 /* Set the cost model @ctx->costs from the Huffman codeword lengths specified in
1230 * The cost model and codeword lengths are almost the same thing, but the
1231 * Huffman codewords with length 0 correspond to symbols with zero frequency
1232 * that still need to be assigned actual costs. The specific values assigned
1233 * are arbitrary, but they should be fairly high (near the maximum codeword
1234 * length) to take into account the fact that uses of these symbols are expected
1237 lzx_set_costs(struct lzx_compressor * ctx, const struct lzx_lens * lens)
1240 unsigned num_main_syms = ctx->num_main_syms;
1243 for (i = 0; i < num_main_syms; i++) {
1244 ctx->costs.main[i] = lens->main[i];
1245 if (ctx->costs.main[i] == 0)
1246 ctx->costs.main[i] = ctx->params.alg_params.slow.main_nostat_cost;
1250 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
1251 ctx->costs.len[i] = lens->len[i];
1252 if (ctx->costs.len[i] == 0)
1253 ctx->costs.len[i] = ctx->params.alg_params.slow.len_nostat_cost;
1256 /* Aligned offset code */
1257 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1258 ctx->costs.aligned[i] = lens->aligned[i];
1259 if (ctx->costs.aligned[i] == 0)
1260 ctx->costs.aligned[i] = ctx->params.alg_params.slow.aligned_nostat_cost;
1264 /* Advance the suffix array match-finder to the next position. */
1266 lzx_lz_update_salink(input_idx_t i,
1267 const input_idx_t SA[restrict],
1268 const input_idx_t ISA[restrict],
1269 struct salink link[restrict])
1271 /* r = Rank of the suffix at the current position. */
1272 const input_idx_t r = ISA[i];
1274 /* next = rank of LOWEST ranked suffix that is ranked HIGHER than the
1275 * current suffix AND has a LOWER position, or -1 if none exists. */
1276 const input_idx_t next = link[r].next;
1278 /* prev = rank of HIGHEST ranked suffix that is ranked LOWER than the
1279 * current suffix AND has a LOWER position, or -1 if none exists. */
1280 const input_idx_t prev = link[r].prev;
1282 /* Link the suffix at the current position into the linked list that
1283 * contains all suffixes in the suffix array that appear at or
1284 * before the current position, sorted by rank.
1286 * Save the values of all fields we overwrite so that rollback is
1288 if (next != (input_idx_t)~0U) {
1290 link[next].prev = r;
1291 link[next].lcpprev = link[r].lcpnext;
1294 if (prev != (input_idx_t)~0U) {
1296 link[prev].next = r;
1297 link[prev].lcpnext = link[r].lcpprev;
1302 * Use the suffix array match-finder to retrieve a list of LZ matches at the
1305 * [in] @i Current position in the window.
1306 * [in] @SA Suffix array for the window.
1307 * [in] @ISA Inverse suffix array for the window.
1308 * [inout] @link Suffix array links used internally by the match-finder.
1309 * [out] @matches The (length, offset) pairs of the resulting matches will
1310 * be written here, sorted in decreasing order by
1311 * length. All returned lengths will be unique.
1312 * [in] @queue Recently used match offsets, used when evaluating the
1314 * [in] @min_match_len Minimum match length to return.
1315 * [in] @max_matches_to_consider Maximum number of matches to consider at
1317 * [in] @max_matches_to_return Maximum number of matches to return.
1319 * The return value is the number of matches found and written to @matches.
1322 lzx_lz_get_matches(const input_idx_t i,
1323 const input_idx_t SA[const restrict],
1324 const input_idx_t ISA[const restrict],
1325 struct salink link[const restrict],
1326 struct raw_match matches[const restrict],
1327 const struct lzx_lru_queue * const restrict queue,
1328 const unsigned min_match_len,
1329 const u32 max_matches_to_consider,
1330 const u32 max_matches_to_return)
1332 /* r = Rank of the suffix at the current position. */
1333 const input_idx_t r = ISA[i];
1335 /* Prepare for searching the current position. */
1336 lzx_lz_update_salink(i, SA, ISA, link);
1338 /* L = rank of next suffix to the left;
1339 * R = rank of next suffix to the right;
1340 * lenL = length of match between current position and the suffix with rank L;
1341 * lenR = length of match between current position and the suffix with rank R.
1343 * This is left and right relative to the rank of the current suffix.
1344 * Since the suffixes in the suffix array are sorted, the longest
1345 * matches are immediately to the left and right (using the linked list
1346 * to ignore all suffixes that occur later in the window). The match
1347 * length decreases the farther left and right we go. We shall keep the
1348 * length on both sides in sync in order to choose the lowest-cost match
1351 input_idx_t L = link[r].prev;
1352 input_idx_t R = link[r].next;
1353 input_idx_t lenL = link[r].lcpprev;
1354 input_idx_t lenR = link[r].lcpnext;
1356 /* nmatches = number of matches found so far. */
1357 unsigned nmatches = 0;
1359 /* best_cost = cost of lowest-cost match found so far.
1361 * We keep track of this so that we can ignore shorter matches that do
1362 * not have lower costs than longer matches already found.
1364 block_cost_t best_cost = INFINITE_BLOCK_COST;
1366 /* count_remaining = maximum number of possible matches remaining to be
1368 u32 count_remaining = max_matches_to_consider;
1370 /* pending = match currently being considered for a specific length. */
1371 struct raw_match pending;
1372 block_cost_t pending_cost;
1374 while (lenL >= min_match_len || lenR >= min_match_len)
1377 pending_cost = INFINITE_BLOCK_COST;
1381 if (lenL >= min_match_len && lenL >= lenR) {
1384 if (--count_remaining == 0)
1385 goto out_save_pending;
1387 input_idx_t offset = i - SA[L];
1389 /* Save match if it has smaller cost. */
1390 cost = lzx_match_cost_fast(offset, queue);
1391 if (cost < pending_cost) {
1392 pending.offset = offset;
1393 pending_cost = cost;
1396 if (link[L].lcpprev < lenL) {
1397 /* Match length decreased. */
1399 lenL = link[L].lcpprev;
1401 /* Save the pending match unless the
1402 * right side still may have matches of
1403 * this length to be scanned, or if a
1404 * previous (longer) match had lower
1406 if (pending.len > lenR) {
1407 if (pending_cost < best_cost) {
1408 best_cost = pending_cost;
1409 matches[nmatches++] = pending;
1410 if (nmatches == max_matches_to_return)
1414 pending_cost = INFINITE_BLOCK_COST;
1416 if (lenL < min_match_len || lenL < lenR)
1426 if (lenR >= min_match_len && lenR > lenL) {
1429 if (--count_remaining == 0)
1430 goto out_save_pending;
1432 input_idx_t offset = i - SA[R];
1434 /* Save match if it has smaller cost. */
1435 cost = lzx_match_cost_fast(offset, queue);
1436 if (cost < pending_cost) {
1437 pending.offset = offset;
1438 pending_cost = cost;
1441 if (link[R].lcpnext < lenR) {
1442 /* Match length decreased. */
1444 lenR = link[R].lcpnext;
1446 /* Save the pending match unless a
1447 * previous (longer) match had lower
1449 if (pending_cost < best_cost) {
1450 matches[nmatches++] = pending;
1451 best_cost = pending_cost;
1452 if (nmatches == max_matches_to_return)
1456 if (lenR < min_match_len || lenR <= lenL)
1460 pending_cost = INFINITE_BLOCK_COST;
1469 if (pending_cost != INFINITE_BLOCK_COST)
1470 matches[nmatches++] = pending;
1477 /* Tell the match-finder to skip the specified number of bytes (@n) in the
1480 lzx_lz_skip_bytes(struct lzx_compressor *ctx, unsigned n)
1482 LZX_ASSERT(n <= ctx->match_window_end - ctx->match_window_pos);
1483 if (ctx->matches_cached) {
1484 ctx->match_window_pos += n;
1486 ctx->cached_matches_pos +=
1487 ctx->cached_matches[ctx->cached_matches_pos].len + 1;
1491 ctx->cached_matches[ctx->cached_matches_pos++].len = 0;
1492 lzx_lz_update_salink(ctx->match_window_pos++, ctx->SA,
1493 ctx->ISA, ctx->salink);
1498 /* Retrieve a list of matches available at the next position in the input.
1500 * The matches are written to ctx->matches in decreasing order of length, and
1501 * the return value is the number of matches found. */
1503 lzx_lz_get_matches_caching(struct lzx_compressor *ctx,
1504 const struct lzx_lru_queue *queue,
1505 struct raw_match **matches_ret)
1507 unsigned num_matches;
1508 struct raw_match *matches;
1510 LZX_ASSERT(ctx->match_window_pos <= ctx->match_window_end);
1512 matches = &ctx->cached_matches[ctx->cached_matches_pos + 1];
1514 if (ctx->matches_cached) {
1515 num_matches = matches[-1].len;
1517 unsigned min_match_len = LZX_MIN_MATCH_LEN;
1518 if (!ctx->params.alg_params.slow.use_len2_matches)
1519 min_match_len = max(min_match_len, 3);
1520 const u32 max_search_depth = ctx->params.alg_params.slow.max_search_depth;
1521 const u32 max_matches_per_pos = ctx->params.alg_params.slow.max_matches_per_pos;
1523 if (unlikely(max_search_depth == 0 || max_matches_per_pos == 0))
1526 num_matches = lzx_lz_get_matches(ctx->match_window_pos,
1534 max_matches_per_pos);
1535 matches[-1].len = num_matches;
1537 ctx->cached_matches_pos += num_matches + 1;
1538 *matches_ret = matches;
1540 /* Cap the length of returned matches to the number of bytes remaining,
1541 * if it is not the whole window. */
1542 if (ctx->match_window_end < ctx->window_size) {
1543 unsigned maxlen = ctx->match_window_end - ctx->match_window_pos;
1544 for (unsigned i = 0; i < num_matches; i++)
1545 if (matches[i].len > maxlen)
1546 matches[i].len = maxlen;
1549 fprintf(stderr, "Pos %u/%u: %u matches\n",
1550 ctx->match_window_pos, ctx->match_window_end, num_matches);
1551 for (unsigned i = 0; i < num_matches; i++)
1552 fprintf(stderr, "\tLen %u Offset %u\n", matches[i].len, matches[i].offset);
1555 #ifdef ENABLE_LZX_DEBUG
1556 for (unsigned i = 0; i < num_matches; i++) {
1557 LZX_ASSERT(matches[i].len >= LZX_MIN_MATCH_LEN);
1558 LZX_ASSERT(matches[i].len <= LZX_MAX_MATCH_LEN);
1559 LZX_ASSERT(matches[i].len <= ctx->match_window_end - ctx->match_window_pos);
1560 LZX_ASSERT(matches[i].offset > 0);
1561 LZX_ASSERT(matches[i].offset <= ctx->match_window_pos);
1562 LZX_ASSERT(!memcmp(&ctx->window[ctx->match_window_pos],
1563 &ctx->window[ctx->match_window_pos - matches[i].offset],
1568 ctx->match_window_pos++;
1573 * Reverse the linked list of near-optimal matches so that they can be returned
1574 * in forwards order.
1576 * Returns the first match in the list.
1578 static struct raw_match
1579 lzx_lz_reverse_near_optimal_match_list(struct lzx_compressor *ctx,
1582 unsigned prev_link, saved_prev_link;
1583 unsigned prev_match_offset, saved_prev_match_offset;
1585 ctx->optimum_end_idx = cur_pos;
1587 saved_prev_link = ctx->optimum[cur_pos].prev.link;
1588 saved_prev_match_offset = ctx->optimum[cur_pos].prev.match_offset;
1591 prev_link = saved_prev_link;
1592 prev_match_offset = saved_prev_match_offset;
1594 saved_prev_link = ctx->optimum[prev_link].prev.link;
1595 saved_prev_match_offset = ctx->optimum[prev_link].prev.match_offset;
1597 ctx->optimum[prev_link].next.link = cur_pos;
1598 ctx->optimum[prev_link].next.match_offset = prev_match_offset;
1600 cur_pos = prev_link;
1601 } while (cur_pos != 0);
1603 ctx->optimum_cur_idx = ctx->optimum[0].next.link;
1605 return (struct raw_match)
1606 { .len = ctx->optimum_cur_idx,
1607 .offset = ctx->optimum[0].next.match_offset,
1612 * lzx_lz_get_near_optimal_match() -
1614 * Choose the optimal match or literal to use at the next position in the input.
1616 * Unlike a greedy parser that always takes the longest match, or even a
1617 * parser with one match/literal look-ahead like zlib, the algorithm used here
1618 * may look ahead many matches/literals to determine the optimal match/literal to
1619 * output next. The motivation is that the compression ratio is improved if the
1620 * compressor can do things like use a shorter-than-possible match in order to
1621 * allow a longer match later, and also take into account the Huffman code cost
1622 * model rather than simply assuming that longer is better.
1624 * Still, this is not truly an optimal parser because very long matches are
1625 * taken immediately, and the raw match-finder takes some shortcuts. This is
1626 * done to avoid considering many different alternatives that are unlikely to
1627 * be significantly better.
1629 * This algorithm is based on that used in 7-Zip's DEFLATE encoder.
1631 * Each call to this function does one of two things:
1633 * 1. Build a near-optimal sequence of matches/literals, up to some point, that
1634 * will be returned by subsequent calls to this function, then return the
1639 * 2. Return the next match/literal previously computed by a call to this
1642 * This function relies on the following state in the compressor context:
1644 * ctx->window (read-only: preprocessed data being compressed)
1645 * ctx->costs (read-only: cost model to use)
1646 * ctx->optimum (internal state; leave uninitialized)
1647 * ctx->optimum_cur_idx (must set to 0 before first call)
1648 * ctx->optimum_end_idx (must set to 0 before first call)
1650 * Plus any state used by the raw match-finder.
1652 * The return value is a (length, offset) pair specifying the match or literal
1653 * chosen. For literals, the length is less than LZX_MIN_MATCH_LEN and the
1654 * offset is meaningless.
1656 static struct raw_match
1657 lzx_lz_get_near_optimal_match(struct lzx_compressor * ctx)
1659 unsigned num_possible_matches;
1660 struct raw_match *possible_matches;
1661 struct raw_match match;
1662 unsigned longest_match_len;
1664 if (ctx->optimum_cur_idx != ctx->optimum_end_idx) {
1665 /* Case 2: Return the next match/literal already found. */
1666 match.len = ctx->optimum[ctx->optimum_cur_idx].next.link -
1667 ctx->optimum_cur_idx;
1668 match.offset = ctx->optimum[ctx->optimum_cur_idx].next.match_offset;
1670 ctx->optimum_cur_idx = ctx->optimum[ctx->optimum_cur_idx].next.link;
1674 /* Case 1: Compute a new list of matches/literals to return. */
1676 ctx->optimum_cur_idx = 0;
1677 ctx->optimum_end_idx = 0;
1679 /* Get matches at this position. */
1680 num_possible_matches = lzx_lz_get_matches_caching(ctx, &ctx->queue, &possible_matches);
1682 /* If no matches found, return literal. */
1683 if (num_possible_matches == 0)
1684 return (struct raw_match){ .len = 0 };
1686 /* The matches that were found are sorted in decreasing order by length.
1687 * Get the length of the longest one. */
1688 longest_match_len = possible_matches[0].len;
1690 /* Greedy heuristic: if the longest match that was found is greater
1691 * than the number of fast bytes, return it immediately; don't bother
1692 * doing more work. */
1693 if (longest_match_len > ctx->params.alg_params.slow.num_fast_bytes) {
1694 lzx_lz_skip_bytes(ctx, longest_match_len - 1);
1695 return possible_matches[0];
1698 /* Calculate the cost to reach the next position by outputting a
1700 ctx->optimum[0].queue = ctx->queue;
1701 ctx->optimum[1].queue = ctx->optimum[0].queue;
1702 ctx->optimum[1].cost = lzx_literal_cost(ctx->window[ctx->match_window_pos],
1704 ctx->optimum[1].prev.link = 0;
1706 /* Calculate the cost to reach any position up to and including that
1707 * reached by the longest match, using the shortest (i.e. closest) match
1708 * that reaches each position. */
1709 BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2);
1710 for (unsigned len = LZX_MIN_MATCH_LEN, match_idx = num_possible_matches - 1;
1711 len <= longest_match_len; len++) {
1713 LZX_ASSERT(match_idx < num_possible_matches);
1715 ctx->optimum[len].queue = ctx->optimum[0].queue;
1716 ctx->optimum[len].prev.link = 0;
1717 ctx->optimum[len].prev.match_offset = possible_matches[match_idx].offset;
1718 ctx->optimum[len].cost = lzx_match_cost(len,
1719 possible_matches[match_idx].offset,
1721 &ctx->optimum[len].queue);
1722 if (len == possible_matches[match_idx].len)
1726 unsigned cur_pos = 0;
1728 /* len_end: greatest index forward at which costs have been calculated
1730 unsigned len_end = longest_match_len;
1733 /* Advance to next position. */
1736 if (cur_pos == len_end || cur_pos == LZX_OPTIM_ARRAY_SIZE)
1737 return lzx_lz_reverse_near_optimal_match_list(ctx, cur_pos);
1739 /* retrieve the number of matches available at this position */
1740 num_possible_matches = lzx_lz_get_matches_caching(ctx, &ctx->optimum[cur_pos].queue,
1743 unsigned new_len = 0;
1745 if (num_possible_matches != 0) {
1746 new_len = possible_matches[0].len;
1748 /* Greedy heuristic: if we found a match greater than
1749 * the number of fast bytes, stop immediately. */
1750 if (new_len > ctx->params.alg_params.slow.num_fast_bytes) {
1752 /* Build the list of matches to return and get
1754 match = lzx_lz_reverse_near_optimal_match_list(ctx, cur_pos);
1756 /* Append the long match to the end of the list. */
1757 ctx->optimum[cur_pos].next.match_offset =
1758 possible_matches[0].offset;
1759 ctx->optimum[cur_pos].next.link = cur_pos + new_len;
1760 ctx->optimum_end_idx = cur_pos + new_len;
1762 /* Skip over the remaining bytes of the long match. */
1763 lzx_lz_skip_bytes(ctx, new_len - 1);
1765 /* Return first match in the list */
1770 /* Consider proceeding with a literal byte. */
1771 block_cost_t cur_cost = ctx->optimum[cur_pos].cost;
1772 block_cost_t cur_plus_literal_cost = cur_cost +
1773 lzx_literal_cost(ctx->window[ctx->match_window_pos - 1],
1775 if (cur_plus_literal_cost < ctx->optimum[cur_pos + 1].cost) {
1776 ctx->optimum[cur_pos + 1].cost = cur_plus_literal_cost;
1777 ctx->optimum[cur_pos + 1].prev.link = cur_pos;
1778 ctx->optimum[cur_pos + 1].queue = ctx->optimum[cur_pos].queue;
1781 if (num_possible_matches == 0)
		/* Consider proceeding with a match.  */

		while (len_end < cur_pos + new_len)
			ctx->optimum[++len_end].cost = INFINITE_BLOCK_COST;

		for (unsigned len = LZX_MIN_MATCH_LEN, match_idx = num_possible_matches - 1;
		     len <= new_len; len++)
		{
			LZX_ASSERT(match_idx < num_possible_matches);
			struct lzx_lru_queue q = ctx->optimum[cur_pos].queue;
			block_cost_t cost = cur_cost + lzx_match_cost(len,
								      possible_matches[match_idx].offset,
								      &ctx->costs,
								      &q);

			if (cost < ctx->optimum[cur_pos + len].cost) {
				ctx->optimum[cur_pos + len].cost = cost;
				ctx->optimum[cur_pos + len].prev.link = cur_pos;
				ctx->optimum[cur_pos + len].prev.match_offset =
						possible_matches[match_idx].offset;
				ctx->optimum[cur_pos + len].queue = q;
			}

			if (len == possible_matches[match_idx].len)
				match_idx--;
		}
	}
}
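/*
 * The parse above is a forward dynamic program: optimum[i].cost is the
 * cheapest known cost to encode the block's first i bytes, and each step
 * relaxes the positions reachable from i by one literal or by one match.
 * A minimal sketch of the same relaxation step, with a hypothetical
 * step_cost standing in for lzx_literal_cost()/lzx_match_cost():
 *
 *	static void
 *	relax(unsigned cost[], unsigned prev[],
 *	      unsigned pos, unsigned len, unsigned step_cost)
 *	{
 *		if (cost[pos] + step_cost < cost[pos + len]) {
 *			cost[pos + len] = cost[pos] + step_cost;
 *			prev[pos + len] = pos;
 *		}
 *	}
 *
 * relax() would be called with len == 1 for a literal and with each
 * candidate match length otherwise; following prev[] backward from the
 * last position recovers the chosen sequence, which is what
 * lzx_lz_reverse_near_optimal_match_list() does here.
 */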
/* Set default symbol costs.  */
static void
lzx_set_default_costs(struct lzx_costs * costs, unsigned num_main_syms)
{
	unsigned i;

	/* Literal symbols  */
	for (i = 0; i < LZX_NUM_CHARS; i++)
		costs->main[i] = 8;

	/* Match header symbols  */
	for (; i < num_main_syms; i++)
		costs->main[i] = 10;

	/* Length symbols  */
	for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
		costs->len[i] = 8;

	/* Aligned offset symbols  */
	for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
		costs->aligned[i] = 3;
}
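/*
 * These starting values are only estimates used before any real statistics
 * exist: a literal is assumed to cost 8 bits and a match header 10 bits.
 * For example, coding three bytes as three literals is initially estimated
 * at 3 * 8 = 24 bits, so even a short match with a modest header-plus-footer
 * estimate will look preferable; later passes replace these guesses with
 * code lengths derived from actual symbol frequencies.
 */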
/* Given the frequencies of symbols in a compressed block and the corresponding
 * Huffman codes, return LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM if an
 * aligned offset or verbatim block, respectively, will take fewer bits to
 * output.  */
static int
lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
			       const struct lzx_codes * codes)
{
	unsigned aligned_cost = 0;
	unsigned verbatim_cost = 0;

	/* Verbatim blocks have a constant 3 bits per position footer.  Aligned
	 * offset blocks have an aligned offset symbol per position footer, plus
	 * an extra 24 bits to output the lengths necessary to reconstruct the
	 * aligned offset code itself.  */
	for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
		verbatim_cost += 3 * freqs->aligned[i];
		aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
	}
	aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
	if (aligned_cost < verbatim_cost)
		return LZX_BLOCKTYPE_ALIGNED;
	else
		return LZX_BLOCKTYPE_VERBATIM;
}
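/*
 * Worked example (illustrative numbers only): suppose a block uses 1000
 * position footers, all selecting aligned offset symbol 0, and the aligned
 * code assigns that symbol a 1-bit codeword.  Then verbatim_cost =
 * 3 * 1000 = 3000 bits, while aligned_cost = 1 * 1000 + 24 = 1024 bits,
 * the 24-bit term being the cost of transmitting the eight 3-bit aligned
 * code lengths.  The aligned block type wins in this case.
 */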
/* Find a near-optimal sequence of matches/literals with which to output the
 * specified LZX block, then set the block's type to whichever of aligned or
 * verbatim has the minimum cost to output.  */
static void
lzx_optimize_block(struct lzx_compressor *ctx, struct lzx_block_spec *spec,
		   unsigned num_passes)
{
	const struct lzx_lru_queue orig_queue = ctx->queue;
	struct lzx_freqs freqs;

	unsigned orig_window_pos = spec->window_pos;
	unsigned orig_cached_pos = ctx->cached_matches_pos;

	LZX_ASSERT(ctx->match_window_pos == spec->window_pos);

	ctx->match_window_end = spec->window_pos + spec->block_size;
	spec->chosen_matches_start_pos = spec->window_pos;

	LZX_ASSERT(num_passes >= 1);

	/* The first optimal parsing pass is done using the cost model already
	 * set in ctx->costs.  Each later pass is done using a cost model
	 * computed from the previous pass.  */
	for (unsigned pass = 0; pass < num_passes; pass++) {

		ctx->match_window_pos = orig_window_pos;
		ctx->cached_matches_pos = orig_cached_pos;
		ctx->queue = orig_queue;
		spec->num_chosen_matches = 0;
		memset(&freqs, 0, sizeof(freqs));

		for (unsigned i = spec->window_pos; i < spec->window_pos + spec->block_size; ) {
			struct raw_match raw_match;
			struct lzx_match lzx_match;

			raw_match = lzx_lz_get_near_optimal_match(ctx);
			if (raw_match.len >= LZX_MIN_MATCH_LEN) {
				lzx_match.data = lzx_tally_match(raw_match.len, raw_match.offset,
								 &freqs, &ctx->queue);
				i += raw_match.len;
			} else {
				lzx_match.data = lzx_tally_literal(ctx->window[i], &freqs);
				i += 1;
			}
			ctx->chosen_matches[spec->chosen_matches_start_pos +
						spec->num_chosen_matches++] = lzx_match;
		}

		lzx_make_huffman_codes(&freqs, &spec->codes,
				       ctx->num_main_syms);
		if (pass < num_passes - 1)
			lzx_set_costs(ctx, &spec->codes.lens);
		ctx->matches_cached = true;
	}
	spec->block_type = lzx_choose_verbatim_or_aligned(&freqs, &spec->codes);
	ctx->matches_cached = false;
}
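/*
 * In outline, each optimization pass is one round of iterative refinement:
 * parse the block under the current cost model, rebuild the Huffman codes
 * from the symbol frequencies that parse produced, then use the new code
 * lengths as the cost model for the next parse.  A schematic of the loop,
 * with hypothetical helper names rather than the real ones above:
 *
 *	costs = default_costs();
 *	for (pass = 0; pass < num_passes; pass++) {
 *		freqs = parse_block(costs);        // choose matches/literals
 *		codes = make_huffman_codes(freqs);
 *		costs = code_lengths(codes);       // feedback for next pass
 *	}
 *
 * The slow-mode default is two passes; further passes trade compression
 * time for (typically small) additional ratio gains.
 */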
static void
lzx_optimize_blocks(struct lzx_compressor *ctx)
{
	lzx_lru_queue_init(&ctx->queue);
	ctx->optimum_cur_idx = 0;
	ctx->optimum_end_idx = 0;

	const unsigned num_passes = ctx->params.alg_params.slow.num_optim_passes;

	for (unsigned i = 0; i < ctx->num_blocks; i++)
		lzx_optimize_block(ctx, &ctx->block_specs[i], num_passes);
}
/* Initialize the suffix array match-finder for the specified input.  */
static void
lzx_lz_init_matchfinder(const u8 T[const restrict],
			const input_idx_t n,
			input_idx_t SA[const restrict],
			input_idx_t ISA[const restrict],
			input_idx_t LCP[const restrict],
			struct salink link[const restrict],
			const unsigned max_match_len)
{
	/* Compute SA (Suffix Array).  */
	{
		/* ISA and link are used as temporary space.  */
		BUILD_BUG_ON(LZX_MIN_WINDOW_SIZE * sizeof(ISA[0]) < 256 * sizeof(saidx_t));
		BUILD_BUG_ON(LZX_MIN_WINDOW_SIZE * 2 * sizeof(link[0]) < 256 * 256 * sizeof(saidx_t));

		if (sizeof(input_idx_t) == sizeof(saidx_t)) {
			divsufsort(T, (saidx_t*)SA, n, (saidx_t*)ISA, (saidx_t*)link);
		} else {
			saidx_t sa[n];
			divsufsort(T, sa, n, (saidx_t*)ISA, (saidx_t*)link);
			for (input_idx_t i = 0; i < n; i++)
				SA[i] = sa[i];
		}
	}
#ifdef ENABLE_LZX_DEBUG
	/* Verify the suffix array: every window position must appear exactly
	 * once, and the suffixes must really be in sorted order.  */
	{
		bool found[n];
		ZERO_ARRAY(found);
		for (input_idx_t r = 0; r < n; r++) {
			input_idx_t i = SA[r];
			LZX_ASSERT(i < n);
			LZX_ASSERT(!found[i]);
			found[i] = true;
		}
	}
	for (input_idx_t r = 0; r < n - 1; r++) {

		input_idx_t i1 = SA[r];
		input_idx_t i2 = SA[r + 1];

		input_idx_t n1 = n - i1;
		input_idx_t n2 = n - i2;

		LZX_ASSERT(memcmp(&T[i1], &T[i2], min(n1, n2)) <= 0);
	}
	LZX_DEBUG("Verified SA (len %u)", n);
#endif /* ENABLE_LZX_DEBUG */
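/*
 * Tiny worked example (illustration only): for T = "abab" the suffixes in
 * sorted order are "ab" (position 2), "abab" (0), "b" (3), "bab" (1), so
 * SA = {2, 0, 3, 1}.  The inverse permutation computed next is
 * ISA = {1, 3, 0, 2}; e.g. ISA[SA[0]] = ISA[2] = 0.
 */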
	/* Compute ISA (Inverse Suffix Array).  */
	for (input_idx_t r = 0; r < n; r++)
		ISA[SA[r]] = r;

	/* Compute LCP (Longest Common Prefix) array.
	 *
	 * Algorithm adapted from Kasai et al. 2001: "Linear-Time
	 * Longest-Common-Prefix Computation in Suffix Arrays and Its
	 * Applications".  */
	{
		input_idx_t h = 0;
		for (input_idx_t i = 0; i < n; i++) {
			input_idx_t r = ISA[i];
			if (r > 0) {
				input_idx_t j = SA[r - 1];

				input_idx_t lim = min(n - i, n - j);

				while (h < lim && T[i + h] == T[j + h])
					h++;
				LCP[r] = h;
				if (h > 0)
					h--;
			}
		}
	}
#ifdef ENABLE_LZX_DEBUG
	/* Verify the LCP array.  */
	for (input_idx_t r = 0; r < n - 1; r++) {
		LZX_ASSERT(ISA[SA[r]] == r);
		LZX_ASSERT(ISA[SA[r + 1]] == r + 1);

		input_idx_t i1 = SA[r];
		input_idx_t i2 = SA[r + 1];
		input_idx_t lcp = LCP[r + 1];

		input_idx_t n1 = n - i1;
		input_idx_t n2 = n - i2;

		LZX_ASSERT(lcp <= min(n1, n2));

		LZX_ASSERT(memcmp(&T[i1], &T[i2], lcp) == 0);
		if (lcp < min(n1, n2))
			LZX_ASSERT(T[i1 + lcp] != T[i2 + lcp]);
	}
#endif /* ENABLE_LZX_DEBUG */
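/*
 * For reference, the core of the Kasai et al. algorithm in isolation (a
 * sketch with simplified types; SA and ISA are assumed already built):
 *
 *	static void
 *	kasai_lcp(const unsigned char *T, const unsigned *SA,
 *		  const unsigned *ISA, unsigned *LCP, unsigned n)
 *	{
 *		unsigned h = 0;
 *		for (unsigned i = 0; i < n; i++) {
 *			if (ISA[i] > 0) {
 *				unsigned j = SA[ISA[i] - 1];
 *				while (i + h < n && j + h < n &&
 *				       T[i + h] == T[j + h])
 *					h++;
 *				LCP[ISA[i]] = h;
 *				if (h > 0)
 *					h--;
 *			} else {
 *				h = 0;
 *			}
 *		}
 *	}
 *
 * The algorithm is linear because the common-prefix length between a
 * suffix and its sorted predecessor can shrink by at most one when moving
 * from text position i to i + 1, so h is never recomputed from scratch.
 */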
	/* Compute salink.next and salink.lcpnext.
	 *
	 * Algorithm adapted from Crochemore et al. 2009:
	 * "LPF computation revisited".
	 *
	 * Note: we cap lcpnext to the maximum match length so that the
	 * match-finder need not worry about it later.  */
	link[n - 1].next = (input_idx_t)~0U;
	link[n - 1].prev = (input_idx_t)~0U;
	link[n - 1].lcpnext = 0;
	link[n - 1].lcpprev = 0;
	for (input_idx_t r = n - 2; r != (input_idx_t)~0U; r--) {
		input_idx_t t = r + 1;
		input_idx_t l = LCP[t];
		while (t != (input_idx_t)~0 && SA[t] > SA[r]) {
			l = min(l, link[t].lcpnext);
			t = link[t].next;
		}
		link[r].next = t;
		link[r].lcpnext = min(l, max_match_len);
		LZX_ASSERT(t == (input_idx_t)~0U || l <= n - SA[t]);
		LZX_ASSERT(l <= n - SA[r]);
		LZX_ASSERT(t == (input_idx_t)~0U ||
			   memcmp(&T[SA[r]], &T[SA[t]], l) == 0);
	}
	/* Compute salink.prev and salink.lcpprev.
	 *
	 * Algorithm adapted from Crochemore et al. 2009:
	 * "LPF computation revisited".
	 *
	 * Note: we cap lcpprev to the maximum match length so that the
	 * match-finder need not worry about it later.  */
	link[0].prev = (input_idx_t)~0;
	link[0].next = (input_idx_t)~0;
	link[0].lcpprev = 0;
	link[0].lcpnext = 0;
	for (input_idx_t r = 1; r < n; r++) {
		input_idx_t t = r - 1;
		input_idx_t l = LCP[r];
		while (t != (input_idx_t)~0 && SA[t] > SA[r]) {
			l = min(l, link[t].lcpprev);
			t = link[t].prev;
		}
		link[r].prev = t;
		link[r].lcpprev = min(l, max_match_len);
		LZX_ASSERT(t == (input_idx_t)~0 || l <= n - SA[t]);
		LZX_ASSERT(l <= n - SA[r]);
		LZX_ASSERT(t == (input_idx_t)~0 ||
			   memcmp(&T[SA[r]], &T[SA[t]], l) == 0);
	}
}
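/*
 * How the links are used: for the suffix of rank r, link[r].prev is the
 * rank of the nearest lexicographically smaller suffix that starts earlier
 * in the window, and link[r].lcpprev is the (capped) number of leading
 * bytes the two suffixes share, i.e. the length of the match that earlier
 * position provides.  link[r].next and link[r].lcpnext are the symmetric
 * fields on the lexicographically greater side.  A hypothetical lookup of
 * the single best "previous" candidate for window position pos:
 *
 *	input_idx_t r = ISA[pos];
 *	input_idx_t t = link[r].prev;
 *	if (t != (input_idx_t)~0U && link[r].lcpprev >= LZX_MIN_MATCH_LEN) {
 *		unsigned match_len    = link[r].lcpprev;
 *		unsigned match_offset = pos - SA[t];
 *		...
 *	}
 *
 * Walking further along prev/next yields progressively shorter matches,
 * which is how the candidate lists consumed by the match-finder are
 * produced.
 */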
/* Prepare the input window into one or more LZX blocks ready to be output.  */
static void
lzx_prepare_blocks(struct lzx_compressor * ctx)
{
	/* Initialize the match-finder.  */
	lzx_lz_init_matchfinder(ctx->window, ctx->window_size,
				ctx->SA, ctx->ISA, ctx->LCP, ctx->salink,
				LZX_MAX_MATCH_LEN);
	ctx->cached_matches_pos = 0;
	ctx->matches_cached = false;
	ctx->match_window_pos = 0;

	/* Set up a default cost model.  */
	lzx_set_default_costs(&ctx->costs, ctx->num_main_syms);

	ctx->num_blocks = DIV_ROUND_UP(ctx->window_size, LZX_DIV_BLOCK_SIZE);
	for (unsigned i = 0; i < ctx->num_blocks; i++) {
		unsigned pos = LZX_DIV_BLOCK_SIZE * i;
		ctx->block_specs[i].window_pos = pos;
		ctx->block_specs[i].block_size = min(ctx->window_size - pos, LZX_DIV_BLOCK_SIZE);
	}

	/* Determine the sequence of matches/literals to output for each block.  */
	lzx_optimize_blocks(ctx);
}
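/*
 * For example, with a hypothetical LZX_DIV_BLOCK_SIZE of 32768, a
 * 100000-byte window is split into DIV_ROUND_UP(100000, 32768) = 4 blocks:
 * three of 32768 bytes and a final one of 100000 - 3 * 32768 = 1696 bytes,
 * exactly as the min() in the loop above computes.
 */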
/*
 * This is the fast version of lzx_prepare_blocks().  This version "quickly"
 * prepares a single compressed block containing the entire input.  See the
 * description of the "Fast algorithm" at the beginning of this file for
 * more details.
 *
 * Input --- the preprocessed data:
 *
 *	ctx->window[]
 *	ctx->window_size
 *
 * Output --- the block specification and the corresponding match/literal data:
 *
 *	ctx->block_specs[]
 *	ctx->num_blocks
 *	ctx->chosen_matches[]
 */
static void
lzx_prepare_block_fast(struct lzx_compressor * ctx)
{
	struct lzx_record_ctx record_ctx;
	struct lzx_block_spec *spec;

	/* Parameters to the hash chain LZ match finder
	 * (lazy with 1 match lookahead)  */
	static const struct lz_params lzx_lz_params = {
		/* Although LZX_MIN_MATCH_LEN == 2, length 2 matches typically
		 * aren't worth choosing when using greedy or lazy parsing.  */
		.min_match      = 3,
		.max_match      = LZX_MAX_MATCH_LEN,
		.max_offset     = 32768,
		.good_match     = LZX_MAX_MATCH_LEN,
		.nice_match     = LZX_MAX_MATCH_LEN,
		.max_chain_len  = LZX_MAX_MATCH_LEN,
		.max_lazy_match = LZX_MAX_MATCH_LEN,
		.too_far        = 4096,
	};

	/* Initialize symbol frequencies and the match offset LRU queue.  */
	memset(&record_ctx.freqs, 0, sizeof(struct lzx_freqs));
	lzx_lru_queue_init(&record_ctx.queue);
	record_ctx.matches = ctx->chosen_matches;

	/* Determine the series of matches/literals to output.  */
	lz_analyze_block(ctx->window,
			 ctx->window_size,
			 lzx_record_match,
			 lzx_record_literal,
			 &record_ctx,
			 &lzx_lz_params,
			 ctx->prev_tab);

	/* Set up the block specification.  */
	spec = &ctx->block_specs[0];
	spec->block_type = LZX_BLOCKTYPE_ALIGNED;
	spec->window_pos = 0;
	spec->block_size = ctx->window_size;
	spec->num_chosen_matches = (record_ctx.matches - ctx->chosen_matches);
	spec->chosen_matches_start_pos = 0;
	lzx_make_huffman_codes(&record_ctx.freqs, &spec->codes,
			       ctx->num_main_syms);
	ctx->num_blocks = 1;
}
static inline void
do_call_insn_translation(u32 *call_insn_target, int input_pos,
			 s32 file_size)
{
	s32 rel_offset;
	s32 abs_offset;

	rel_offset = le32_to_cpu(*call_insn_target);
	if (rel_offset >= -input_pos && rel_offset < file_size) {

		if (rel_offset < file_size - input_pos) {
			/* "good translation" */
			abs_offset = rel_offset + input_pos;
		} else {
			/* "compensating translation" */
			abs_offset = rel_offset - file_size;
		}

		*call_insn_target = cpu_to_le32(abs_offset);
	}
}
/* This is the reverse of undo_call_insn_preprocessing() in lzx-decompress.c.
 * See the comment above that function for more information.  */
static void
do_call_insn_preprocessing(u8 data[], int size)
{
	for (int i = 0; i < size - 10; i++) {
		if (data[i] == 0xe8) {
			do_call_insn_translation((u32*)&data[i + 1], i,
						 LZX_WIM_MAGIC_FILESIZE);
			i += 4;
		}
	}
}
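/*
 * Worked example (illustrative values, assuming LZX_WIM_MAGIC_FILESIZE is
 * far larger than these offsets, as it is in the WIM format): suppose an
 * 0xe8 byte occurs at input position i = 0x1000 and the little-endian
 * operand that follows is the relative target 0x2000.  Since
 * 0x2000 >= -0x1000 and 0x2000 < LZX_WIM_MAGIC_FILESIZE - 0x1000, the
 * "good translation" case applies and the operand is rewritten as the
 * absolute offset 0x2000 + 0x1000 = 0x3000.  Identical call targets then
 * produce identical operand bytes regardless of where each call
 * instruction sits, which creates more LZ matches.
 */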
/* API function documented in wimlib.h  */
WIMLIBAPI unsigned
wimlib_lzx_compress2(const void * const restrict uncompressed_data,
		     unsigned const uncompressed_len,
		     void * const restrict compressed_data,
		     struct wimlib_lzx_context * const restrict lzx_ctx)
{
	struct lzx_compressor *ctx = (struct lzx_compressor*)lzx_ctx;
	struct output_bitstream ostream;
	input_idx_t compressed_len;

	if (uncompressed_len < 100) {
		LZX_DEBUG("Too small to bother compressing.");
		return 0;
	}

	if (uncompressed_len > ctx->max_window_size) {
		LZX_DEBUG("Can't compress %u bytes using window of %u bytes!",
			  uncompressed_len, ctx->max_window_size);
		return 0;
	}

	LZX_DEBUG("Attempting to compress %u bytes...", uncompressed_len);
	/* The input data must be preprocessed.  To avoid changing the original
	 * input, copy it to a temporary buffer.  */
	memcpy(ctx->window, uncompressed_data, uncompressed_len);
	ctx->window_size = uncompressed_len;

	/* This line is unnecessary; it just avoids inconsequential accesses of
	 * uninitialized memory that would show up in memory-checking tools
	 * such as valgrind.  */
	memset(&ctx->window[ctx->window_size], 0, 12);

	LZX_DEBUG("Preprocessing data...");

	/* Before doing any actual compression, do the call instruction (0xe8
	 * byte) translation on the uncompressed data.  */
	do_call_insn_preprocessing(ctx->window, ctx->window_size);

	LZX_DEBUG("Preparing blocks...");

	/* Prepare the compressed data.  */
	if (ctx->params.algorithm == WIMLIB_LZX_ALGORITHM_FAST)
		lzx_prepare_block_fast(ctx);
	else
		lzx_prepare_blocks(ctx);

	LZX_DEBUG("Writing compressed blocks...");

	/* Generate the compressed data.  */
	init_output_bitstream(&ostream, compressed_data, ctx->window_size - 1);
	lzx_write_all_blocks(ctx, &ostream);

	LZX_DEBUG("Flushing bitstream...");
	compressed_len = flush_output_bitstream(&ostream);
	if (compressed_len == ~(input_idx_t)0) {
		LZX_DEBUG("Data did not compress to less than original length!");
		return 0;
	}

	LZX_DEBUG("Done: compressed %u => %u bytes.",
		  uncompressed_len, compressed_len);
	/* Verify that we really get the same thing back when decompressing.
	 * Although this could be disabled by default in all cases, it only
	 * takes around 2-3% of the running time of the slow algorithm to do
	 * the verification.  */
	if (ctx->params.algorithm == WIMLIB_LZX_ALGORITHM_SLOW
	#if defined(ENABLE_LZX_DEBUG) || defined(ENABLE_VERIFY_COMPRESSION)
	    || 1
	#endif
	    )
	{
		/* The decompression buffer can be any temporary space that's
		 * no longer needed.  */
		u8 *buf = (u8*)(ctx->SA ? ctx->SA : ctx->prev_tab);

		if (wimlib_lzx_decompress2(compressed_data, compressed_len,
					   buf, uncompressed_len, ctx->max_window_size))
		{
			ERROR("Failed to decompress data we "
			      "compressed using LZX algorithm");
			wimlib_assert(0);
			return 0;
		}

		if (memcmp(uncompressed_data, buf, uncompressed_len)) {
			ERROR("Data we compressed using LZX algorithm "
			      "didn't decompress to original");
			wimlib_assert(0);
			return 0;
		}
	}

	return compressed_len;
}
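/*
 * Typical calling sequence for this API (a sketch; error handling
 * abbreviated):
 *
 *	struct wimlib_lzx_context *lzx_ctx = NULL;
 *	if (wimlib_lzx_alloc_context(32768, NULL, &lzx_ctx))
 *		... handle allocation failure ...
 *	out_len = wimlib_lzx_compress2(in_buf, in_len, out_buf, lzx_ctx);
 *	if (out_len == 0)
 *		... data was incompressible; store it uncompressed ...
 *	wimlib_lzx_free_context(lzx_ctx);
 *
 * wimlib_lzx_compress() at the end of this file wraps exactly this
 * sequence for the common 32768-byte window case.
 */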
static bool
lzx_params_compatible(const struct wimlib_lzx_params *oldparams,
		      const struct wimlib_lzx_params *newparams)
{
	return 0 == memcmp(oldparams, newparams, sizeof(struct wimlib_lzx_params));
}

static struct wimlib_lzx_params lzx_user_default_params;
static struct wimlib_lzx_params *lzx_user_default_params_ptr;
static bool
lzx_params_valid(const struct wimlib_lzx_params *params)
{
	/* Validate the parameters.  */
	if (params->size_of_this != sizeof(struct wimlib_lzx_params)) {
		LZX_DEBUG("Invalid parameter structure size!");
		return false;
	}

	if (params->algorithm != WIMLIB_LZX_ALGORITHM_SLOW &&
	    params->algorithm != WIMLIB_LZX_ALGORITHM_FAST)
	{
		LZX_DEBUG("Invalid algorithm.");
		return false;
	}

	if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
		if (params->alg_params.slow.num_optim_passes < 1) {
			LZX_DEBUG("Invalid number of optimization passes!");
			return false;
		}

		if (params->alg_params.slow.main_nostat_cost < 1 ||
		    params->alg_params.slow.main_nostat_cost > 16)
		{
			LZX_DEBUG("Invalid main_nostat_cost!");
			return false;
		}

		if (params->alg_params.slow.len_nostat_cost < 1 ||
		    params->alg_params.slow.len_nostat_cost > 16)
		{
			LZX_DEBUG("Invalid len_nostat_cost!");
			return false;
		}

		if (params->alg_params.slow.aligned_nostat_cost < 1 ||
		    params->alg_params.slow.aligned_nostat_cost > 8)
		{
			LZX_DEBUG("Invalid aligned_nostat_cost!");
			return false;
		}
	}

	return true;
}
/* API function documented in wimlib.h  */
WIMLIBAPI int
wimlib_lzx_set_default_params(const struct wimlib_lzx_params * params)
{
	if (params) {
		if (!lzx_params_valid(params))
			return WIMLIB_ERR_INVALID_PARAM;
		lzx_user_default_params = *params;
		lzx_user_default_params_ptr = &lzx_user_default_params;
	} else {
		lzx_user_default_params_ptr = NULL;
	}
	return 0;
}
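/*
 * A caller that wants every later-allocated context to default to the fast
 * algorithm might do the following (a sketch; only fields used above are
 * assumed):
 *
 *	struct wimlib_lzx_params p = {
 *		.size_of_this = sizeof(p),
 *		.algorithm = WIMLIB_LZX_ALGORITHM_FAST,
 *		.use_defaults = 1,
 *	};
 *	wimlib_lzx_set_default_params(&p);
 *
 * Passing NULL instead reverts to the library's built-in default (the slow
 * algorithm, per wimlib_lzx_alloc_context() below).
 */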
/* API function documented in wimlib.h  */
WIMLIBAPI int
wimlib_lzx_alloc_context(u32 window_size,
			 const struct wimlib_lzx_params *params,
			 struct wimlib_lzx_context **ctx_pp)
{
	LZX_DEBUG("Allocating LZX context...");

	if (!lzx_window_size_valid(window_size))
		return WIMLIB_ERR_INVALID_PARAM;

	struct lzx_compressor *ctx;

	static const struct wimlib_lzx_params fast_default = {
		.size_of_this = sizeof(struct wimlib_lzx_params),
		.algorithm = WIMLIB_LZX_ALGORITHM_FAST,
		.use_defaults = 0,
	};
	static const struct wimlib_lzx_params slow_default = {
		.size_of_this = sizeof(struct wimlib_lzx_params),
		.algorithm = WIMLIB_LZX_ALGORITHM_SLOW,
		.use_defaults = 0,
		.alg_params = {
			.slow = {
				.use_len2_matches = 1,
				.num_fast_bytes = 32,
				.num_optim_passes = 2,
				.max_search_depth = 50,
				.max_matches_per_pos = 3,
				.main_nostat_cost = 15,
				.len_nostat_cost = 15,
				.aligned_nostat_cost = 7,
			},
		},
	};
	if (params) {
		if (!lzx_params_valid(params))
			return WIMLIB_ERR_INVALID_PARAM;
	} else {
		LZX_DEBUG("Using default algorithm and parameters.");
		if (lzx_user_default_params_ptr)
			params = lzx_user_default_params_ptr;
		else
			params = &slow_default;
	}

	if (params->use_defaults) {
		if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW)
			params = &slow_default;
		else
			params = &fast_default;
	}

	if (ctx_pp) {
		ctx = *(struct lzx_compressor**)ctx_pp;

		if (ctx &&
		    lzx_params_compatible(&ctx->params, params) &&
		    ctx->max_window_size == window_size)
			return 0;
	} else {
		LZX_DEBUG("Check parameters only.");
		return 0;
	}
	LZX_DEBUG("Allocating memory.");

	ctx = CALLOC(1, sizeof(struct lzx_compressor));
	if (ctx == NULL)
		goto err;

	ctx->num_main_syms = lzx_get_num_main_syms(window_size);
	ctx->max_window_size = window_size;
	ctx->window = MALLOC(window_size + 12);
	if (ctx->window == NULL)
		goto err;

	if (params->algorithm == WIMLIB_LZX_ALGORITHM_FAST) {
		ctx->prev_tab = MALLOC(window_size * sizeof(ctx->prev_tab[0]));
		if (ctx->prev_tab == NULL)
			goto err;
	}

	size_t block_specs_length = DIV_ROUND_UP(window_size, LZX_DIV_BLOCK_SIZE);
	ctx->block_specs = MALLOC(block_specs_length * sizeof(ctx->block_specs[0]));
	if (ctx->block_specs == NULL)
		goto err;

	if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
		ctx->SA = MALLOC(3U * window_size * sizeof(ctx->SA[0]));
		if (ctx->SA == NULL)
			goto err;
		ctx->ISA = ctx->SA + window_size;
		ctx->LCP = ctx->ISA + window_size;

		ctx->salink = MALLOC(window_size * sizeof(ctx->salink[0]));
		if (ctx->salink == NULL)
			goto err;
	}

	if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
		ctx->optimum = MALLOC((LZX_OPTIM_ARRAY_SIZE + LZX_MAX_MATCH_LEN) *
				      sizeof(ctx->optimum[0]));
		if (ctx->optimum == NULL)
			goto err;
	}

	if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
		u32 cache_per_pos;

		cache_per_pos = params->alg_params.slow.max_matches_per_pos;
		if (cache_per_pos > LZX_MAX_CACHE_PER_POS)
			cache_per_pos = LZX_MAX_CACHE_PER_POS;

		ctx->cached_matches = MALLOC(window_size * (cache_per_pos + 1) *
					     sizeof(ctx->cached_matches[0]));
		if (ctx->cached_matches == NULL)
			goto err;
	}

	ctx->chosen_matches = MALLOC(window_size * sizeof(ctx->chosen_matches[0]));
	if (ctx->chosen_matches == NULL)
		goto err;

	memcpy(&ctx->params, params, sizeof(struct wimlib_lzx_params));
	memset(&ctx->zero_codes, 0, sizeof(ctx->zero_codes));
	LZX_DEBUG("Successfully allocated new LZX context.");

	wimlib_lzx_free_context(*ctx_pp);
	*ctx_pp = (struct wimlib_lzx_context*)ctx;
	return 0;

err:
	wimlib_lzx_free_context((struct wimlib_lzx_context*)ctx);
	LZX_DEBUG("Ran out of memory.");
	return WIMLIB_ERR_NOMEM;
}
/* API function documented in wimlib.h  */
WIMLIBAPI void
wimlib_lzx_free_context(struct wimlib_lzx_context *_ctx)
{
	struct lzx_compressor *ctx = (struct lzx_compressor*)_ctx;

	if (ctx) {
		FREE(ctx->chosen_matches);
		FREE(ctx->cached_matches);
		FREE(ctx->optimum);
		FREE(ctx->salink);
		FREE(ctx->SA);
		FREE(ctx->block_specs);
		FREE(ctx->prev_tab);
		FREE(ctx->window);
		FREE(ctx);
	}
}
/* API function documented in wimlib.h  */
WIMLIBAPI unsigned
wimlib_lzx_compress(const void * const restrict uncompressed_data,
		    unsigned const uncompressed_len,
		    void * const restrict compressed_data)
{
	int ret;
	struct wimlib_lzx_context *ctx = NULL;
	unsigned compressed_len;

	ret = wimlib_lzx_alloc_context(32768, NULL, &ctx);
	if (ret) {
		wimlib_assert(ret != WIMLIB_ERR_INVALID_PARAM);
		WARNING("Couldn't allocate LZX compression context: %"TS"",
			wimlib_get_error_string(ret));
		return 0;
	}

	compressed_len = wimlib_lzx_compress2(uncompressed_data,
					      uncompressed_len,
					      compressed_data,
					      ctx);

	wimlib_lzx_free_context(ctx);

	return compressed_len;
}