4 * LZX compression routines
8 * Copyright (C) 2012, 2013 Eric Biggers
10 * This file is part of wimlib, a library for working with WIM files.
12 * wimlib is free software; you can redistribute it and/or modify it under the
13 * terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 3 of the License, or (at your option)
17 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
18 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
19 * A PARTICULAR PURPOSE. See the GNU General Public License for more
22 * You should have received a copy of the GNU General Public License
23 * along with wimlib; if not, see http://www.gnu.org/licenses/.
28 * This file contains a compressor for the LZX compression format, as used in
29 * the WIM file format.
34 * First, the primary reference for the LZX compression format is the
35 * specification released by Microsoft.
37 * Second, the comments in lzx-decompress.c provide some more information about
38 * the LZX compression format, including errors in the Microsoft specification.
40 * Do note that LZX shares many similarities with DEFLATE, the algorithm used by
41 * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding,
42 * and certain other details are quite similar, such as the method for storing
43 * Huffman codes. However, some of the main differences are:
45 * - LZX preprocesses the data to try to make x86 machine code slightly more
46 * compressible before compressing it further.
47 * - LZX uses a "main" alphabet which combines literals and matches, with the
48 * match symbols containing a "length header" (giving all or part of the match
49 * length) and a "position slot" (giving, roughly speaking, the order of
50 * magnitude of the match offset).
51 * - LZX does not have static Huffman blocks; however it does have two types of
52 * dynamic Huffman blocks ("aligned offset" and "verbatim").
53 * - LZX has a minimum match length of 2 rather than 3.
54 * - In LZX, match offsets 0 through 2 actually represent entries in an LRU
55 * queue of match offsets. This is very useful for certain types of files,
56 * such as binary files that have repeating records.
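 *
 *   As an illustration of the last point (a sketch only, not actual wimlib
 *   code; all names here are hypothetical), the decompressor-side handling of
 *   the recent offsets can be written as:
 *
 *	unsigned R[3];	// recent (repeat) offsets, most recent first
 *
 *	if (formatted_offset < 3) {
 *		offset = R[formatted_offset];	// repeat offset
 *		R[formatted_offset] = R[0];	// swap to front
 *		R[0] = offset;
 *	} else {
 *		offset = formatted_offset - 2;	// explicit offset, minus bias
 *		R[2] = R[1]; R[1] = R[0]; R[0] = offset;
 *	}
 *
 *   The compressor-side counterpart of this is lzx_get_position_slot() below.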
61 * There are actually two distinct overall algorithms implemented here. We
62 * shall refer to them as the "slow" algorithm and the "fast" algorithm. The
63 * "slow" algorithm spends more time compressing to achieve a higher compression
64 * ratio compared to the "fast" algorithm. More details are presented below.
69 * The "slow" algorithm to generate LZX-compressed data is roughly as follows:
71 * 1. Preprocess the input data to translate the targets of x86 call
72 * instructions to absolute offsets.
74 * 2. Build the suffix array and inverse suffix array for the input data. The
75 * suffix array contains the indices of all suffixes of the input data,
76 * sorted lexicographically by the corresponding suffixes. The "position" of
77 * a suffix is the index of that suffix in the original string, whereas the
78 * "rank" of a suffix is the index at which that suffix's position is found
79 * in the suffix array. (A short sketch of these relationships follows this overview.)
81 * 3. Build the longest common prefix array corresponding to the suffix array.
83 * 4. For each suffix, find the highest lower ranked suffix that has a lower
84 * position, the lowest higher ranked suffix that has a lower position, and
85 * the length of the common prefix shared between each. This information is
86 * later used to link suffix ranks into a doubly-linked list for searching the suffix array.
89 * 5. Set a default cost model for matches/literals.
91 * 6. Determine the lowest cost sequence of LZ77 matches ((offset, length)
92 * pairs) and literal bytes to divide the input into. Raw match-finding is
93 * done by searching the suffix array using a linked list to avoid
94 * considering any suffixes that start after the current position. Each run
95 * of the match-finder returns the approximate lowest-cost longest match as
96 * well as any shorter matches that have even lower approximate costs. Each
97 * such run also adds the suffix rank of the current position into the linked
98 * list being used to search the suffix array. Parsing, or match-choosing,
99 * is solved as a minimum-cost path problem using a forward "optimal parsing"
100 * algorithm based on the Deflate encoder from 7-Zip. This algorithm moves
101 * forward calculating the minimum cost to reach each byte until either a
102 * very long match is found or until a position is found at which no matches start or overlap.
105 * 7. Build the Huffman codes needed to output the matches/literals.
107 * 8. Up to a certain number of iterations, use the resulting Huffman codes to
108 * refine a cost model and go back to Step #6 to determine an improved
109 * sequence of matches and literals.
111 * 9. Output the resulting block using the match/literal sequences and the
112 * Huffman codes that were computed for the block.
114 * Note: the algorithm does not yet attempt to split the input into multiple LZX
115 * blocks, instead using a series of blocks of LZX_DIV_BLOCK_SIZE bytes.
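 *
 * As a sketch of the data structures from steps 2-3 (illustrative only, not
 * the actual code: the real suffix array comes from divsufsort, and the LCP
 * array from the Kasai et al. algorithm credited in the acknowledgments
 * below):
 *
 *	// ISA is the inverse permutation of SA, so ISA[SA[r]] == r.
 *	for (r = 0; r < window_size; r++)
 *		ISA[SA[r]] = r;
 *
 *	// Kasai et al. (2001): LCP[r] is the length of the longest common
 *	// prefix of the suffixes at positions SA[r - 1] and SA[r].
 *	for (i = 0, h = 0; i < window_size; i++) {
 *		r = ISA[i];
 *		if (r == 0) { h = 0; continue; }
 *		j = SA[r - 1];
 *		while (i + h < window_size && j + h < window_size &&
 *		       window[i + h] == window[j + h])
 *			h++;
 *		LCP[r] = h;
 *		if (h > 0)
 *			h--;
 *	}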
120 * The fast algorithm (and the only one available in wimlib v1.5.1 and earlier)
121 * spends much less time on the main bottlenecks of the compression process ---
122 * that is, the match finding and match choosing. Matches are found and chosen
123 * with hash chains using a greedy parse with one position of look-ahead. No
124 * block splitting is done; only compressing the full input into an aligned
125 * offset block is considered.
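 *
 * As a rough sketch of this style of match-finding (illustrative only; the
 * names here are hypothetical, and wimlib's actual hash-chain matcher is the
 * one in lz77.c credited in the acknowledgments below):
 *
 *	h = hash_3_bytes(&window[pos]);
 *	for (cur = head[h]; cur != NO_POS && depth-- != 0; cur = prev_tab[cur])
 *		best_len = max(best_len, match_length(window, cur, pos));
 *	prev_tab[pos] = head[h];	// link pos into its hash chain
 *	head[h] = pos;
 *
 * The greedy parse runs this at the current position and at the next one; if
 * the look-ahead match is longer, a literal is emitted for the current
 * position and the longer match is taken at the next one.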
130 * The old API (retained for backward compatibility) consists of just one function:
133 * wimlib_lzx_compress()
135 * The new compressor has more potential parameters and needs more memory, so
136 * the new API ties up memory allocations and compression parameters into a single context:
139 * wimlib_lzx_alloc_context()
140 * wimlib_lzx_compress2()
141 * wimlib_lzx_free_context()
142 * wimlib_lzx_set_default_params()
144 * Both wimlib_lzx_compress() and wimlib_lzx_compress2() are designed to
145 * compress an in-memory buffer of up to the window size, which can be any power
146 * of two between 2^15 and 2^21 inclusively. However, by default, the WIM
147 * format uses 2^15, and this is seemingly the only value that is compatible
148 * with WIMGAPI. In any case, the window is not a true "sliding window" since
149 * no data is ever "slid out" of the window. This is needed for the WIM format,
150 * which is designed such that chunks may be randomly accessed.
152 * Both wimlib_lzx_compress() and wimlib_lzx_compress2() return 0 if the data
153 * could not be compressed to less than the size of the uncompressed data.
154 * Again, this is suitable for the WIM format, which stores such data chunks uncompressed.
157 * The functions in this LZX compression API are exported from the library,
158 * although, with the possible exception of wimlib_lzx_set_default_params(), this
159 * is only in case other programs happen to have uses for it other than the WIM
160 * reading/writing already handled through the rest of the library.
165 * Acknowledgments to several open-source projects and research papers that made
166 * it possible to implement this code:
168 * - divsufsort (author: Yuta Mori), for the suffix array construction code,
169 * located in a separate directory (divsufsort/).
171 * - "Linear-Time Longest-Common-Prefix Computation in Suffix Arrays and Its
172 * Applications" (Kasai et al. 2001), for the LCP array computation.
174 * - "LPF computation revisited" (Crochemore et al. 2009) for the prev and next
175 * array computations.
177 * - 7-Zip (author: Igor Pavlov) for the algorithm for forward optimal parsing
180 * - zlib (author: Jean-loup Gailly and Mark Adler), for the hash table
181 * match-finding algorithm (used in lz77.c).
183 * - lzx-compress (author: Matthew T. Russotto), on which some parts of this
184 * code were originally based.
192 #include "wimlib/compress.h"
193 #include "wimlib/endianness.h"
194 #include "wimlib/error.h"
195 #include "wimlib/lzx.h"
196 #include "wimlib/util.h"
201 #ifdef ENABLE_LZX_DEBUG
202 # include "wimlib/decompress.h"
205 #include "divsufsort/divsufsort.h"
207 typedef u32 block_cost_t;
208 #define INFINITE_BLOCK_COST ((block_cost_t)~0U)
210 #define LZX_OPTIM_ARRAY_SIZE 4096
212 #define LZX_DIV_BLOCK_SIZE 32768
214 #define LZX_MAX_CACHE_PER_POS 10
216 /* Codewords for the LZX main, length, and aligned offset Huffman codes */
217 struct lzx_codewords {
218 u16 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
219 u16 len[LZX_LENCODE_NUM_SYMBOLS];
220 u16 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
223 /* Codeword lengths (in bits) for the LZX main, length, and aligned offset Huffman codes.
226 * A 0 length means the codeword has zero frequency.
229 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
230 u8 len[LZX_LENCODE_NUM_SYMBOLS];
231 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
234 /* Costs for the LZX main, length, and aligned offset Huffman symbols.
236 * If a codeword has zero frequency, it must still be assigned some nonzero cost
237 * --- generally a high cost, since even if it gets used in the next iteration,
238 * it probably will not be used very many times. */
240 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
241 u8 len[LZX_LENCODE_NUM_SYMBOLS];
242 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
245 /* The LZX main, length, and aligned offset Huffman codes */
247 struct lzx_codewords codewords;
248 struct lzx_lens lens;
251 /* Tables for tallying symbol frequencies in the three LZX alphabets */
253 input_idx_t main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
254 input_idx_t len[LZX_LENCODE_NUM_SYMBOLS];
255 input_idx_t aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
258 /* LZX intermediate match/literal format */
262 * 31 1 if a match, 0 if a literal.
264 * 30-25 position slot. This can be at most 50, so it will fit in 6
267 * 8-24 position footer. This is the offset of the real formatted
268 * offset from the position base. This can be at most 17 bits
269 * (since lzx_extra_bits[LZX_MAX_POSITION_SLOTS - 1] is 17).
271 * 0-7 length of match, minus 2. This can be at most
272 * (LZX_MAX_MATCH_LEN - 2) == 255, so it will fit in 8 bits. */
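
/* Illustrative sketch (a hypothetical helper, not present in wimlib): how the
 * fields described above pack into the 32-bit intermediate representation.
 * The real packing is done inline in lzx_tally_match() and the unpacking in
 * lzx_write_match(). */
static inline u32
lzx_pack_intermediate_match_sketch(unsigned position_slot,
				   unsigned position_footer,
				   unsigned adjusted_len)
{
	return 0x80000000 |			/* bit 31: 1 = match           */
	       (position_slot << 25) |		/* bits 30-25: position slot   */
	       (position_footer << 8) |		/* bits 24-8: position footer  */
	       adjusted_len;			/* bits 7-0: match length - 2  */
}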
276 /* Raw LZ match/literal format: just a length and offset.
278 * The length is the number of bytes of the match, and the offset is the number
279 * of bytes back in the input the match is from the current position.
281 * If @len < LZX_MIN_MATCH_LEN, then it's really just a literal byte and @offset is meaningless. */
288 /* Specification for an LZX block. */
289 struct lzx_block_spec {
291 /* One of the LZX_BLOCKTYPE_* constants indicating which type of this
295 /* 0-based position in the window at which this block starts. */
296 input_idx_t window_pos;
298 /* The number of bytes of uncompressed data this block represents. */
299 input_idx_t block_size;
301 /* The position in the 'chosen_matches' array in the `struct
302 * lzx_compressor' at which the match/literal specifications for
303 * this block begin. */
304 input_idx_t chosen_matches_start_pos;
306 /* The number of match/literal specifications for this block. */
307 input_idx_t num_chosen_matches;
309 /* Huffman codes for this block. */
310 struct lzx_codes codes;
314 * An array of these structures is used during the match-choosing algorithm.
315 * They correspond to consecutive positions in the window and are used to keep
316 * track of the cost to reach each position, and the match/literal choices that
317 * need to be chosen to reach that position.
320 /* The approximate minimum cost, in bits, to reach this position in the
321 * window which has been found so far. */
324 /* The union here is just for clarity, since the fields are used in two
325 * slightly different ways. Initially, the @prev structure is filled in
326 * first, and links go from later in the window to earlier in the
327 * window. Later, @next structure is filled in and links go from
328 * earlier in the window to later in the window. */
331 /* Position of the start of the match or literal that
332 * was taken to get to this position in the approximate
333 * minimum-cost parse. */
336 /* Offset (as in an LZ (length, offset) pair) of the
337 * match or literal that was taken to get to this
338 * position in the approximate minimum-cost parse. */
339 input_idx_t match_offset;
342 /* Position at which the match or literal starting at
343 * this position ends in the minimum-cost parse. */
346 /* Offset (as in an LZ (length, offset) pair) of the
347 * match or literal starting at this position in the
348 * approximate minimum-cost parse. */
349 input_idx_t match_offset;
353 /* The match offset LRU queue that will exist when the approximate
354 * minimum-cost path to reach this position is taken. */
355 struct lzx_lru_queue queue;
358 /* Suffix array link */
360 /* Rank of highest ranked suffix that has rank lower than the suffix
361 * corresponding to this structure and either has a lower position
362 * (initially) or has a position lower than the highest position at
363 * which matches have been searched for so far, or -1 if there is no such suffix. */
367 /* Rank of lowest ranked suffix that has rank greater than the suffix
368 * corresponding to this structure and either has a lower position
369 * (initially) or has a position lower than the highest position at which
370 * matches have been searched for so far, or -1 if there is no such suffix. */
374 /* Length of longest common prefix between the suffix corresponding to
375 * this structure and the suffix with rank @prev, or 0 if @prev is -1.
379 /* Length of longest common prefix between the suffix corresponding to
380 * this structure and the suffix with rank @next, or 0 if @next is -1.
385 /* State of the LZX compressor. */
386 struct lzx_compressor {
388 /* The parameters that were used to create the compressor. */
389 struct wimlib_lzx_params params;
391 /* The buffer of data to be compressed.
393 * 0xe8 byte preprocessing is done directly on the data here before
394 * further compression.
396 * Note that this compressor does *not* use a real sliding window!!!!
397 * It's not needed in the WIM format, since every chunk is compressed
398 * independently. This is by design, to allow random access to the
401 * We reserve a few extra bytes to potentially allow reading off the end
402 * of the array in the match-finding code for optimization purposes.
406 /* Number of bytes of data to be compressed, which is the number of
407 * bytes of data in @window that are actually valid. */
408 input_idx_t window_size;
410 /* Allocated size of the @window. */
411 input_idx_t max_window_size;
413 /* Number of symbols in the main alphabet (depends on the
414 * @max_window_size since it determines the maximum allowed offset). */
415 unsigned num_main_syms;
417 /* The current match offset LRU queue. */
418 struct lzx_lru_queue queue;
420 /* Space for the sequences of matches/literals that were chosen for each block. */
422 struct lzx_match *chosen_matches;
424 /* Information about the LZX blocks the preprocessed input was divided into. */
426 struct lzx_block_spec *block_specs;
428 /* Number of LZX blocks the input was divided into; a.k.a. the number of
429 * elements of @block_specs that are valid. */
432 /* This is simply filled in with zeroes and used to avoid special-casing
433 * the output of the first compressed Huffman code, which conceptually
434 * has a delta taken from a code with all symbols having zero-length codewords. */
436 struct lzx_codes zero_codes;
438 /* The current cost model. */
439 struct lzx_costs costs;
441 /* Fast algorithm only: Array of hash table links. */
442 input_idx_t *prev_tab;
444 /* Suffix array for window.
445 * This is a mapping from suffix rank to suffix position. */
448 /* Inverse suffix array for window.
449 * This is a mapping from suffix position to suffix rank.
450 * If 0 <= r < window_size, then ISA[SA[r]] == r. */
453 /* Longest common prefix array corresponding to the suffix array SA.
454 * LCP[i] is the length of the longest common prefix between the
455 * suffixes with positions SA[i - 1] and SA[i]. LCP[0] is undefined.
459 /* Suffix array links.
461 * During a linear scan of the input string to find matches, this array
462 * is used to keep track of which rank suffixes in the suffix array appear
463 * before the current position. Instead of searching in the original
464 * suffix array, scans for matches at a given position traverse a linked
465 * list containing only suffixes that appear before that position. */
466 struct salink *salink;
468 /* Position in window of next match to return. */
469 input_idx_t match_window_pos;
471 /* The match-finder shall ensure the length of matches does not exceed
472 * this position in the input. */
473 input_idx_t match_window_end;
475 /* Matches found by the match-finder are cached in the following array
476 * to achieve a slight speedup when the same matches are needed on
477 * subsequent passes. This is suboptimal because different matches may
478 * be preferred with different cost models, but seems to be a worthwhile trade-off. */
480 struct raw_match *cached_matches;
481 unsigned cached_matches_pos;
484 /* Slow algorithm only: Temporary space used for the match-choosing algorithm.
487 * The size of this array must be at least LZX_MAX_MATCH_LEN but
488 * otherwise is arbitrary. More space simply allows the match-choosing
489 * algorithm to potentially find better matches (depending on the input,
491 struct lzx_optimal *optimum;
493 /* Slow algorithm only: Variables used by the match-choosing algorithm.
495 * When matches have been chosen, optimum_cur_idx is set to the position
496 * in the window of the next match/literal to return and optimum_end_idx
497 * is set to the position in the window at the end of the last
498 * match/literal to return. */
503 /* Returns the LZX position slot that corresponds to a given match offset,
504 * taking into account the recent offset queue and updating it if the offset is found in it. */
507 lzx_get_position_slot(unsigned offset, struct lzx_lru_queue *queue)
509 unsigned position_slot;
511 /* See if the offset was recently used. */
512 for (unsigned i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
513 if (offset == queue->R[i]) {
516 /* Bring the repeat offset to the front of the
517 * queue. Note: this is, in fact, not a real
518 * LRU queue because repeat matches are simply
519 * swapped to the front. */
520 swap(queue->R[0], queue->R[i]);
522 /* The resulting position slot is simply the first index
523 * at which the offset was found in the queue. */
528 /* The offset was not recently used; look up its real position slot. */
529 position_slot = lzx_get_position_slot_raw(offset + LZX_OFFSET_OFFSET);
531 /* Bring the new offset to the front of the queue. */
532 for (unsigned i = LZX_NUM_RECENT_OFFSETS - 1; i > 0; i--)
533 queue->R[i] = queue->R[i - 1];
534 queue->R[0] = offset;
536 return position_slot;
539 /* Build the main, length, and aligned offset Huffman codes used in LZX.
541 * This takes as input the frequency tables for each code and produces as output
542 * a set of tables that map symbols to codewords and codeword lengths. */
544 lzx_make_huffman_codes(const struct lzx_freqs *freqs,
545 struct lzx_codes *codes,
546 unsigned num_main_syms)
548 make_canonical_huffman_code(num_main_syms,
549 LZX_MAX_MAIN_CODEWORD_LEN,
552 codes->codewords.main);
554 make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
555 LZX_MAX_LEN_CODEWORD_LEN,
558 codes->codewords.len);
560 make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
561 LZX_MAX_ALIGNED_CODEWORD_LEN,
564 codes->codewords.aligned);
568 * Output an LZX match.
570 * @out: The bitstream to write the match to.
571 * @block_type: The type of the LZX block (LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM)
573 * @codes: Pointer to a structure that contains the codewords for the
574 * main, length, and aligned offset Huffman codes.
577 lzx_write_match(struct output_bitstream *out, int block_type,
578 struct lzx_match match, const struct lzx_codes *codes)
580 /* low 8 bits are the match length minus 2 */
581 unsigned match_len_minus_2 = match.data & 0xff;
582 /* Next 17 bits are the position footer */
583 unsigned position_footer = (match.data >> 8) & 0x1ffff; /* 17 bits */
584 /* Next 6 bits are the position slot. */
585 unsigned position_slot = (match.data >> 25) & 0x3f; /* 6 bits */
588 unsigned main_symbol;
589 unsigned num_extra_bits;
590 unsigned verbatim_bits;
591 unsigned aligned_bits;
593 /* If the match length is less than MIN_MATCH_LEN (= 2) +
594 * NUM_PRIMARY_LENS (= 7), the length header contains
595 * the match length minus MIN_MATCH_LEN, and there is no length footer.
598 * Otherwise, the length header contains
599 * NUM_PRIMARY_LENS, and the length footer contains
600 * the match length minus NUM_PRIMARY_LENS minus MIN_MATCH_LEN. */
602 if (match_len_minus_2 < LZX_NUM_PRIMARY_LENS) {
603 len_header = match_len_minus_2;
604 /* No length footer-- mark it with a special value. */
606 len_footer = (unsigned)(-1);
608 len_header = LZX_NUM_PRIMARY_LENS;
609 len_footer = match_len_minus_2 - LZX_NUM_PRIMARY_LENS;
612 /* Combine the position slot with the length header into a single symbol
613 * that will be encoded with the main code.
615 * The actual main symbol is offset by LZX_NUM_CHARS because values
616 * under LZX_NUM_CHARS are used to indicate a literal byte rather than a match. */
618 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
620 /* Output main symbol. */
621 bitstream_put_bits(out, codes->codewords.main[main_symbol],
622 codes->lens.main[main_symbol]);
624 /* If there is a length footer, output it using the
625 * length Huffman code. */
626 if (len_footer != (unsigned)(-1)) {
627 bitstream_put_bits(out, codes->codewords.len[len_footer],
628 codes->lens.len[len_footer]);
631 num_extra_bits = lzx_get_num_extra_bits(position_slot);
633 /* For aligned offset blocks with at least 3 extra bits, output the
634 * verbatim bits literally, then the aligned bits encoded using the
635 * aligned offset code. Otherwise, only the verbatim bits need to be output. */
637 if ((block_type == LZX_BLOCKTYPE_ALIGNED) && (num_extra_bits >= 3)) {
639 verbatim_bits = position_footer >> 3;
640 bitstream_put_bits(out, verbatim_bits,
643 aligned_bits = (position_footer & 7);
644 bitstream_put_bits(out,
645 codes->codewords.aligned[aligned_bits],
646 codes->lens.aligned[aligned_bits]);
648 /* verbatim bits is the same as the position
649 * footer, in this case. */
650 bitstream_put_bits(out, position_footer, num_extra_bits);
655 lzx_build_precode(const u8 lens[restrict],
656 const u8 prev_lens[restrict],
657 const unsigned num_syms,
658 input_idx_t precode_freqs[restrict LZX_PRECODE_NUM_SYMBOLS],
659 u8 output_syms[restrict num_syms],
660 u8 precode_lens[restrict LZX_PRECODE_NUM_SYMBOLS],
661 u16 precode_codewords[restrict LZX_PRECODE_NUM_SYMBOLS],
662 unsigned *num_additional_bits_ret)
664 memset(precode_freqs, 0,
665 LZX_PRECODE_NUM_SYMBOLS * sizeof(precode_freqs[0]));
667 /* Since the code word lengths use a form of RLE encoding, the goal here
668 * is to find each run of identical lengths when going through them in
669 * symbol order (including runs of length 1). For each run, as many
670 * lengths are encoded using RLE as possible, and the rest are output without RLE (see the worked example after this function).
673 * output_syms[] will be filled in with the length symbols that will be
674 * output, including RLE codes, not yet encoded using the precode.
676 * cur_run_len keeps track of how many code word lengths are in the
677 * current run of identical lengths. */
678 unsigned output_syms_idx = 0;
679 unsigned cur_run_len = 1;
680 unsigned num_additional_bits = 0;
681 for (unsigned i = 1; i <= num_syms; i++) {
683 if (i != num_syms && lens[i] == lens[i - 1]) {
684 /* Still in a run--- keep going. */
689 /* Run ended! Check if it is a run of zeroes or a run of nonzeroes. */
692 /* The symbol that was repeated in the run--- not to be confused
693 * with the length *of* the run (cur_run_len) */
694 unsigned len_in_run = lens[i - 1];
696 if (len_in_run == 0) {
697 /* A run of 0's. Encode it in as few length
698 * codes as we can. */
700 /* The magic length 18 indicates a run of 20 + n zeroes,
701 * where n is an uncompressed literal 5-bit integer that
702 * follows the magic length. */
703 while (cur_run_len >= 20) {
704 unsigned additional_bits;
706 additional_bits = min(cur_run_len - 20, 0x1f);
707 num_additional_bits += 5;
709 output_syms[output_syms_idx++] = 18;
710 output_syms[output_syms_idx++] = additional_bits;
711 cur_run_len -= 20 + additional_bits;
714 /* The magic length 17 indicates a run of 4 + n zeroes,
715 * where n is an uncompressed literal 4-bit integer that
716 * follows the magic length. */
717 while (cur_run_len >= 4) {
718 unsigned additional_bits;
720 additional_bits = min(cur_run_len - 4, 0xf);
721 num_additional_bits += 4;
723 output_syms[output_syms_idx++] = 17;
724 output_syms[output_syms_idx++] = additional_bits;
725 cur_run_len -= 4 + additional_bits;
730 /* A run of nonzero lengths. */
732 /* The magic length 19 indicates a run of 4 + n
733 * nonzeroes, where n is a literal bit that follows the
734 * magic length, and where the value of the lengths in
735 * the run is given by an extra length symbol, encoded
736 * with the precode, that follows the literal bit.
738 * The extra length symbol is encoded as a difference
739 * from the length of the codeword for the first symbol
740 * in the run in the previous code.
742 while (cur_run_len >= 4) {
743 unsigned additional_bits;
746 additional_bits = (cur_run_len > 4);
747 num_additional_bits += 1;
748 delta = (signed char)prev_lens[i - cur_run_len] -
749 (signed char)len_in_run;
753 precode_freqs[(unsigned char)delta]++;
754 output_syms[output_syms_idx++] = 19;
755 output_syms[output_syms_idx++] = additional_bits;
756 output_syms[output_syms_idx++] = delta;
757 cur_run_len -= 4 + additional_bits;
761 /* Any remaining lengths in the run are output without RLE,
762 * as a difference from the length of that codeword in the previous code. */
764 while (cur_run_len > 0) {
767 delta = (signed char)prev_lens[i - cur_run_len] -
768 (signed char)len_in_run;
772 precode_freqs[(unsigned char)delta]++;
773 output_syms[output_syms_idx++] = delta;
780 /* Build the precode from the frequencies of the length symbols. */
782 make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
783 LZX_MAX_PRE_CODEWORD_LEN,
784 precode_freqs, precode_lens,
787 *num_additional_bits_ret = num_additional_bits;
789 return output_syms_idx;
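
/* An illustrative worked example of the run-length scheme implemented above
 * (not from the original sources).  Suppose prev_lens[] is all zeroes and the
 * lengths being encoded begin with five 5's followed by twenty-five 0's:
 *
 *   - The run of five 5's is emitted as precode symbol 19, one extra bit set
 *     to 1 (run length 4 + 1 = 5), then the delta symbol
 *     (0 - 5 + 17) % 17 = 12, itself encoded with the precode.
 *
 *   - The run of twenty-five 0's is emitted as precode symbol 18 followed by
 *     the 5-bit value 5 (run length 20 + 5 = 25).
 *
 * Shorter zero runs would instead use symbol 17 (runs of 4 to 19 zeroes), and
 * any leftover lengths are emitted one at a time as plain delta symbols. */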
793 * Writes a compressed Huffman code to the output, preceded by the precode for it.
796 * The Huffman code is represented in the output as a series of path lengths
797 * from which the canonical Huffman code can be reconstructed. The path lengths
798 * themselves are compressed using a separate Huffman code, the precode, which
799 * consists of LZX_PRECODE_NUM_SYMBOLS (= 20) symbols that cover all possible
800 * code lengths, plus extra codes for repeated lengths. The path lengths of the
801 * precode precede the path lengths of the larger code and are uncompressed,
802 * consisting of 20 entries of 4 bits each.
804 * @out: Bitstream to write the code to.
805 * @lens: The code lengths for the Huffman code, indexed by symbol.
806 * @prev_lens: Code lengths for this Huffman code, indexed by symbol,
807 * in the *previous block*, or all zeroes if this is the first block.
809 * @num_syms: The number of symbols in the code.
812 lzx_write_compressed_code(struct output_bitstream *out,
813 const u8 lens[restrict],
814 const u8 prev_lens[restrict],
817 input_idx_t precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
818 u8 output_syms[num_syms];
819 u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
820 u16 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
822 unsigned num_output_syms;
826 num_output_syms = lzx_build_precode(lens,
835 /* Write the lengths of the precode codes to the output. */
836 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
837 bitstream_put_bits(out, precode_lens[i],
838 LZX_PRECODE_ELEMENT_SIZE);
840 /* Write the length symbols, encoded with the precode, to the output. */
842 for (i = 0; i < num_output_syms; ) {
843 precode_sym = output_syms[i++];
845 bitstream_put_bits(out, precode_codewords[precode_sym],
846 precode_lens[precode_sym]);
847 switch (precode_sym) {
849 bitstream_put_bits(out, output_syms[i++], 4);
852 bitstream_put_bits(out, output_syms[i++], 5);
855 bitstream_put_bits(out, output_syms[i++], 1);
856 bitstream_put_bits(out,
857 precode_codewords[output_syms[i]],
858 precode_lens[output_syms[i]]);
868 * Writes all compressed matches and literal bytes in an LZX block to the output bitstream.
872 * The output bitstream.
874 * The type of the block (LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM).
876 * The array of matches/literals that will be output (length @match_count).
878 * Number of matches/literals to be output.
880 * Pointer to a structure that contains the codewords for the main, length,
881 * and aligned offset Huffman codes.
884 lzx_write_matches_and_literals(struct output_bitstream *ostream,
886 const struct lzx_match match_tab[],
887 unsigned match_count,
888 const struct lzx_codes *codes)
890 for (unsigned i = 0; i < match_count; i++) {
891 struct lzx_match match = match_tab[i];
893 /* High bit of the match indicates whether the match is an
894 * actual match (1) or a literal uncompressed byte (0) */
895 if (match.data & 0x80000000) {
897 lzx_write_match(ostream, block_type,
901 bitstream_put_bits(ostream,
902 codes->codewords.main[match.data],
903 codes->lens.main[match.data]);
909 lzx_assert_codes_valid(const struct lzx_codes * codes, unsigned num_main_syms)
911 #ifdef ENABLE_LZX_DEBUG
914 for (i = 0; i < num_main_syms; i++)
915 LZX_ASSERT(codes->lens.main[i] <= LZX_MAX_MAIN_CODEWORD_LEN);
917 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
918 LZX_ASSERT(codes->lens.len[i] <= LZX_MAX_LEN_CODEWORD_LEN);
920 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
921 LZX_ASSERT(codes->lens.aligned[i] <= LZX_MAX_ALIGNED_CODEWORD_LEN);
923 const unsigned tablebits = 10;
924 u16 decode_table[(1 << tablebits) +
925 (2 * max(num_main_syms, LZX_LENCODE_NUM_SYMBOLS))]
926 _aligned_attribute(DECODE_TABLE_ALIGNMENT);
927 LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
929 min(tablebits, LZX_MAINCODE_TABLEBITS),
931 LZX_MAX_MAIN_CODEWORD_LEN));
932 LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
933 LZX_LENCODE_NUM_SYMBOLS,
934 min(tablebits, LZX_LENCODE_TABLEBITS),
936 LZX_MAX_LEN_CODEWORD_LEN));
937 LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
938 LZX_ALIGNEDCODE_NUM_SYMBOLS,
939 min(tablebits, LZX_ALIGNEDCODE_TABLEBITS),
941 LZX_MAX_ALIGNED_CODEWORD_LEN));
942 #endif /* ENABLE_LZX_DEBUG */
945 /* Write an LZX aligned offset or verbatim block to the output. */
947 lzx_write_compressed_block(int block_type,
949 unsigned max_window_size,
950 unsigned num_main_syms,
951 struct lzx_match * chosen_matches,
952 unsigned num_chosen_matches,
953 const struct lzx_codes * codes,
954 const struct lzx_codes * prev_codes,
955 struct output_bitstream * ostream)
959 LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
960 block_type == LZX_BLOCKTYPE_VERBATIM);
961 lzx_assert_codes_valid(codes, num_main_syms);
963 /* The first three bits indicate the type of block and are one of the
964 * LZX_BLOCKTYPE_* constants. */
965 bitstream_put_bits(ostream, block_type, 3);
967 /* Output the block size.
969 * The original LZX format seemed to always encode the block size in 3
970 * bytes. However, the implementation in WIMGAPI, as used in WIM files,
971 * uses the first bit to indicate whether the block is the default size
972 * (32768) or a different size given explicitly by the next 16 bits.
974 * By default, this compressor uses a window size of 32768 and therefore
975 * follows the WIMGAPI behavior. However, this compressor also supports
976 * window sizes greater than 32768 bytes, which do not appear to be
977 * supported by WIMGAPI. In such cases, we retain the default size bit
978 * to mean a size of 32768 bytes but output non-default block size in 24
979 * bits rather than 16. The compatibility of this behavior is unknown
980 * because WIMs created with chunk size greater than 32768 can seemingly
981 * only be opened by wimlib anyway. */
982 if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
983 bitstream_put_bits(ostream, 1, 1);
985 bitstream_put_bits(ostream, 0, 1);
987 if (max_window_size >= 65536)
988 bitstream_put_bits(ostream, block_size >> 16, 8);
990 bitstream_put_bits(ostream, block_size, 16);
993 /* Write out lengths of the main code. Note that the LZX specification
994 * incorrectly states that the aligned offset code comes after the
995 * length code, but in fact it is the very first code to be written
996 * (before the main code). */
997 if (block_type == LZX_BLOCKTYPE_ALIGNED)
998 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
999 bitstream_put_bits(ostream, codes->lens.aligned[i],
1000 LZX_ALIGNEDCODE_ELEMENT_SIZE);
1002 LZX_DEBUG("Writing main code...");
1004 /* Write the precode and lengths for the first LZX_NUM_CHARS symbols in
1005 * the main code, which are the codewords for literal bytes. */
1006 lzx_write_compressed_code(ostream,
1008 prev_codes->lens.main,
1011 /* Write the precode and lengths for the rest of the main code, which
1012 * are the codewords for match headers. */
1013 lzx_write_compressed_code(ostream,
1014 codes->lens.main + LZX_NUM_CHARS,
1015 prev_codes->lens.main + LZX_NUM_CHARS,
1016 num_main_syms - LZX_NUM_CHARS);
1018 LZX_DEBUG("Writing length code...");
1020 /* Write the precode and lengths for the length code. */
1021 lzx_write_compressed_code(ostream,
1023 prev_codes->lens.len,
1024 LZX_LENCODE_NUM_SYMBOLS);
1026 LZX_DEBUG("Writing matches and literals...");
1028 /* Write the actual matches and literals. */
1029 lzx_write_matches_and_literals(ostream, block_type,
1030 chosen_matches, num_chosen_matches,
1033 LZX_DEBUG("Done writing block.");
1036 /* Write out the LZX blocks that were computed. */
1038 lzx_write_all_blocks(struct lzx_compressor *ctx, struct output_bitstream *ostream)
1041 const struct lzx_codes *prev_codes = &ctx->zero_codes;
1042 for (unsigned i = 0; i < ctx->num_blocks; i++) {
1043 const struct lzx_block_spec *spec = &ctx->block_specs[i];
1045 LZX_DEBUG("Writing block %u/%u (type=%d, size=%u, num_chosen_matches=%u)...",
1046 i + 1, ctx->num_blocks,
1047 spec->block_type, spec->block_size,
1048 spec->num_chosen_matches);
1050 lzx_write_compressed_block(spec->block_type,
1052 ctx->max_window_size,
1054 &ctx->chosen_matches[spec->chosen_matches_start_pos],
1055 spec->num_chosen_matches,
1060 prev_codes = &spec->codes;
1064 /* Constructs an LZX match from a literal byte and updates the main code symbol frequencies. */
1067 lzx_tally_literal(u8 lit, struct lzx_freqs *freqs)
1073 /* Constructs an LZX match from an offset and a length, and updates the LRU
1074 * queue and the frequency of symbols in the main, length, and aligned offset
1075 * alphabets. The return value is a 32-bit number that provides the match in an
1076 * intermediate representation documented below. */
1078 lzx_tally_match(unsigned match_len, unsigned match_offset,
1079 struct lzx_freqs *freqs, struct lzx_lru_queue *queue)
1081 unsigned position_slot;
1082 unsigned position_footer;
1084 unsigned main_symbol;
1085 unsigned len_footer;
1086 unsigned adjusted_match_len;
1088 LZX_ASSERT(match_len >= LZX_MIN_MATCH_LEN && match_len <= LZX_MAX_MATCH_LEN);
1090 /* The match offset shall be encoded as a position slot (itself encoded
1091 * as part of the main symbol) and a position footer. */
1092 position_slot = lzx_get_position_slot(match_offset, queue);
1093 position_footer = (match_offset + LZX_OFFSET_OFFSET) &
1094 ((1U << lzx_get_num_extra_bits(position_slot)) - 1);
1096 /* The match length shall be encoded as a length header (itself encoded
1097 * as part of the main symbol) and an optional length footer. */
1098 adjusted_match_len = match_len - LZX_MIN_MATCH_LEN;
1099 if (adjusted_match_len < LZX_NUM_PRIMARY_LENS) {
1100 /* No length footer needed. */
1101 len_header = adjusted_match_len;
1103 /* Length footer needed. It will be encoded using the length
1105 len_header = LZX_NUM_PRIMARY_LENS;
1106 len_footer = adjusted_match_len - LZX_NUM_PRIMARY_LENS;
1107 freqs->len[len_footer]++;
1110 /* Account for the main symbol. */
1111 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
1113 freqs->main[main_symbol]++;
1115 /* In an aligned offset block, 3 bits of the position footer are output
1116 * as an aligned offset symbol. Account for this, although we may
1117 * ultimately decide to output the block as verbatim. */
1119 /* The following check is equivalent to:
1121 * if (lzx_extra_bits[position_slot] >= 3)
1123 * Note that this correctly excludes position slots that correspond to
1124 * recent offsets. */
1125 if (position_slot >= 8)
1126 freqs->aligned[position_footer & 7]++;
1128 /* Pack the position slot, position footer, and match length into an
1129 * intermediate representation. See `struct lzx_match' for details.
1131 LZX_ASSERT(LZX_MAX_POSITION_SLOTS <= 64);
1132 LZX_ASSERT(lzx_get_num_extra_bits(LZX_MAX_POSITION_SLOTS - 1) <= 17);
1133 LZX_ASSERT(LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1 <= 256);
1135 LZX_ASSERT(position_slot <= (1U << (31 - 25)) - 1);
1136 LZX_ASSERT(position_footer <= (1U << (25 - 8)) - 1);
1137 LZX_ASSERT(adjusted_match_len <= (1U << (8 - 0)) - 1);
1139 (position_slot << 25) |
1140 (position_footer << 8) |
1141 (adjusted_match_len);
1144 struct lzx_record_ctx {
1145 struct lzx_freqs freqs;
1146 struct lzx_lru_queue queue;
1147 struct lzx_match *matches;
1151 lzx_record_match(unsigned len, unsigned offset, void *_ctx)
1153 struct lzx_record_ctx *ctx = _ctx;
1155 (ctx->matches++)->data = lzx_tally_match(len, offset, &ctx->freqs, &ctx->queue);
1159 lzx_record_literal(u8 lit, void *_ctx)
1161 struct lzx_record_ctx *ctx = _ctx;
1163 (ctx->matches++)->data = lzx_tally_literal(lit, &ctx->freqs);
1166 /* Returns the cost, in bits, to output a literal byte using the specified cost model. */
1169 lzx_literal_cost(u8 c, const struct lzx_costs * costs)
1171 return costs->main[c];
1174 /* Given a (length, offset) pair that could be turned into a valid LZX match as
1175 * well as costs for the codewords in the main, length, and aligned Huffman
1176 * codes, return the approximate number of bits it will take to represent this
1177 * match in the compressed output. Take into account the match offset LRU
1178 * queue and optionally update it. */
1180 lzx_match_cost(unsigned length, unsigned offset, const struct lzx_costs *costs,
1181 struct lzx_lru_queue *queue)
1183 unsigned position_slot;
1184 unsigned len_header, main_symbol;
1187 position_slot = lzx_get_position_slot(offset, queue);
1189 len_header = min(length - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
1190 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
1192 /* Account for main symbol. */
1193 cost += costs->main[main_symbol];
1195 /* Account for extra position information. */
1196 unsigned num_extra_bits = lzx_get_num_extra_bits(position_slot);
1197 if (num_extra_bits >= 3) {
1198 cost += num_extra_bits - 3;
1199 cost += costs->aligned[(offset + LZX_OFFSET_OFFSET) & 7];
1201 cost += num_extra_bits;
1204 /* Account for extra length information. */
1205 if (len_header == LZX_NUM_PRIMARY_LENS)
1206 cost += costs->len[length - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
1212 /* Fast heuristic cost evaluation to use in the inner loop of the match-finder.
1213 * Unlike lzx_match_cost() which does a true cost evaluation, this simply
1214 * prioritizes matches based on their offset. */
1216 lzx_match_cost_fast(unsigned offset, const struct lzx_lru_queue *queue)
1218 /* It seems well worth it to take the time to give priority to recently used offsets. */
1220 for (unsigned i = 0; i < LZX_NUM_RECENT_OFFSETS; i++)
1221 if (offset == queue->R[i])
1224 BUILD_BUG_ON(LZX_MAX_WINDOW_SIZE >= (block_cost_t)~0U);
1228 /* Set the cost model @ctx->costs from the Huffman codeword lengths specified in
1231 * The cost model and codeword lengths are almost the same thing, but the
1232 * Huffman codewords with length 0 correspond to symbols with zero frequency
1233 * that still need to be assigned actual costs. The specific values assigned
1234 * are arbitrary, but they should be fairly high (near the maximum codeword
1235 * length) to take into account the fact that uses of these symbols are expected to be rare. */
1238 lzx_set_costs(struct lzx_compressor * ctx, const struct lzx_lens * lens)
1241 unsigned num_main_syms = ctx->num_main_syms;
1244 for (i = 0; i < num_main_syms; i++) {
1245 ctx->costs.main[i] = lens->main[i];
1246 if (ctx->costs.main[i] == 0)
1247 ctx->costs.main[i] = ctx->params.alg_params.slow.main_nostat_cost;
1251 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
1252 ctx->costs.len[i] = lens->len[i];
1253 if (ctx->costs.len[i] == 0)
1254 ctx->costs.len[i] = ctx->params.alg_params.slow.len_nostat_cost;
1257 /* Aligned offset code */
1258 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1259 ctx->costs.aligned[i] = lens->aligned[i];
1260 if (ctx->costs.aligned[i] == 0)
1261 ctx->costs.aligned[i] = ctx->params.alg_params.slow.aligned_nostat_cost;
1265 /* Advance the suffix array match-finder to the next position. */
1267 lzx_lz_update_salink(input_idx_t i,
1268 const input_idx_t SA[restrict],
1269 const input_idx_t ISA[restrict],
1270 struct salink link[restrict])
1272 /* r = Rank of the suffix at the current position. */
1273 const input_idx_t r = ISA[i];
1275 /* next = rank of LOWEST ranked suffix that is ranked HIGHER than the
1276 * current suffix AND has a LOWER position, or -1 if none exists. */
1277 const input_idx_t next = link[r].next;
1279 /* prev = rank of HIGHEST ranked suffix that is ranked LOWER than the
1280 * current suffix AND has a LOWER position, or -1 if none exists. */
1281 const input_idx_t prev = link[r].prev;
1283 /* Link the suffix at the current position into the linked list that
1284 * contains all suffixes in the suffix array that appear at or
1285 * before the current position, sorted by rank.
1287 * Save the values of all fields we overwrite so that rollback is possible. */
1289 if (next != (input_idx_t)~0U) {
1291 link[next].prev = r;
1292 link[next].lcpprev = link[r].lcpnext;
1295 if (prev != (input_idx_t)~0U) {
1297 link[prev].next = r;
1298 link[prev].lcpnext = link[r].lcpprev;
1303 * Use the suffix array match-finder to retrieve a list of LZ matches at the
1306 * [in] @i Current position in the window.
1307 * [in] @SA Suffix array for the window.
1308 * [in] @ISA Inverse suffix array for the window.
1309 * [inout] @link Suffix array links used internally by the match-finder.
1310 * [out] @matches The (length, offset) pairs of the resulting matches will
1311 * be written here, sorted in decreasing order by
1312 * length. All returned lengths will be unique.
1313 * [in] @queue Recently used match offsets, used when evaluating the cost of matches.
1315 * [in] @min_match_len Minimum match length to return.
1316 * [in] @max_matches_to_consider Maximum number of matches to consider at this position.
1318 * [in] @max_matches_to_return Maximum number of matches to return.
1320 * The return value is the number of matches found and written to @matches.
1323 lzx_lz_get_matches(const input_idx_t i,
1324 const input_idx_t SA[const restrict],
1325 const input_idx_t ISA[const restrict],
1326 struct salink link[const restrict],
1327 struct raw_match matches[const restrict],
1328 const struct lzx_lru_queue * const restrict queue,
1329 const unsigned min_match_len,
1330 const u32 max_matches_to_consider,
1331 const u32 max_matches_to_return)
1333 /* r = Rank of the suffix at the current position. */
1334 const input_idx_t r = ISA[i];
1336 /* Prepare for searching the current position. */
1337 lzx_lz_update_salink(i, SA, ISA, link);
1339 /* L = rank of next suffix to the left;
1340 * R = rank of next suffix to the right;
1341 * lenL = length of match between current position and the suffix with rank L;
1342 * lenR = length of match between current position and the suffix with rank R.
1344 * This is left and right relative to the rank of the current suffix.
1345 * Since the suffixes in the suffix array are sorted, the longest
1346 * matches are immediately to the left and right (using the linked list
1347 * to ignore all suffixes that occur later in the window). The match
1348 * length decreases the farther left and right we go. We shall keep the
1349 * length on both sides in sync in order to choose the lowest-cost match of each length. */
1352 input_idx_t L = link[r].prev;
1353 input_idx_t R = link[r].next;
1354 input_idx_t lenL = link[r].lcpprev;
1355 input_idx_t lenR = link[r].lcpnext;
1357 /* nmatches = number of matches found so far. */
1358 unsigned nmatches = 0;
1360 /* best_cost = cost of lowest-cost match found so far.
1362 * We keep track of this so that we can ignore shorter matches that do
1363 * not have lower costs than longer matches already found.
1365 block_cost_t best_cost = INFINITE_BLOCK_COST;
1367 /* count_remaining = maximum number of possible matches remaining to be considered. */
1369 u32 count_remaining = max_matches_to_consider;
1371 /* pending = match currently being considered for a specific length. */
1372 struct raw_match pending;
1373 block_cost_t pending_cost;
1375 while (lenL >= min_match_len || lenR >= min_match_len)
1378 pending_cost = INFINITE_BLOCK_COST;
1382 if (lenL >= min_match_len && lenL >= lenR) {
1385 if (--count_remaining == 0)
1386 goto out_save_pending;
1388 input_idx_t offset = i - SA[L];
1390 /* Save match if it has smaller cost. */
1391 cost = lzx_match_cost_fast(offset, queue);
1392 if (cost < pending_cost) {
1393 pending.offset = offset;
1394 pending_cost = cost;
1397 if (link[L].lcpprev < lenL) {
1398 /* Match length decreased. */
1400 lenL = link[L].lcpprev;
1402 /* Save the pending match unless the
1403 * right side still may have matches of
1404 * this length to be scanned, or if a
1405 * previous (longer) match had lower cost. */
1407 if (pending.len > lenR) {
1408 if (pending_cost < best_cost) {
1409 best_cost = pending_cost;
1410 matches[nmatches++] = pending;
1411 if (nmatches == max_matches_to_return)
1415 pending_cost = INFINITE_BLOCK_COST;
1417 if (lenL < min_match_len || lenL < lenR)
1427 if (lenR >= min_match_len && lenR > lenL) {
1430 if (--count_remaining == 0)
1431 goto out_save_pending;
1433 input_idx_t offset = i - SA[R];
1435 /* Save match if it has smaller cost. */
1436 cost = lzx_match_cost_fast(offset, queue);
1437 if (cost < pending_cost) {
1438 pending.offset = offset;
1439 pending_cost = cost;
1442 if (link[R].lcpnext < lenR) {
1443 /* Match length decreased. */
1445 lenR = link[R].lcpnext;
1447 /* Save the pending match unless a
1448 * previous (longer) match had lower cost. */
1450 if (pending_cost < best_cost) {
1451 matches[nmatches++] = pending;
1452 best_cost = pending_cost;
1453 if (nmatches == max_matches_to_return)
1457 if (lenR < min_match_len || lenR <= lenL)
1461 pending_cost = INFINITE_BLOCK_COST;
1470 if (pending_cost != INFINITE_BLOCK_COST)
1471 matches[nmatches++] = pending;
1478 /* Tell the match-finder to skip the specified number of bytes (@n) in the input. */
1481 lzx_lz_skip_bytes(struct lzx_compressor *ctx, unsigned n)
1483 LZX_ASSERT(n <= ctx->match_window_end - ctx->match_window_pos);
1484 if (ctx->matches_cached) {
1485 ctx->match_window_pos += n;
1487 ctx->cached_matches_pos +=
1488 ctx->cached_matches[ctx->cached_matches_pos].len + 1;
1492 ctx->cached_matches[ctx->cached_matches_pos++].len = 0;
1493 lzx_lz_update_salink(ctx->match_window_pos++, ctx->SA,
1494 ctx->ISA, ctx->salink);
1499 /* Retrieve a list of matches available at the next position in the input.
1501 * The matches are written to ctx->matches in decreasing order of length, and
1502 * the return value is the number of matches found. */
1504 lzx_lz_get_matches_caching(struct lzx_compressor *ctx,
1505 const struct lzx_lru_queue *queue,
1506 struct raw_match **matches_ret)
1508 unsigned num_matches;
1509 struct raw_match *matches;
1511 LZX_ASSERT(ctx->match_window_pos <= ctx->match_window_end);
1513 matches = &ctx->cached_matches[ctx->cached_matches_pos + 1];
1515 if (ctx->matches_cached) {
1516 num_matches = matches[-1].len;
1518 unsigned min_match_len = LZX_MIN_MATCH_LEN;
1519 if (!ctx->params.alg_params.slow.use_len2_matches)
1520 min_match_len = max(min_match_len, 3);
1521 const u32 max_search_depth = ctx->params.alg_params.slow.max_search_depth;
1522 const u32 max_matches_per_pos = ctx->params.alg_params.slow.max_matches_per_pos;
1524 if (unlikely(max_search_depth == 0 || max_matches_per_pos == 0))
1527 num_matches = lzx_lz_get_matches(ctx->match_window_pos,
1535 max_matches_per_pos);
1536 matches[-1].len = num_matches;
1538 ctx->cached_matches_pos += num_matches + 1;
1539 *matches_ret = matches;
1541 /* Cap the length of returned matches to the number of bytes remaining,
1542 * if it is not the whole window. */
1543 if (ctx->match_window_end < ctx->window_size) {
1544 unsigned maxlen = ctx->match_window_end - ctx->match_window_pos;
1545 for (unsigned i = 0; i < num_matches; i++)
1546 if (matches[i].len > maxlen)
1547 matches[i].len = maxlen;
1550 fprintf(stderr, "Pos %u/%u: %u matches\n",
1551 ctx->match_window_pos, ctx->match_window_end, num_matches);
1552 for (unsigned i = 0; i < num_matches; i++)
1553 fprintf(stderr, "\tLen %u Offset %u\n", matches[i].len, matches[i].offset);
1556 #ifdef ENABLE_LZX_DEBUG
1557 for (unsigned i = 0; i < num_matches; i++) {
1558 LZX_ASSERT(matches[i].len >= LZX_MIN_MATCH_LEN);
1559 LZX_ASSERT(matches[i].len <= LZX_MAX_MATCH_LEN);
1560 LZX_ASSERT(matches[i].len <= ctx->match_window_end - ctx->match_window_pos);
1561 LZX_ASSERT(matches[i].offset > 0);
1562 LZX_ASSERT(matches[i].offset <= ctx->match_window_pos);
1563 LZX_ASSERT(!memcmp(&ctx->window[ctx->match_window_pos],
1564 &ctx->window[ctx->match_window_pos - matches[i].offset],
1569 ctx->match_window_pos++;
1574 * Reverse the linked list of near-optimal matches so that they can be returned
1575 * in forwards order.
1577 * Returns the first match in the list.
1579 static struct raw_match
1580 lzx_lz_reverse_near_optimal_match_list(struct lzx_compressor *ctx,
1583 unsigned prev_link, saved_prev_link;
1584 unsigned prev_match_offset, saved_prev_match_offset;
1586 ctx->optimum_end_idx = cur_pos;
1588 saved_prev_link = ctx->optimum[cur_pos].prev.link;
1589 saved_prev_match_offset = ctx->optimum[cur_pos].prev.match_offset;
1592 prev_link = saved_prev_link;
1593 prev_match_offset = saved_prev_match_offset;
1595 saved_prev_link = ctx->optimum[prev_link].prev.link;
1596 saved_prev_match_offset = ctx->optimum[prev_link].prev.match_offset;
1598 ctx->optimum[prev_link].next.link = cur_pos;
1599 ctx->optimum[prev_link].next.match_offset = prev_match_offset;
1601 cur_pos = prev_link;
1602 } while (cur_pos != 0);
1604 ctx->optimum_cur_idx = ctx->optimum[0].next.link;
1606 return (struct raw_match)
1607 { .len = ctx->optimum_cur_idx,
1608 .offset = ctx->optimum[0].next.match_offset,
1613 * lzx_lz_get_near_optimal_match() -
1615 * Choose the optimal match or literal to use at the next position in the input.
1617 * Unlike a greedy parser that always takes the longest match, or even a
1618 * parser with one match/literal look-ahead like zlib, the algorithm used here
1619 * may look ahead many matches/literals to determine the optimal match/literal to
1620 * output next. The motivation is that the compression ratio is improved if the
1621 * compressor can do things like use a shorter-than-possible match in order to
1622 * allow a longer match later, and also take into account the Huffman code cost
1623 * model rather than simply assuming that longer is better.
1625 * Still, this is not truly an optimal parser because very long matches are
1626 * taken immediately, and the raw match-finder takes some shortcuts. This is
1627 * done to avoid considering many different alternatives that are unlikely to
1628 * be significantly better.
1630 * This algorithm is based on that used in 7-Zip's DEFLATE encoder.
1632 * Each call to this function does one of two things:
1634 * 1. Build a near-optimal sequence of matches/literals, up to some point, that
1635 * will be returned by subsequent calls to this function, then return the first one.
1640 * 2. Return the next match/literal previously computed by a call to this function.
1643 * This function relies on the following state in the compressor context:
1645 * ctx->window (read-only: preprocessed data being compressed)
1646 * ctx->costs (read-only: cost model to use)
1647 * ctx->optimum (internal state; leave uninitialized)
1648 * ctx->optimum_cur_idx (must set to 0 before first call)
1649 * ctx->optimum_end_idx (must set to 0 before first call)
1651 * Plus any state used by the raw match-finder.
1653 * The return value is a (length, offset) pair specifying the match or literal
1654 * chosen. For literals, the length is less than LZX_MIN_MATCH_LEN and the
1655 * offset is meaningless.
1657 static struct raw_match
1658 lzx_lz_get_near_optimal_match(struct lzx_compressor * ctx)
1660 unsigned num_possible_matches;
1661 struct raw_match *possible_matches;
1662 struct raw_match match;
1663 unsigned longest_match_len;
1665 if (ctx->optimum_cur_idx != ctx->optimum_end_idx) {
1666 /* Case 2: Return the next match/literal already found. */
1667 match.len = ctx->optimum[ctx->optimum_cur_idx].next.link -
1668 ctx->optimum_cur_idx;
1669 match.offset = ctx->optimum[ctx->optimum_cur_idx].next.match_offset;
1671 ctx->optimum_cur_idx = ctx->optimum[ctx->optimum_cur_idx].next.link;
1675 /* Case 1: Compute a new list of matches/literals to return. */
1677 ctx->optimum_cur_idx = 0;
1678 ctx->optimum_end_idx = 0;
1680 /* Get matches at this position. */
1681 num_possible_matches = lzx_lz_get_matches_caching(ctx, &ctx->queue, &possible_matches);
1683 /* If no matches found, return literal. */
1684 if (num_possible_matches == 0)
1685 return (struct raw_match){ .len = 0 };
1687 /* The matches that were found are sorted in decreasing order by length.
1688 * Get the length of the longest one. */
1689 longest_match_len = possible_matches[0].len;
1691 /* Greedy heuristic: if the longest match that was found is greater
1692 * than the number of fast bytes, return it immediately; don't bother
1693 * doing more work. */
1694 if (longest_match_len > ctx->params.alg_params.slow.num_fast_bytes) {
1695 lzx_lz_skip_bytes(ctx, longest_match_len - 1);
1696 return possible_matches[0];
1699 /* Calculate the cost to reach the next position by outputting a literal. */
1701 ctx->optimum[0].queue = ctx->queue;
1702 ctx->optimum[1].queue = ctx->optimum[0].queue;
1703 ctx->optimum[1].cost = lzx_literal_cost(ctx->window[ctx->match_window_pos],
1705 ctx->optimum[1].prev.link = 0;
1707 /* Calculate the cost to reach any position up to and including that
1708 * reached by the longest match, using the shortest (i.e. closest) match
1709 * that reaches each position. */
1710 BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2);
1711 for (unsigned len = LZX_MIN_MATCH_LEN, match_idx = num_possible_matches - 1;
1712 len <= longest_match_len; len++) {
1714 LZX_ASSERT(match_idx < num_possible_matches);
1716 ctx->optimum[len].queue = ctx->optimum[0].queue;
1717 ctx->optimum[len].prev.link = 0;
1718 ctx->optimum[len].prev.match_offset = possible_matches[match_idx].offset;
1719 ctx->optimum[len].cost = lzx_match_cost(len,
1720 possible_matches[match_idx].offset,
1722 &ctx->optimum[len].queue);
1723 if (len == possible_matches[match_idx].len)
1727 unsigned cur_pos = 0;
1729 /* len_end: greatest index forward at which costs have been calculated so far. */
1731 unsigned len_end = longest_match_len;
1734 /* Advance to next position. */
1737 if (cur_pos == len_end || cur_pos == LZX_OPTIM_ARRAY_SIZE)
1738 return lzx_lz_reverse_near_optimal_match_list(ctx, cur_pos);
1740 /* retrieve the number of matches available at this position */
1741 num_possible_matches = lzx_lz_get_matches_caching(ctx, &ctx->optimum[cur_pos].queue,
1744 unsigned new_len = 0;
1746 if (num_possible_matches != 0) {
1747 new_len = possible_matches[0].len;
1749 /* Greedy heuristic: if we found a match greater than
1750 * the number of fast bytes, stop immediately. */
1751 if (new_len > ctx->params.alg_params.slow.num_fast_bytes) {
1753 /* Build the list of matches to return and get the first one. */
1755 match = lzx_lz_reverse_near_optimal_match_list(ctx, cur_pos);
1757 /* Append the long match to the end of the list. */
1758 ctx->optimum[cur_pos].next.match_offset =
1759 possible_matches[0].offset;
1760 ctx->optimum[cur_pos].next.link = cur_pos + new_len;
1761 ctx->optimum_end_idx = cur_pos + new_len;
1763 /* Skip over the remaining bytes of the long match. */
1764 lzx_lz_skip_bytes(ctx, new_len - 1);
1766 /* Return first match in the list */
1771 /* Consider proceeding with a literal byte. */
1772 block_cost_t cur_cost = ctx->optimum[cur_pos].cost;
1773 block_cost_t cur_plus_literal_cost = cur_cost +
1774 lzx_literal_cost(ctx->window[ctx->match_window_pos - 1],
1776 if (cur_plus_literal_cost < ctx->optimum[cur_pos + 1].cost) {
1777 ctx->optimum[cur_pos + 1].cost = cur_plus_literal_cost;
1778 ctx->optimum[cur_pos + 1].prev.link = cur_pos;
1779 ctx->optimum[cur_pos + 1].queue = ctx->optimum[cur_pos].queue;
1782 if (num_possible_matches == 0)
1785 /* Consider proceeding with a match. */
1787 while (len_end < cur_pos + new_len)
1788 ctx->optimum[++len_end].cost = INFINITE_BLOCK_COST;
1790 for (unsigned len = LZX_MIN_MATCH_LEN, match_idx = num_possible_matches - 1;
1791 len <= new_len; len++) {
1792 LZX_ASSERT(match_idx < num_possible_matches);
1793 struct lzx_lru_queue q = ctx->optimum[cur_pos].queue;
1794 block_cost_t cost = cur_cost + lzx_match_cost(len,
1795 possible_matches[match_idx].offset,
1799 if (cost < ctx->optimum[cur_pos + len].cost) {
1800 ctx->optimum[cur_pos + len].cost = cost;
1801 ctx->optimum[cur_pos + len].prev.link = cur_pos;
1802 ctx->optimum[cur_pos + len].prev.match_offset =
1803 possible_matches[match_idx].offset;
1804 ctx->optimum[cur_pos + len].queue = q;
1807 if (len == possible_matches[match_idx].len)
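/*
 * Illustrative sketch (not part of the original source): the forward
 * cost-propagation idea used by lzx_lz_get_near_optimal_match() above, reduced
 * to a toy setting.  cost[i] is the cheapest known bit cost to encode the
 * first i bytes; each reachable position relaxes cost[i + 1] with a literal
 * and cost[i + len] with one candidate match.  The flat 9-bit literal cost,
 * the flat 24-bit match cost, and the names toy_match/toy_min_cost_parse are
 * made-up stand-ins for the real per-symbol cost model (ctx->costs); the
 * backtracking of the chosen path (prev.link) is not shown.
 */
struct toy_match { unsigned len; };	/* offset omitted for brevity */

static unsigned
toy_min_cost_parse(const struct toy_match matches[], unsigned n)
{
	const unsigned TOY_INF = 0x3fffffffu;
	unsigned cost[256 + 1];

	if (n > 256)
		n = 256;
	cost[0] = 0;
	for (unsigned i = 1; i <= n; i++)
		cost[i] = TOY_INF;

	for (unsigned i = 0; i < n; i++) {
		unsigned len = matches[i].len;

		if (cost[i] == TOY_INF)
			continue;
		/* Literal: consume 1 byte for a hypothetical 9 bits. */
		if (cost[i] + 9 < cost[i + 1])
			cost[i + 1] = cost[i] + 9;
		/* Match: consume 'len' bytes for a hypothetical 24 bits. */
		if (len >= 2 && i + len <= n && cost[i] + 24 < cost[i + len])
			cost[i + len] = cost[i] + 24;
	}
	return cost[n];
}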
1814 /* Set default symbol costs. */
1817 lzx_set_default_costs(struct lzx_costs * costs, unsigned num_main_syms)
1821 /* Literal symbols */
1822 for (i = 0; i < LZX_NUM_CHARS; i++)
1825 /* Match header symbols */
1826 for (; i < num_main_syms; i++)
1827 costs->main[i] = 10;
1829 /* Length symbols */
1830 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1833 /* Aligned offset symbols */
1834 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1835 costs->aligned[i] = 3;
1838 /* Given the frequencies of symbols in a compressed block and the corresponding
1839 * Huffman codes, return LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM if an
1840 * aligned offset or verbatim block, respectively, will take fewer bits to output. */
1843 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1844 const struct lzx_codes * codes)
1846 unsigned aligned_cost = 0;
1847 unsigned verbatim_cost = 0;
1849 /* Verbatim blocks have a constant 3 bits per position footer. Aligned
1850 * offset blocks have an aligned offset symbol per position footer, plus
1851 * an extra 24 bits to output the lengths necessary to reconstruct the
1852 * aligned offset code itself. */
1853 for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1854 verbatim_cost += 3 * freqs->aligned[i];
1855 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1857 aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1858 if (aligned_cost < verbatim_cost)
1859 return LZX_BLOCKTYPE_ALIGNED;
1861 return LZX_BLOCKTYPE_VERBATIM;
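/*
 * Illustrative example for lzx_choose_verbatim_or_aligned() above (the numbers
 * are hypothetical, and LZX_ALIGNEDCODE_NUM_SYMBOLS == 8 with
 * LZX_ALIGNEDCODE_ELEMENT_SIZE == 3 is assumed, matching the 24 bits mentioned
 * in the comment): suppose a block contains 1000 position footers and the
 * aligned offset code assigns an average of 2.5 bits per footer.  Then
 *
 *	verbatim cost = 3 * 1000                = 3000 bits
 *	aligned cost  = 2.5 * 1000 + 8 * 3 bits = 2524 bits
 *
 * so the aligned offset block type is chosen; the extra 24 bits account for
 * the eight 3-bit code lengths needed to transmit the aligned offset code.
 */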
1864 /* Find a near-optimal sequence of matches/literals with which to output the
1865 * specified LZX block, then set its type to that which has the minimum cost to output. */
1868 lzx_optimize_block(struct lzx_compressor *ctx, struct lzx_block_spec *spec,
1869 unsigned num_passes)
1871 const struct lzx_lru_queue orig_queue = ctx->queue;
1872 struct lzx_freqs freqs;
1874 unsigned orig_window_pos = spec->window_pos;
1875 unsigned orig_cached_pos = ctx->cached_matches_pos;
1877 LZX_ASSERT(ctx->match_window_pos == spec->window_pos);
1879 ctx->match_window_end = spec->window_pos + spec->block_size;
1880 spec->chosen_matches_start_pos = spec->window_pos;
1882 LZX_ASSERT(num_passes >= 1);
1884 /* The first optimal parsing pass is done using the cost model already
1885 * set in ctx->costs. Each later pass is done using a cost model
1886 * computed from the previous pass. */
1887 for (unsigned pass = 0; pass < num_passes; pass++) {
1889 ctx->match_window_pos = orig_window_pos;
1890 ctx->cached_matches_pos = orig_cached_pos;
1891 ctx->queue = orig_queue;
1892 spec->num_chosen_matches = 0;
1893 memset(&freqs, 0, sizeof(freqs));
1895 for (unsigned i = spec->window_pos; i < spec->window_pos + spec->block_size; ) {
1896 struct raw_match raw_match;
1897 struct lzx_match lzx_match;
1899 raw_match = lzx_lz_get_near_optimal_match(ctx);
1900 if (raw_match.len >= LZX_MIN_MATCH_LEN) {
1901 lzx_match.data = lzx_tally_match(raw_match.len, raw_match.offset,
1902 &freqs, &ctx->queue);
1905 lzx_match.data = lzx_tally_literal(ctx->window[i], &freqs);
1908 ctx->chosen_matches[spec->chosen_matches_start_pos +
1909 spec->num_chosen_matches++] = lzx_match;
1912 lzx_make_huffman_codes(&freqs, &spec->codes,
1913 ctx->num_main_syms);
1914 if (pass < num_passes - 1)
1915 lzx_set_costs(ctx, &spec->codes.lens);
1916 ctx->matches_cached = true;
1918 spec->block_type = lzx_choose_verbatim_or_aligned(&freqs, &spec->codes);
1919 ctx->matches_cached = false;
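/*
 * Illustrative walk-through of the multi-pass loop in lzx_optimize_block()
 * above: with num_optim_passes == 2, the block is first parsed with the cost
 * model currently in ctx->costs (initially the defaults from
 * lzx_set_default_costs()); a Huffman code is built from the resulting symbol
 * frequencies, lzx_set_costs() turns its code lengths into the new cost model,
 * and the block is parsed a second time using the cached matches.  Because
 * spec->num_chosen_matches is reset each pass, only the final pass's choices
 * are kept and later written out.
 */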
1923 lzx_optimize_blocks(struct lzx_compressor *ctx)
1925 lzx_lru_queue_init(&ctx->queue);
1926 ctx->optimum_cur_idx = 0;
1927 ctx->optimum_end_idx = 0;
1929 const unsigned num_passes = ctx->params.alg_params.slow.num_optim_passes;
1931 for (unsigned i = 0; i < ctx->num_blocks; i++)
1932 lzx_optimize_block(ctx, &ctx->block_specs[i], num_passes);
1935 /* Initialize the suffix array match-finder for the specified input. */
1937 lzx_lz_init_matchfinder(const u8 T[const restrict],
1938 const input_idx_t n,
1939 input_idx_t SA[const restrict],
1940 input_idx_t ISA[const restrict],
1941 input_idx_t LCP[const restrict],
1942 struct salink link[const restrict],
1943 const unsigned max_match_len)
1945 /* Compute SA (Suffix Array). */
1948 /* ISA and link are used as temporary space. */
1949 BUILD_BUG_ON(LZX_MIN_WINDOW_SIZE * sizeof(ISA[0]) < 256 * sizeof(saidx_t));
1950 BUILD_BUG_ON(LZX_MIN_WINDOW_SIZE * 2 * sizeof(link[0]) < 256 * 256 * sizeof(saidx_t));
1952 if (sizeof(input_idx_t) == sizeof(saidx_t)) {
1953 divsufsort(T, SA, n, (saidx_t*)ISA, (saidx_t*)link);
1956 divsufsort(T, sa, n, (saidx_t*)ISA, (saidx_t*)link);
1957 for (input_idx_t i = 0; i < n; i++)
1962 #ifdef ENABLE_LZX_DEBUG
1966 /* Verify suffix array. */
1970 for (input_idx_t r = 0; r < n; r++) {
1971 input_idx_t i = SA[r];
1973 LZX_ASSERT(!found[i]);
1978 for (input_idx_t r = 0; r < n - 1; r++) {
1980 input_idx_t i1 = SA[r];
1981 input_idx_t i2 = SA[r + 1];
1983 input_idx_t n1 = n - i1;
1984 input_idx_t n2 = n - i2;
1986 LZX_ASSERT(memcmp(&T[i1], &T[i2], min(n1, n2)) <= 0);
1988 LZX_DEBUG("Verified SA (len %u)", n);
1989 #endif /* ENABLE_LZX_DEBUG */
1991 /* Compute ISA (Inverse Suffix Array) */
1992 for (input_idx_t r = 0; r < n; r++)
1995 /* Compute LCP (longest common prefix) array.
1997 * Algorithm adapted from Kasai et al. 2001: "Linear-Time
1998  * Longest-Common-Prefix Computation in Suffix Arrays and Its Applications". */
2002 for (input_idx_t i = 0; i < n; i++) {
2003 input_idx_t r = ISA[i];
2005 input_idx_t j = SA[r - 1];
2007 input_idx_t lim = min(n - i, n - j);
2009 while (h < lim && T[i + h] == T[j + h])
2018 #ifdef ENABLE_LZX_DEBUG
2019 /* Verify LCP array. */
2020 for (input_idx_t r = 0; r < n - 1; r++) {
2021 LZX_ASSERT(ISA[SA[r]] == r);
2022 LZX_ASSERT(ISA[SA[r + 1]] == r + 1);
2024 input_idx_t i1 = SA[r];
2025 input_idx_t i2 = SA[r + 1];
2026 input_idx_t lcp = LCP[r + 1];
2028 input_idx_t n1 = n - i1;
2029 input_idx_t n2 = n - i2;
2031 LZX_ASSERT(lcp <= min(n1, n2));
2033 LZX_ASSERT(memcmp(&T[i1], &T[i2], lcp) == 0);
2034 if (lcp < min(n1, n2))
2035 LZX_ASSERT(T[i1 + lcp] != T[i2 + lcp]);
2037 #endif /* ENABLE_LZX_DEBUG */
2039 /* Compute salink.next and salink.lcpnext.
2041 * Algorithm adapted from Crochemore et al. 2009:
2042 * "LPF computation revisited".
2044 * Note: we cap lcpnext to the maximum match length so that the
2045 * match-finder need not worry about it later. */
2046 link[n - 1].next = (input_idx_t)~0U;
2047 link[n - 1].prev = (input_idx_t)~0U;
2048 link[n - 1].lcpnext = 0;
2049 link[n - 1].lcpprev = 0;
2050 for (input_idx_t r = n - 2; r != (input_idx_t)~0U; r--) {
2051 input_idx_t t = r + 1;
2052 input_idx_t l = LCP[t];
2053 while (t != (input_idx_t)~0 && SA[t] > SA[r]) {
2054 l = min(l, link[t].lcpnext);
2058 link[r].lcpnext = min(l, max_match_len);
2059 LZX_ASSERT(t == (input_idx_t)~0U || l <= n - SA[t]);
2060 LZX_ASSERT(l <= n - SA[r]);
2061 LZX_ASSERT(memcmp(&T[SA[r]], &T[SA[t]], l) == 0);
2064 /* Compute salink.prev and salink.lcpprev.
2066 * Algorithm adapted from Crochemore et al. 2009:
2067 * "LPF computation revisited".
2069 * Note: we cap lcpprev to the maximum match length so that the
2070 * match-finder need not worry about it later. */
2071 link[0].prev = (input_idx_t)~0;
2072 link[0].next = (input_idx_t)~0;
2073 link[0].lcpprev = 0;
2074 link[0].lcpnext = 0;
2075 for (input_idx_t r = 1; r < n; r++) {
2076 input_idx_t t = r - 1;
2077 input_idx_t l = LCP[r];
2078 while (t != (input_idx_t)~0 && SA[t] > SA[r]) {
2079 l = min(l, link[t].lcpprev);
2083 link[r].lcpprev = min(l, max_match_len);
2084 LZX_ASSERT(t == (input_idx_t)~0 || l <= n - SA[t]);
2085 LZX_ASSERT(l <= n - SA[r]);
2086 LZX_ASSERT(memcmp(&T[SA[r]], &T[SA[t]], l) == 0);
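/*
 * Illustrative, standalone sketch of Kasai et al.'s linear-time LCP
 * construction referenced in lzx_lz_init_matchfinder() above.  It is not part
 * of the original source: plain 'unsigned' replaces input_idx_t, the function
 * name is made up, and the rank[] and LCP[] buffers are assumed to hold at
 * least n entries each.  On return, LCP[r] is the length of the longest common
 * prefix of the suffixes starting at SA[r - 1] and SA[r], with LCP[0] == 0.
 */
static void
example_kasai_lcp(const unsigned char *T, const unsigned *SA,
		  unsigned *rank, unsigned *LCP, unsigned n)
{
	unsigned h = 0;

	for (unsigned r = 0; r < n; r++)
		rank[SA[r]] = r;	/* rank[] is the inverse suffix array */

	for (unsigned i = 0; i < n; i++) {
		if (rank[i] > 0) {
			unsigned j = SA[rank[i] - 1];

			while (i + h < n && j + h < n && T[i + h] == T[j + h])
				h++;
			LCP[rank[i]] = h;
			if (h > 0)
				h--;	/* the LCP drops by at most 1 per step */
		} else {
			LCP[0] = 0;
			h = 0;
		}
	}
}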
2090 /* Prepare the input window into one or more LZX blocks ready to be output. */
2092 lzx_prepare_blocks(struct lzx_compressor * ctx)
2094 /* Initialize the match-finder. */
2095 lzx_lz_init_matchfinder(ctx->window, ctx->window_size,
2096 ctx->SA, ctx->ISA, ctx->LCP, ctx->salink,
2098 ctx->cached_matches_pos = 0;
2099 ctx->matches_cached = false;
2100 ctx->match_window_pos = 0;
2102 /* Set up a default cost model. */
2103 lzx_set_default_costs(&ctx->costs, ctx->num_main_syms);
2105 ctx->num_blocks = DIV_ROUND_UP(ctx->window_size, LZX_DIV_BLOCK_SIZE);
2106 for (unsigned i = 0; i < ctx->num_blocks; i++) {
2107 unsigned pos = LZX_DIV_BLOCK_SIZE * i;
2108 ctx->block_specs[i].window_pos = pos;
2109 ctx->block_specs[i].block_size = min(ctx->window_size - pos, LZX_DIV_BLOCK_SIZE);
2112 /* Determine sequence of matches/literals to output for each block. */
2113 lzx_optimize_blocks(ctx);
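/*
 * Illustrative example for the block splitting above (assuming
 * LZX_DIV_BLOCK_SIZE == 32768, a value not shown in this excerpt): a
 * 100000-byte window is split into DIV_ROUND_UP(100000, 32768) = 4 blocks
 * with window positions 0, 32768, 65536 and 98304 and sizes 32768, 32768,
 * 32768 and 1696 bytes, which lzx_optimize_blocks() then processes in order.
 */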
2117 * This is the fast version of lzx_prepare_blocks(). This version "quickly"
2118 * prepares a single compressed block containing the entire input. See the
2119 * description of the "Fast algorithm" at the beginning of this file for more details.
2122 * Input --- the preprocessed data:
 *
 *	ctx->window
 *	ctx->window_size
 *
2127 * Output --- the block specification and the corresponding match/literal data:
 *
2129 *	ctx->block_specs[]
 *	ctx->num_blocks
2131 *	ctx->chosen_matches[]
 */
2134 lzx_prepare_block_fast(struct lzx_compressor * ctx)
2136 struct lzx_record_ctx record_ctx;
2137 struct lzx_block_spec *spec;
2139 /* Parameters to hash chain LZ match finder
2140 * (lazy with 1 match lookahead) */
2141 static const struct lz_params lzx_lz_params = {
2142 /* Although LZX_MIN_MATCH_LEN == 2, length 2 matches typically
2143 * aren't worth choosing when using greedy or lazy parsing. */
2145 .max_match = LZX_MAX_MATCH_LEN,
2146 .max_offset = 32768,
2147 .good_match = LZX_MAX_MATCH_LEN,
2148 .nice_match = LZX_MAX_MATCH_LEN,
2149 .max_chain_len = LZX_MAX_MATCH_LEN,
2150 .max_lazy_match = LZX_MAX_MATCH_LEN,
2154 /* Initialize symbol frequencies and match offset LRU queue. */
2155 memset(&record_ctx.freqs, 0, sizeof(struct lzx_freqs));
2156 lzx_lru_queue_init(&record_ctx.queue);
2157 record_ctx.matches = ctx->chosen_matches;
2159 /* Determine series of matches/literals to output. */
2160 lz_analyze_block(ctx->window,
2168 /* Set up block specification. */
2169 spec = &ctx->block_specs[0];
2170 spec->block_type = LZX_BLOCKTYPE_ALIGNED;
2171 spec->window_pos = 0;
2172 spec->block_size = ctx->window_size;
2173 spec->num_chosen_matches = (record_ctx.matches - ctx->chosen_matches);
2174 spec->chosen_matches_start_pos = 0;
2175 lzx_make_huffman_codes(&record_ctx.freqs, &spec->codes,
2176 ctx->num_main_syms);
2177 ctx->num_blocks = 1;
2181 do_call_insn_translation(u32 *call_insn_target, int input_pos,
2187 rel_offset = le32_to_cpu(*call_insn_target);
2188 if (rel_offset >= -input_pos && rel_offset < file_size) {
2189 if (rel_offset < file_size - input_pos) {
2190 /* "good translation" */
2191 abs_offset = rel_offset + input_pos;
2193 /* "compensating translation" */
2194 abs_offset = rel_offset - file_size;
2196 *call_insn_target = cpu_to_le32(abs_offset);
2200 /* This is the reverse of undo_call_insn_preprocessing() in lzx-decompress.c.
2201 * See the comment above that function for more information. */
2203 do_call_insn_preprocessing(u8 data[], int size)
2205 for (int i = 0; i < size - 10; i++) {
2206 if (data[i] == 0xe8) {
2207 do_call_insn_translation((u32*)&data[i + 1], i,
2208 LZX_WIM_MAGIC_FILESIZE);
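/*
 * Illustrative example for the call instruction preprocessing above (the
 * numbers are made up): if an 0xe8 byte is found at input position i = 0x1000
 * and the following little-endian 32-bit field holds the relative offset
 * 0x200, then 0x200 < file_size - 0x1000, so the field is rewritten to the
 * absolute offset 0x1000 + 0x200 = 0x1200 (a "good translation").  Relative
 * offsets that pass the range check but for which i + rel_offset would reach
 * or exceed file_size instead get the "compensating translation"
 * rel_offset - file_size, which the decompressor's inverse transform maps back
 * to the original value.
 */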
2214 /* API function documented in wimlib.h */
2216 wimlib_lzx_compress2(const void * const restrict uncompressed_data,
2217 unsigned const uncompressed_len,
2218 void * const restrict compressed_data,
2219 struct wimlib_lzx_context * const restrict lzx_ctx)
2221 struct lzx_compressor *ctx = (struct lzx_compressor*)lzx_ctx;
2222 struct output_bitstream ostream;
2223 input_idx_t compressed_len;
2225 if (uncompressed_len < 100) {
2226 LZX_DEBUG("Too small to bother compressing.");
2230 if (uncompressed_len > ctx->max_window_size) {
2231 LZX_DEBUG("Can't compress %u bytes using window of %u bytes!",
2232 uncompressed_len, ctx->max_window_size);
2236 LZX_DEBUG("Attempting to compress %u bytes...", uncompressed_len);
2238 /* The input data must be preprocessed. To avoid changing the original
2239 * input, copy it to a temporary buffer. */
2240 memcpy(ctx->window, uncompressed_data, uncompressed_len);
2241 ctx->window_size = uncompressed_len;
2243 /* This line is unnecessary; it just avoids inconsequential accesses of
2244 * uninitialized memory that would show up in memory-checking tools such as valgrind. */
2246 memset(&ctx->window[ctx->window_size], 0, 12);
2248 LZX_DEBUG("Preprocessing data...");
2250 /* Before doing any actual compression, do the call instruction (0xe8
2251 * byte) translation on the uncompressed data. */
2252 do_call_insn_preprocessing(ctx->window, ctx->window_size);
2254 LZX_DEBUG("Preparing blocks...");
2256 /* Prepare the compressed data. */
2257 if (ctx->params.algorithm == WIMLIB_LZX_ALGORITHM_FAST)
2258 lzx_prepare_block_fast(ctx);
2260 lzx_prepare_blocks(ctx);
2262 LZX_DEBUG("Writing compressed blocks...");
2264 /* Generate the compressed data. */
2265 init_output_bitstream(&ostream, compressed_data, ctx->window_size - 1);
2266 lzx_write_all_blocks(ctx, &ostream);
2268 LZX_DEBUG("Flushing bitstream...");
2269 compressed_len = flush_output_bitstream(&ostream);
2270 if (compressed_len == ~(input_idx_t)0) {
2271 LZX_DEBUG("Data did not compress to less than original length!");
2275 LZX_DEBUG("Done: compressed %u => %u bytes.",
2276 uncompressed_len, compressed_len);
2278 /* Verify that we really get the same thing back when decompressing.
2279 * Although this could be disabled by default in all cases, it only
2280 * takes around 2-3% of the running time of the slow algorithm to do the verification. */
2282 if (ctx->params.algorithm == WIMLIB_LZX_ALGORITHM_SLOW
2283 #if defined(ENABLE_LZX_DEBUG) || defined(ENABLE_VERIFY_COMPRESSION)
2288 /* The decompression buffer can be any temporary space that's no longer needed. */
2290 u8 *buf = (u8*)(ctx->SA ? ctx->SA : ctx->prev_tab);
2292 if (wimlib_lzx_decompress2(compressed_data, compressed_len,
2293 buf, uncompressed_len, ctx->max_window_size))
2295 ERROR("Failed to decompress data we "
2296 "compressed using LZX algorithm");
2301 if (memcmp(uncompressed_data, buf, uncompressed_len)) {
2302 ERROR("Data we compressed using LZX algorithm "
2303 "didn't decompress to original");
2308 return compressed_len;
2312 lzx_params_compatible(const struct wimlib_lzx_params *oldparams,
2313 const struct wimlib_lzx_params *newparams)
2315 return 0 == memcmp(oldparams, newparams, sizeof(struct wimlib_lzx_params));
2318 static struct wimlib_lzx_params lzx_user_default_params;
2319 static struct wimlib_lzx_params *lzx_user_default_params_ptr;
2322 lzx_params_valid(const struct wimlib_lzx_params *params)
2324 /* Validate parameters. */
2325 if (params->size_of_this != sizeof(struct wimlib_lzx_params)) {
2326 LZX_DEBUG("Invalid parameter structure size!");
2330 if (params->algorithm != WIMLIB_LZX_ALGORITHM_SLOW &&
2331 params->algorithm != WIMLIB_LZX_ALGORITHM_FAST)
2333 LZX_DEBUG("Invalid algorithm.");
2337 if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
2338 if (params->alg_params.slow.num_optim_passes < 1)
2340 LZX_DEBUG("Invalid number of optimization passes!");
2344 if (params->alg_params.slow.main_nostat_cost < 1 ||
2345 params->alg_params.slow.main_nostat_cost > 16)
2347 LZX_DEBUG("Invalid main_nostat_cost!");
2351 if (params->alg_params.slow.len_nostat_cost < 1 ||
2352 params->alg_params.slow.len_nostat_cost > 16)
2354 LZX_DEBUG("Invalid len_nostat_cost!");
2358 if (params->alg_params.slow.aligned_nostat_cost < 1 ||
2359 params->alg_params.slow.aligned_nostat_cost > 8)
2361 LZX_DEBUG("Invalid aligned_nostat_cost!");
2368 /* API function documented in wimlib.h */
2370 wimlib_lzx_set_default_params(const struct wimlib_lzx_params * params)
2373 if (!lzx_params_valid(params))
2374 return WIMLIB_ERR_INVALID_PARAM;
2375 lzx_user_default_params = *params;
2376 lzx_user_default_params_ptr = &lzx_user_default_params;
2378 lzx_user_default_params_ptr = NULL;
2383 /* API function documented in wimlib.h */
2385 wimlib_lzx_alloc_context(u32 window_size,
2386 const struct wimlib_lzx_params *params,
2387 struct wimlib_lzx_context **ctx_pp)
2390 LZX_DEBUG("Allocating LZX context...");
2392 if (!lzx_window_size_valid(window_size))
2393 return WIMLIB_ERR_INVALID_PARAM;
2395 struct lzx_compressor *ctx;
2397 static const struct wimlib_lzx_params fast_default = {
2398 .size_of_this = sizeof(struct wimlib_lzx_params),
2399 .algorithm = WIMLIB_LZX_ALGORITHM_FAST,
2406 static const struct wimlib_lzx_params slow_default = {
2407 .size_of_this = sizeof(struct wimlib_lzx_params),
2408 .algorithm = WIMLIB_LZX_ALGORITHM_SLOW,
2412 .use_len2_matches = 1,
2413 .num_fast_bytes = 32,
2414 .num_optim_passes = 2,
2415 .max_search_depth = 50,
2416 .max_matches_per_pos = 3,
2417 .main_nostat_cost = 15,
2418 .len_nostat_cost = 15,
2419 .aligned_nostat_cost = 7,
2425 if (!lzx_params_valid(params))
2426 return WIMLIB_ERR_INVALID_PARAM;
2428 LZX_DEBUG("Using default algorithm and parameters.");
2429 if (lzx_user_default_params_ptr)
2430 params = lzx_user_default_params_ptr;
2432 params = &slow_default;
2435 if (params->use_defaults) {
2436 if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW)
2437 params = &slow_default;
2439 params = &fast_default;
2443 ctx = *(struct lzx_compressor**)ctx_pp;
2446 lzx_params_compatible(&ctx->params, params) &&
2447 ctx->max_window_size == window_size)
2450 LZX_DEBUG("Check parameters only.");
2454 LZX_DEBUG("Allocating memory.");
2456 ctx = CALLOC(1, sizeof(struct lzx_compressor));
2460 ctx->num_main_syms = lzx_get_num_main_syms(window_size);
2461 ctx->max_window_size = window_size;
2462 ctx->window = MALLOC(window_size + 12);
2463 if (ctx->window == NULL)
2466 if (params->algorithm == WIMLIB_LZX_ALGORITHM_FAST) {
2467 ctx->prev_tab = MALLOC(window_size * sizeof(ctx->prev_tab[0]));
2468 if (ctx->prev_tab == NULL)
2472 size_t block_specs_length = DIV_ROUND_UP(window_size, LZX_DIV_BLOCK_SIZE);
2473 ctx->block_specs = MALLOC(block_specs_length * sizeof(ctx->block_specs[0]));
2474 if (ctx->block_specs == NULL)
2477 if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
2478 ctx->SA = MALLOC(3U * window_size * sizeof(ctx->SA[0]));
2479 if (ctx->SA == NULL)
2481 ctx->ISA = ctx->SA + window_size;
2482 ctx->LCP = ctx->ISA + window_size;
2484 ctx->salink = MALLOC(window_size * sizeof(ctx->salink[0]));
2485 if (ctx->salink == NULL)
2489 if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
2490 ctx->optimum = MALLOC((LZX_OPTIM_ARRAY_SIZE + LZX_MAX_MATCH_LEN) *
2491 sizeof(ctx->optimum[0]));
2492 if (ctx->optimum == NULL)
2496 if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
2499 cache_per_pos = params->alg_params.slow.max_matches_per_pos;
2500 if (cache_per_pos > LZX_MAX_CACHE_PER_POS)
2501 cache_per_pos = LZX_MAX_CACHE_PER_POS;
2503 ctx->cached_matches = MALLOC(window_size * (cache_per_pos + 1) *
2504 sizeof(ctx->cached_matches[0]));
2505 if (ctx->cached_matches == NULL)
2509 ctx->chosen_matches = MALLOC(window_size * sizeof(ctx->chosen_matches[0]));
2510 if (ctx->chosen_matches == NULL)
2513 memcpy(&ctx->params, params, sizeof(struct wimlib_lzx_params));
2514 memset(&ctx->zero_codes, 0, sizeof(ctx->zero_codes));
2516 LZX_DEBUG("Successfully allocated new LZX context.");
2518 wimlib_lzx_free_context(*ctx_pp);
2519 *ctx_pp = (struct wimlib_lzx_context*)ctx;
2523 wimlib_lzx_free_context((struct wimlib_lzx_context*)ctx);
2524 LZX_DEBUG("Ran out of memory.");
2525 return WIMLIB_ERR_NOMEM;
2528 /* API function documented in wimlib.h */
2530 wimlib_lzx_free_context(struct wimlib_lzx_context *_ctx)
2532 struct lzx_compressor *ctx = (struct lzx_compressor*)_ctx;
2535 FREE(ctx->chosen_matches);
2536 FREE(ctx->cached_matches);
2540 FREE(ctx->block_specs);
2541 FREE(ctx->prev_tab);
2547 /* API function documented in wimlib.h */
2549 wimlib_lzx_compress(const void * const restrict uncompressed_data,
2550 unsigned const uncompressed_len,
2551 void * const restrict compressed_data)
2554 struct wimlib_lzx_context *ctx = NULL;
2555 unsigned compressed_len;
2557 ret = wimlib_lzx_alloc_context(32768, NULL, &ctx);
2559 wimlib_assert(ret != WIMLIB_ERR_INVALID_PARAM);
2560 WARNING("Couldn't allocate LZX compression context: %"TS"",
2561 wimlib_get_error_string(ret));
2565 compressed_len = wimlib_lzx_compress2(uncompressed_data,
2570 wimlib_lzx_free_context(ctx);
2572 return compressed_len;
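/*
 * Illustrative caller sketch for the single-shot API above.  It is not from
 * the original source: the buffer names and sizes are arbitrary, "wimlib.h" is
 * assumed to declare wimlib_lzx_compress(), in_len is assumed to be at most
 * 32768 (the window size used by wimlib_lzx_compress() above), and a return
 * value of 0 is treated as "store the chunk uncompressed", as suggested by the
 * handling of incompressible data in wimlib_lzx_compress2() above.
 */
#include <stdio.h>
#include "wimlib.h"

static void
example_compress_chunk(const unsigned char *in, unsigned in_len)
{
	unsigned char out[32768];
	unsigned out_len;

	out_len = wimlib_lzx_compress(in, in_len, out);
	if (out_len == 0)
		printf("chunk stored uncompressed (%u bytes)\n", in_len);
	else
		printf("chunk compressed %u => %u bytes\n", in_len, out_len);
}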