4 * LZX compression routines
8 * Copyright (C) 2012, 2013 Eric Biggers
10 * This file is part of wimlib, a library for working with WIM files.
12 * wimlib is free software; you can redistribute it and/or modify it under the
13 * terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 3 of the License, or (at your option)
17 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
18 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
19 * A PARTICULAR PURPOSE. See the GNU General Public License for more
22 * You should have received a copy of the GNU General Public License
23 * along with wimlib; if not, see http://www.gnu.org/licenses/.
28 * This file contains a compressor for the LZX compression format, as used in
29 * the WIM file format.
34 * First, the primary reference for the LZX compression format is the
35 * specification released by Microsoft.
37 * Second, the comments in lzx-decompress.c provide some more information about
38 * the LZX compression format, including errors in the Microsoft specification.
40 * Do note that LZX shares many similarities with DEFLATE, the algorithm used by
41 * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding,
42 * and certain other details are quite similar, such as the method for storing
43 * Huffman codes. However, some of the main differences are:
45 * - LZX preprocesses the data to attempt to make x86 machine code slightly more
46 * compressible before compressing it further.
47 * - LZX uses a "main" alphabet which combines literals and matches, with the
48 * match symbols containing a "length header" (giving all or part of the match
49 * length) and a "position slot" (giving, roughly speaking, the order of
50 * magnitude of the match offset).
51 * - LZX does not have static Huffman blocks; however, it does have two types of
52 * dynamic Huffman blocks ("aligned offset" and "verbatim").
53 * - LZX has a minimum match length of 2 rather than 3.
54 * - In LZX, match offsets 0 through 2 actually represent entries in an LRU
55 * queue of match offsets. This is very useful for certain types of files,
56 * such as binary files that have repeating records.
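 *
 *   As a concrete illustration of the last point above, here is a minimal
 *   sketch of how such a repeat offset queue might behave.  The names below
 *   are hypothetical; in this file the real queue handling is done by
 *   lzx_get_position_slot() using `struct lzx_lru_queue'.
 *
 *	struct repeat_queue { unsigned R[3]; };  // three most recent offsets
 *
 *	// Encode a match offset: if it is one of the three most recent
 *	// offsets, only its queue index (0, 1, or 2) needs to be encoded.
 *	static unsigned
 *	encode_offset(struct repeat_queue *q, unsigned offset)
 *	{
 *		for (unsigned i = 0; i < 3; i++) {
 *			if (q->R[i] == offset) {
 *				unsigned tmp = q->R[0];  // swap to the front
 *				q->R[0] = q->R[i];
 *				q->R[i] = tmp;
 *				return i;
 *			}
 *		}
 *		// Not found: encode the offset explicitly (biased by 2 so
 *		// that the values 0-2 remain reserved for the queue; the
 *		// real format then splits this into a position slot and
 *		// footer) and push it onto the front of the queue.
 *		q->R[2] = q->R[1];
 *		q->R[1] = q->R[0];
 *		q->R[0] = offset;
 *		return offset + 2;
 *	}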
61 * There are actually two distinct overall algorithms implemented here. We
62 * shall refer to them as the "slow" algorithm and the "fast" algorithm. The
63 * "slow" algorithm spends more time compressing to achieve a higher compression
64 * ratio compared to the "fast" algorithm. More details are presented below.
69 * The "slow" algorithm to generate LZX-compressed data is roughly as follows:
71 * 1. Preprocess the input data to translate the targets of x86 call
72 * instructions to absolute offsets.
74 * 2. Build the suffix array and inverse suffix array for the input data. The
75 * suffix array contains the indices of all suffixes of the input data,
76 * sorted lexicographically by the corresponding suffixes. The "position" of
77 * a suffix is the index of that suffix in the original string, whereas the
78 * "rank" of a suffix is the index at which that suffix's position is found
79 * in the suffix array.
81 * 3. Build the longest common prefix array corresponding to the suffix array.
83 * 4. For each suffix, find the closest lower-ranked suffix that has a lower
84 * position, the closest higher-ranked suffix that has a lower position, and
85 * the length of the common prefix shared with each. This information is
86 * later used to link suffix ranks into a doubly-linked list for searching
89 * 5. Set a default cost model for matches/literals.
91 * 6. Determine the lowest cost sequence of LZ77 matches ((offset, length)
92 * pairs) and literal bytes to divide the input into. Raw match-finding is
93 * done by searching the suffix array using a linked list to avoid
94 * considering any suffixes that start after the current position. Each run
95 * of the match-finder returns the approximate lowest-cost longest match as
96 * well as any shorter matches that have even lower approximate costs. Each
97 * such run also adds the suffix rank of the current position into the linked
98 * list being used to search the suffix array. Parsing, or match-choosing,
99 * is solved as a minimum-cost path problem using a forward "optimal parsing"
100 * algorithm based on the Deflate encoder from 7-Zip. This algorithm moves
101 * forward calculating the minimum cost to reach each byte until either a
102 * very long match is found or until a position is found at which no matches
105 * 7. Build the Huffman codes needed to output the matches/literals.
107 * 8. Up to a certain number of iterations, use the resulting Huffman codes to
108 * refine a cost model and go back to Step #6 to determine an improved
109 * sequence of matches and literals.
111 * 9. Output the resulting block using the match/literal sequences and the
112 * Huffman codes that were computed for the block.
114 * Note: the algorithm does not yet attempt data-dependent block splitting; it
115 * instead divides the input into fixed-size blocks of LZX_DIV_BLOCK_SIZE bytes.
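 *
 *   To make the terminology in step 2 above concrete: if SA is the suffix
 *   array and ISA (the "rank" array) is its inverse, then the following
 *   invariants hold for every valid index (hypothetical names, n being the
 *   input size):
 *
 *	for (input_idx_t r = 0; r < n; r++)
 *		LZX_ASSERT(ISA[SA[r]] == r);   // SA[r] = position of rank r
 *	for (input_idx_t p = 0; p < n; p++)
 *		LZX_ASSERT(SA[ISA[p]] == p);   // ISA[p] = rank of position p
 *
 *   For example, for the input "abab" the lexicographically sorted suffixes
 *   are "ab" (position 2), "abab" (position 0), "b" (position 3), and "bab"
 *   (position 1), giving SA = {2, 0, 3, 1} and ISA = {1, 3, 0, 2}.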
120 * The fast algorithm (and the only one available in wimlib v1.5.1 and earlier)
121 * spends much less time on the main bottlenecks of the compression process ---
122 * that is, the match finding and match choosing. Matches are found using
123 * hash chains and chosen with a greedy parse with one position of look-ahead. No
124 * block splitting is done; only compressing the full input into an aligned
125 * offset block is considered.
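 *
 *   A minimal sketch of "a greedy parse with one position of look-ahead"
 *   (i.e. lazy matching) as used by the fast algorithm.  The helpers
 *   find_longest_match(), emit_literal() and emit_match() are hypothetical;
 *   the real implementation is the hash chain matcher in lz77.c:
 *
 *	unsigned pos = 0;
 *	while (pos < window_size) {
 *		unsigned off0, off1;
 *		unsigned len0 = find_longest_match(window, pos, &off0);
 *		if (len0 >= min_match_len) {
 *			// Peek at the next position; if it has a strictly
 *			// longer match, emit a literal now and defer the
 *			// decision by one byte.
 *			unsigned len1 = find_longest_match(window, pos + 1, &off1);
 *			if (len1 > len0) {
 *				emit_literal(window[pos]);
 *				pos++;
 *			} else {
 *				emit_match(len0, off0);
 *				pos += len0;
 *			}
 *		} else {
 *			emit_literal(window[pos]);
 *			pos++;
 *		}
 *	}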
130 * Acknowledgments to several open-source projects and research papers that made
131 * it possible to implement this code:
133 * - divsufsort (author: Yuta Mori), for the suffix array construction code,
134 * located in a separate file (divsufsort.c).
136 * - "Linear-Time Longest-Common-Prefix Computation in Suffix Arrays and Its
137 * Applications" (Kasai et al. 2001), for the LCP array computation.
139 * - "LPF computation revisited" (Crochemore et al. 2009) for the prev and next
140 * array computations.
142 * - 7-Zip (author: Igor Pavlov) for the algorithm for forward optimal parsing
145 * - zlib (authors: Jean-loup Gailly and Mark Adler), for the hash table
146 * match-finding algorithm (used in lz77.c).
148 * - lzx-compress (author: Matthew T. Russotto), on which some parts of this
149 * code were originally based.
157 #include "wimlib/compressor_ops.h"
158 #include "wimlib/compress_common.h"
159 #include "wimlib/endianness.h"
160 #include "wimlib/error.h"
161 #include "wimlib/lz_hash.h"
162 #include "wimlib/lz_sarray.h"
163 #include "wimlib/lzx.h"
164 #include "wimlib/util.h"
167 #ifdef ENABLE_LZX_DEBUG
168 # include "wimlib/decompress_common.h"
171 typedef u32 block_cost_t;
172 #define INFINITE_BLOCK_COST (~(block_cost_t)0)
174 #define LZX_OPTIM_ARRAY_SIZE 4096
176 #define LZX_DIV_BLOCK_SIZE 32768
178 #define LZX_MAX_CACHE_PER_POS 10
180 /* Codewords for the LZX main, length, and aligned offset Huffman codes */
181 struct lzx_codewords {
182 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
183 u32 len[LZX_LENCODE_NUM_SYMBOLS];
184 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
187 /* Codeword lengths (in bits) for the LZX main, length, and aligned offset
190 * A length of 0 means the corresponding symbol had zero frequency and has no codeword.
193 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
194 u8 len[LZX_LENCODE_NUM_SYMBOLS];
195 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
198 /* Costs for the LZX main, length, and aligned offset Huffman symbols.
200 * If a codeword has zero frequency, it must still be assigned some nonzero cost
201 * --- generally a high cost, since even if it gets used in the next iteration,
202 * it probably will not be used very many times. */
204 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
205 u8 len[LZX_LENCODE_NUM_SYMBOLS];
206 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
209 /* The LZX main, length, and aligned offset Huffman codes */
211 struct lzx_codewords codewords;
212 struct lzx_lens lens;
215 /* Tables for tallying symbol frequencies in the three LZX alphabets */
217 input_idx_t main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
218 input_idx_t len[LZX_LENCODE_NUM_SYMBOLS];
219 input_idx_t aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
222 /* LZX intermediate match/literal format */
226 * 31 1 if a match, 0 if a literal.
228 * 30-25 position slot. This can be at most 50, so it will fit in 6
231 * 8-24 position footer. This is the offset of the real formatted
232 * offset from the position base. This can be at most 17 bits
233 * (since lzx_extra_bits[LZX_MAX_POSITION_SLOTS - 1] is 17).
235 * 0-7 length of match, minus 2. This can be at most
236 * (LZX_MAX_MATCH_LEN - 2) == 255, so it will fit in 8 bits. */
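/*
 * Illustrative only: how the 32-bit intermediate representation described
 * above could be packed and unpacked.  These helper names are hypothetical;
 * the actual packing is done inline in lzx_tally_match() and the unpacking
 * inline in lzx_write_match().
 *
 *	static inline u32
 *	lzx_match_pack(unsigned position_slot, unsigned position_footer,
 *		       unsigned adjusted_len)
 *	{
 *		return 0x80000000 |		// bit 31: this is a match
 *		       (position_slot << 25) |	// bits 30-25: position slot
 *		       (position_footer << 8) |	// bits 24-8: position footer
 *		       adjusted_len;		// bits 7-0: match length - 2
 *	}
 *
 *	static inline int
 *	lzx_match_is_literal(u32 data)      { return !(data & 0x80000000);  }
 *	static inline unsigned
 *	lzx_match_position_slot(u32 data)   { return (data >> 25) & 0x3f;   }
 *	static inline unsigned
 *	lzx_match_position_footer(u32 data) { return (data >> 8) & 0x1ffff; }
 *	static inline unsigned
 *	lzx_match_adjusted_len(u32 data)    { return data & 0xff;           }
 */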
240 /* Specification for an LZX block. */
241 struct lzx_block_spec {
243 /* One of the LZX_BLOCKTYPE_* constants indicating which type of this
247 /* 0-based position in the window at which this block starts. */
248 input_idx_t window_pos;
250 /* The number of bytes of uncompressed data this block represents. */
251 input_idx_t block_size;
253 /* The position in the 'chosen_matches' array in the `struct
254 * lzx_compressor' at which the match/literal specifications for
255 * this block begin. */
256 input_idx_t chosen_matches_start_pos;
258 /* The number of match/literal specifications for this block. */
259 input_idx_t num_chosen_matches;
261 /* Huffman codes for this block. */
262 struct lzx_codes codes;
265 /* Include template for the match-choosing algorithm. */
266 #define LZ_COMPRESSOR struct lzx_compressor
267 #define LZ_ADAPTIVE_STATE struct lzx_lru_queue
268 struct lzx_compressor;
269 #include "wimlib/lz_optimal.h"
271 /* State of the LZX compressor. */
272 struct lzx_compressor {
274 /* The parameters that were used to create the compressor. */
275 struct wimlib_lzx_compressor_params params;
277 /* The buffer of data to be compressed.
279 * 0xe8 byte preprocessing is done directly on the data here before
280 * further compression.
282 * Note that this compressor does *not* use a real sliding window!!!!
283 * It's not needed in the WIM format, since every chunk is compressed
284 * independently. This is by design, to allow random access to the
287 * We reserve a few extra bytes to potentially allow reading off the end
288 * of the array in the match-finding code for optimization purposes
289 * (currently only needed for the hash chain match-finder). */
292 /* Number of bytes of data to be compressed, which is the number of
293 * bytes of data in @window that are actually valid. */
294 input_idx_t window_size;
296 /* Allocated size of the @window. */
297 input_idx_t max_window_size;
299 /* Number of symbols in the main alphabet (depends on the
300 * @max_window_size since it determines the maximum allowed offset). */
301 unsigned num_main_syms;
303 /* The current match offset LRU queue. */
304 struct lzx_lru_queue queue;
306 /* Space for the sequences of matches/literals that were chosen for each
308 struct lzx_match *chosen_matches;
310 /* Information about the LZX blocks the preprocessed input was divided
312 struct lzx_block_spec *block_specs;
314 /* Number of LZX blocks the input was divided into; a.k.a. the number of
315 * elements of @block_specs that are valid. */
318 /* This is simply filled in with zeroes and used to avoid special-casing
319 * the output of the first compressed Huffman code, which conceptually
320 * has a delta taken from a code with all symbols having zero-length
322 struct lzx_codes zero_codes;
324 /* The current cost model. */
325 struct lzx_costs costs;
327 /* Fast algorithm only: Array of hash table links. */
328 input_idx_t *prev_tab;
330 /* Slow algorithm only: Suffix array match-finder. */
331 struct lz_sarray lz_sarray;
333 /* Position in window of next match to return. */
334 input_idx_t match_window_pos;
336 /* The match-finder shall ensure the length of matches does not exceed
337 * this position in the input. */
338 input_idx_t match_window_end;
340 /* Matches found by the match-finder are cached in the following array
341 * to achieve a slight speedup when the same matches are needed on
342 * subsequent passes. This is suboptimal because different matches may
343 * be preferred with different cost models, but seems to be a worthwhile
345 struct raw_match *cached_matches;
346 unsigned cached_matches_pos;
350 struct lz_match_chooser mc;
353 /* Returns the LZX position slot that corresponds to a given match offset,
354 * taking into account the recent offset queue and updating it if the offset is
357 lzx_get_position_slot(unsigned offset, struct lzx_lru_queue *queue)
359 unsigned position_slot;
361 /* See if the offset was recently used. */
362 for (unsigned i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
363 if (offset == queue->R[i]) {
366 /* Bring the repeat offset to the front of the
367 * queue. Note: this is, in fact, not a real
368 * LRU queue because repeat matches are simply
369 * swapped to the front. */
370 swap(queue->R[0], queue->R[i]);
372 /* The resulting position slot is simply the first index
373 * at which the offset was found in the queue. */
378 /* The offset was not recently used; look up its real position slot. */
379 position_slot = lzx_get_position_slot_raw(offset + LZX_OFFSET_OFFSET);
381 /* Bring the new offset to the front of the queue. */
382 for (unsigned i = LZX_NUM_RECENT_OFFSETS - 1; i > 0; i--)
383 queue->R[i] = queue->R[i - 1];
384 queue->R[0] = offset;
386 return position_slot;
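/*
 * Example of the queue behavior above (illustrative offsets only): suppose
 * the queue currently holds R = {100, 40, 1}.
 *
 *   - lzx_get_position_slot(40, &queue) finds the offset at index 1, swaps it
 *     to the front (the queue becomes {40, 100, 1}), and the resulting
 *     position slot is 1, i.e. a repeat offset match.
 *
 *   - Starting again from {100, 40, 1}, lzx_get_position_slot(500, &queue)
 *     does not find 500 in the queue, so the slot comes from
 *     lzx_get_position_slot_raw(500 + LZX_OFFSET_OFFSET) and the queue
 *     becomes {500, 100, 40}.
 */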
389 /* Build the main, length, and aligned offset Huffman codes used in LZX.
391 * This takes as input the frequency tables for each code and produces as output
392 * a set of tables that map symbols to codewords and codeword lengths. */
394 lzx_make_huffman_codes(const struct lzx_freqs *freqs,
395 struct lzx_codes *codes,
396 unsigned num_main_syms)
398 make_canonical_huffman_code(num_main_syms,
399 LZX_MAX_MAIN_CODEWORD_LEN,
402 codes->codewords.main);
404 make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
405 LZX_MAX_LEN_CODEWORD_LEN,
408 codes->codewords.len);
410 make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
411 LZX_MAX_ALIGNED_CODEWORD_LEN,
414 codes->codewords.aligned);
418 * Output a precomputed LZX match.
421 * The bitstream to which to write the match.
423 * The type of the LZX block (LZX_BLOCKTYPE_ALIGNED or
424 * LZX_BLOCKTYPE_VERBATIM)
426 * The match, as a (length, offset) pair.
428 * Pointer to a structure that contains the codewords for the main, length,
429 * and aligned offset Huffman codes for the current LZX compressed block.
432 lzx_write_match(struct output_bitstream *out, int block_type,
433 struct lzx_match match, const struct lzx_codes *codes)
435 /* low 8 bits are the match length minus 2 */
436 unsigned match_len_minus_2 = match.data & 0xff;
437 /* Next 17 bits are the position footer */
438 unsigned position_footer = (match.data >> 8) & 0x1ffff; /* 17 bits */
439 /* Next 6 bits are the position slot. */
440 unsigned position_slot = (match.data >> 25) & 0x3f; /* 6 bits */
443 unsigned main_symbol;
444 unsigned num_extra_bits;
445 unsigned verbatim_bits;
446 unsigned aligned_bits;
448 /* If the match length is less than MIN_MATCH_LEN (= 2) +
449 * NUM_PRIMARY_LENS (= 7), the length header contains
450 * the match length minus MIN_MATCH_LEN, and there is no
453 * Otherwise, the length header contains
454 * NUM_PRIMARY_LENS, and the length footer contains
455 * the match length minus NUM_PRIMARY_LENS minus
457 if (match_len_minus_2 < LZX_NUM_PRIMARY_LENS) {
458 len_header = match_len_minus_2;
460 len_header = LZX_NUM_PRIMARY_LENS;
461 len_footer = match_len_minus_2 - LZX_NUM_PRIMARY_LENS;
464 /* Combine the position slot with the length header into a single symbol
465 * that will be encoded with the main code.
467 * The actual main symbol is offset by LZX_NUM_CHARS because values
468 * under LZX_NUM_CHARS are used to indicate a literal byte rather than a
470 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
472 /* Output main symbol. */
473 bitstream_put_bits(out, codes->codewords.main[main_symbol],
474 codes->lens.main[main_symbol]);
476 /* If there is a length footer, output it using the
477 * length Huffman code. */
478 if (len_header == LZX_NUM_PRIMARY_LENS)
479 bitstream_put_bits(out, codes->codewords.len[len_footer],
480 codes->lens.len[len_footer]);
482 num_extra_bits = lzx_get_num_extra_bits(position_slot);
484 /* For aligned offset blocks with at least 3 extra bits, output the
485 * verbatim bits literally, then the aligned bits encoded using the
486 * aligned offset code. Otherwise, only the verbatim bits need to be
488 if ((block_type == LZX_BLOCKTYPE_ALIGNED) && (num_extra_bits >= 3)) {
490 verbatim_bits = position_footer >> 3;
491 bitstream_put_bits(out, verbatim_bits,
494 aligned_bits = (position_footer & 7);
495 bitstream_put_bits(out,
496 codes->codewords.aligned[aligned_bits],
497 codes->lens.aligned[aligned_bits]);
499 /* The verbatim bits are the same as the position
500 * footer in this case. */
501 bitstream_put_bits(out, position_footer, num_extra_bits);
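/*
 * Worked example for the encoding above (illustrative numbers): consider a
 * match of length 12 whose position slot is 5.  Then match_len_minus_2 = 10,
 * which is not less than LZX_NUM_PRIMARY_LENS (7), so len_header = 7 and
 * len_footer = 3; the length footer is written with the length code.  The
 * main symbol is ((5 << 3) | 7) + LZX_NUM_CHARS = 47 + 256 = 303, written
 * with the main code and followed by the extra offset bits for slot 5.
 */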
505 /* Output an LZX literal (encoded with the main Huffman code). */
507 lzx_write_literal(struct output_bitstream *out, u8 literal,
508 const struct lzx_codes *codes)
510 bitstream_put_bits(out,
511 codes->codewords.main[literal],
512 codes->lens.main[literal]);
516 lzx_build_precode(const u8 lens[restrict],
517 const u8 prev_lens[restrict],
518 const unsigned num_syms,
519 input_idx_t precode_freqs[restrict LZX_PRECODE_NUM_SYMBOLS],
520 u8 output_syms[restrict num_syms],
521 u8 precode_lens[restrict LZX_PRECODE_NUM_SYMBOLS],
522 u32 precode_codewords[restrict LZX_PRECODE_NUM_SYMBOLS],
523 unsigned *num_additional_bits_ret)
525 memset(precode_freqs, 0,
526 LZX_PRECODE_NUM_SYMBOLS * sizeof(precode_freqs[0]));
528 /* Since the code word lengths use a form of RLE encoding, the goal here
529 * is to find each run of identical lengths when going through them in
530 * symbol order (including runs of length 1). For each run, as many
531 * lengths are encoded using RLE as possible, and the rest are output
534 * output_syms[] will be filled in with the length symbols that will be
535 * output, including RLE codes, not yet encoded using the precode.
537 * cur_run_len keeps track of how many code word lengths are in the
538 * current run of identical lengths. */
539 unsigned output_syms_idx = 0;
540 unsigned cur_run_len = 1;
541 unsigned num_additional_bits = 0;
542 for (unsigned i = 1; i <= num_syms; i++) {
544 if (i != num_syms && lens[i] == lens[i - 1]) {
545 /* Still in a run--- keep going. */
550 /* Run ended! Check if it is a run of zeroes or a run of
553 /* The symbol that was repeated in the run--- not to be confused
554 * with the length *of* the run (cur_run_len) */
555 unsigned len_in_run = lens[i - 1];
557 if (len_in_run == 0) {
558 /* A run of 0's. Encode it in as few length
559 * codes as we can. */
561 /* The magic length 18 indicates a run of 20 + n zeroes,
562 * where n is an uncompressed literal 5-bit integer that
563 * follows the magic length. */
564 while (cur_run_len >= 20) {
565 unsigned additional_bits;
567 additional_bits = min(cur_run_len - 20, 0x1f);
568 num_additional_bits += 5;
570 output_syms[output_syms_idx++] = 18;
571 output_syms[output_syms_idx++] = additional_bits;
572 cur_run_len -= 20 + additional_bits;
575 /* The magic length 17 indicates a run of 4 + n zeroes,
576 * where n is an uncompressed literal 4-bit integer that
577 * follows the magic length. */
578 while (cur_run_len >= 4) {
579 unsigned additional_bits;
581 additional_bits = min(cur_run_len - 4, 0xf);
582 num_additional_bits += 4;
584 output_syms[output_syms_idx++] = 17;
585 output_syms[output_syms_idx++] = additional_bits;
586 cur_run_len -= 4 + additional_bits;
591 /* A run of nonzero lengths. */
593 /* The magic length 19 indicates a run of 4 + n
594 * nonzeroes, where n is a literal bit that follows the
595 * magic length, and where the value of the lengths in
596 * the run is given by an extra length symbol, encoded
597 * with the precode, that follows the literal bit.
599 * The extra length symbol is encoded as a difference
600 * from the length of the codeword for the first symbol
601 * in the run in the previous code.
603 while (cur_run_len >= 4) {
604 unsigned additional_bits;
607 additional_bits = (cur_run_len > 4);
608 num_additional_bits += 1;
609 delta = (signed char)prev_lens[i - cur_run_len] -
610 (signed char)len_in_run;
614 precode_freqs[(unsigned char)delta]++;
615 output_syms[output_syms_idx++] = 19;
616 output_syms[output_syms_idx++] = additional_bits;
617 output_syms[output_syms_idx++] = delta;
618 cur_run_len -= 4 + additional_bits;
622 /* Any remaining lengths in the run are output without RLE,
623 * as a difference from the length of that codeword in the
625 while (cur_run_len > 0) {
628 delta = (signed char)prev_lens[i - cur_run_len] -
629 (signed char)len_in_run;
633 precode_freqs[(unsigned char)delta]++;
634 output_syms[output_syms_idx++] = delta;
641 /* Build the precode from the frequencies of the length symbols. */
643 make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
644 LZX_MAX_PRE_CODEWORD_LEN,
645 precode_freqs, precode_lens,
648 *num_additional_bits_ret = num_additional_bits;
650 return output_syms_idx;
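/*
 * Worked example of the run-length encoding above: suppose the lengths
 * contain a run of 25 zeroes.  The first loop emits the pair (18, 5), i.e.
 * magic length 18 followed by the 5-bit value 5, covering 20 + 5 = 25 zeroes,
 * and the run is fully consumed.  A run of only 7 zeroes would instead be
 * emitted as (17, 3), covering 4 + 3 = 7 zeroes.  Any leftover run shorter
 * than 4 is emitted as individual precode symbols, each a delta from the
 * corresponding codeword length in the previous code.
 */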
654 * Output a Huffman code in the compressed form used in LZX.
656 * The Huffman code is represented in the output as a logical series of codeword
657 * lengths from which the Huffman code, which must be in canonical form, can be
660 * The codeword lengths are themselves compressed using a separate Huffman code,
661 * the "precode", which contains a symbol for each possible codeword length in
662 * the larger code as well as several special symbols to represent repeated
663 * codeword lengths (a form of run-length encoding). The precode is itself
664 * constructed in canonical form, and its codeword lengths are represented
665 * literally in 20 4-bit fields that immediately precede the compressed codeword
666 * lengths of the larger code.
668 * Furthermore, the codeword lengths of the larger code are actually represented
669 * as deltas from the codeword lengths of the corresponding code in the previous
673 * Bitstream to which to write the compressed Huffman code.
675 * The codeword lengths, indexed by symbol, in the Huffman code.
677 * The codeword lengths, indexed by symbol, in the corresponding Huffman
678 * code in the previous block, or all zeroes if this is the first block.
680 * The number of symbols in the Huffman code.
683 lzx_write_compressed_code(struct output_bitstream *out,
684 const u8 lens[restrict],
685 const u8 prev_lens[restrict],
688 input_idx_t precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
689 u8 output_syms[num_syms];
690 u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
691 u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
693 unsigned num_output_syms;
697 num_output_syms = lzx_build_precode(lens,
706 /* Write the precode codeword lengths to the output. */
707 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
708 bitstream_put_bits(out, precode_lens[i],
709 LZX_PRECODE_ELEMENT_SIZE);
711 /* Write the length symbols, encoded with the precode, to the output. */
713 for (i = 0; i < num_output_syms; ) {
714 precode_sym = output_syms[i++];
716 bitstream_put_bits(out, precode_codewords[precode_sym],
717 precode_lens[precode_sym]);
718 switch (precode_sym) {
720 bitstream_put_bits(out, output_syms[i++], 4);
723 bitstream_put_bits(out, output_syms[i++], 5);
726 bitstream_put_bits(out, output_syms[i++], 1);
727 bitstream_put_bits(out,
728 precode_codewords[output_syms[i]],
729 precode_lens[output_syms[i]]);
739 * Write all matches and literal bytes (which were precomputed) in an LZX
740 * compressed block to the output bitstream in the final compressed
744 * The output bitstream.
746 * The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
747 * LZX_BLOCKTYPE_VERBATIM).
749 * The array of matches/literals to output.
751 * Number of matches/literals to output (length of @match_tab).
753 * The main, length, and aligned offset Huffman codes for the current
754 * LZX compressed block.
757 lzx_write_matches_and_literals(struct output_bitstream *ostream,
759 const struct lzx_match match_tab[],
760 unsigned match_count,
761 const struct lzx_codes *codes)
763 for (unsigned i = 0; i < match_count; i++) {
764 struct lzx_match match = match_tab[i];
766 /* The high bit of the 32-bit intermediate representation
767 * indicates whether the item is an actual LZ-style match (1) or
768 * a literal byte (0). */
769 if (match.data & 0x80000000)
770 lzx_write_match(ostream, block_type, match, codes);
772 lzx_write_literal(ostream, match.data, codes);
777 lzx_assert_codes_valid(const struct lzx_codes * codes, unsigned num_main_syms)
779 #ifdef ENABLE_LZX_DEBUG
782 for (i = 0; i < num_main_syms; i++)
783 LZX_ASSERT(codes->lens.main[i] <= LZX_MAX_MAIN_CODEWORD_LEN);
785 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
786 LZX_ASSERT(codes->lens.len[i] <= LZX_MAX_LEN_CODEWORD_LEN);
788 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
789 LZX_ASSERT(codes->lens.aligned[i] <= LZX_MAX_ALIGNED_CODEWORD_LEN);
791 const unsigned tablebits = 10;
792 u16 decode_table[(1 << tablebits) +
793 (2 * max(num_main_syms, LZX_LENCODE_NUM_SYMBOLS))]
794 _aligned_attribute(DECODE_TABLE_ALIGNMENT);
795 LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
797 min(tablebits, LZX_MAINCODE_TABLEBITS),
799 LZX_MAX_MAIN_CODEWORD_LEN));
800 LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
801 LZX_LENCODE_NUM_SYMBOLS,
802 min(tablebits, LZX_LENCODE_TABLEBITS),
804 LZX_MAX_LEN_CODEWORD_LEN));
805 LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
806 LZX_ALIGNEDCODE_NUM_SYMBOLS,
807 min(tablebits, LZX_ALIGNEDCODE_TABLEBITS),
809 LZX_MAX_ALIGNED_CODEWORD_LEN));
810 #endif /* ENABLE_LZX_DEBUG */
813 /* Write an LZX aligned offset or verbatim block to the output. */
815 lzx_write_compressed_block(int block_type,
817 unsigned max_window_size,
818 unsigned num_main_syms,
819 struct lzx_match * chosen_matches,
820 unsigned num_chosen_matches,
821 const struct lzx_codes * codes,
822 const struct lzx_codes * prev_codes,
823 struct output_bitstream * ostream)
827 LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
828 block_type == LZX_BLOCKTYPE_VERBATIM);
829 lzx_assert_codes_valid(codes, num_main_syms);
831 /* The first three bits indicate the type of block and are one of the
832 * LZX_BLOCKTYPE_* constants. */
833 bitstream_put_bits(ostream, block_type, 3);
835 /* Output the block size.
837 * The original LZX format seemed to always encode the block size in 3
838 * bytes. However, the implementation in WIMGAPI, as used in WIM files,
839 * uses the first bit to indicate whether the block is the default size
840 * (32768) or a different size given explicitly by the next 16 bits.
842 * By default, this compressor uses a window size of 32768 and therefore
843 * follows the WIMGAPI behavior. However, this compressor also supports
844 * window sizes greater than 32768 bytes, which do not appear to be
845 * supported by WIMGAPI. In such cases, we retain the default size bit
846 * to mean a size of 32768 bytes but output a non-default block size in 24
847 * bits rather than 16. The compatibility of this behavior is unknown
848 * because WIMs created with chunk size greater than 32768 can seemingly
849 * only be opened by wimlib anyway. */
850 if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
851 bitstream_put_bits(ostream, 1, 1);
853 bitstream_put_bits(ostream, 0, 1);
855 if (max_window_size >= 65536)
856 bitstream_put_bits(ostream, block_size >> 16, 8);
858 bitstream_put_bits(ostream, block_size, 16);
861 /* For aligned offset blocks, write out the lengths of the aligned offset
862 * code. Note that the LZX specification incorrectly states that the aligned
863 * offset code comes after the length code, but in fact it is the very first
864 * code to be written (before the main code). */
865 if (block_type == LZX_BLOCKTYPE_ALIGNED)
866 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
867 bitstream_put_bits(ostream, codes->lens.aligned[i],
868 LZX_ALIGNEDCODE_ELEMENT_SIZE);
870 LZX_DEBUG("Writing main code...");
872 /* Write the precode and lengths for the first LZX_NUM_CHARS symbols in
873 * the main code, which are the codewords for literal bytes. */
874 lzx_write_compressed_code(ostream,
876 prev_codes->lens.main,
879 /* Write the precode and lengths for the rest of the main code, which
880 * are the codewords for match headers. */
881 lzx_write_compressed_code(ostream,
882 codes->lens.main + LZX_NUM_CHARS,
883 prev_codes->lens.main + LZX_NUM_CHARS,
884 num_main_syms - LZX_NUM_CHARS);
886 LZX_DEBUG("Writing length code...");
888 /* Write the precode and lengths for the length code. */
889 lzx_write_compressed_code(ostream,
891 prev_codes->lens.len,
892 LZX_LENCODE_NUM_SYMBOLS);
894 LZX_DEBUG("Writing matches and literals...");
896 /* Write the actual matches and literals. */
897 lzx_write_matches_and_literals(ostream, block_type,
898 chosen_matches, num_chosen_matches,
901 LZX_DEBUG("Done writing block.");
904 /* Write out the LZX blocks that were computed. */
906 lzx_write_all_blocks(struct lzx_compressor *ctx, struct output_bitstream *ostream)
909 const struct lzx_codes *prev_codes = &ctx->zero_codes;
910 for (unsigned i = 0; i < ctx->num_blocks; i++) {
911 const struct lzx_block_spec *spec = &ctx->block_specs[i];
913 LZX_DEBUG("Writing block %u/%u (type=%d, size=%u, num_chosen_matches=%u)...",
914 i + 1, ctx->num_blocks,
915 spec->block_type, spec->block_size,
916 spec->num_chosen_matches);
918 lzx_write_compressed_block(spec->block_type,
920 ctx->max_window_size,
922 &ctx->chosen_matches[spec->chosen_matches_start_pos],
923 spec->num_chosen_matches,
928 prev_codes = &spec->codes;
932 /* Constructs an LZX match from a literal byte and updates the main code symbol
935 lzx_tally_literal(u8 lit, struct lzx_freqs *freqs)
941 /* Constructs an LZX match from an offset and a length, and updates the LRU
942 * queue and the frequency of symbols in the main, length, and aligned offset
943 * alphabets. The return value is a 32-bit number that provides the match in an
944 * intermediate representation documented below. */
946 lzx_tally_match(unsigned match_len, unsigned match_offset,
947 struct lzx_freqs *freqs, struct lzx_lru_queue *queue)
949 unsigned position_slot;
950 unsigned position_footer;
952 unsigned main_symbol;
954 unsigned adjusted_match_len;
956 LZX_ASSERT(match_len >= LZX_MIN_MATCH_LEN && match_len <= LZX_MAX_MATCH_LEN);
958 /* The match offset shall be encoded as a position slot (itself encoded
959 * as part of the main symbol) and a position footer. */
960 position_slot = lzx_get_position_slot(match_offset, queue);
961 position_footer = (match_offset + LZX_OFFSET_OFFSET) &
962 ((1U << lzx_get_num_extra_bits(position_slot)) - 1);
964 /* The match length shall be encoded as a length header (itself encoded
965 * as part of the main symbol) and an optional length footer. */
966 adjusted_match_len = match_len - LZX_MIN_MATCH_LEN;
967 if (adjusted_match_len < LZX_NUM_PRIMARY_LENS) {
968 /* No length footer needed. */
969 len_header = adjusted_match_len;
971 /* Length footer needed. It will be encoded using the length
973 len_header = LZX_NUM_PRIMARY_LENS;
974 len_footer = adjusted_match_len - LZX_NUM_PRIMARY_LENS;
975 freqs->len[len_footer]++;
978 /* Account for the main symbol. */
979 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
981 freqs->main[main_symbol]++;
983 /* In an aligned offset block, 3 bits of the position footer are output
984 * as an aligned offset symbol. Account for this, although we may
985 * ultimately decide to output the block as verbatim. */
987 /* The following check is equivalent to:
989 * if (lzx_extra_bits[position_slot] >= 3)
991 * Note that this correctly excludes position slots that correspond to
993 if (position_slot >= 8)
994 freqs->aligned[position_footer & 7]++;
996 /* Pack the position slot, position footer, and match length into an
997 * intermediate representation. See `struct lzx_match' for details.
999 LZX_ASSERT(LZX_MAX_POSITION_SLOTS <= 64);
1000 LZX_ASSERT(lzx_get_num_extra_bits(LZX_MAX_POSITION_SLOTS - 1) <= 17);
1001 LZX_ASSERT(LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1 <= 256);
1003 LZX_ASSERT(position_slot <= (1U << (31 - 25)) - 1);
1004 LZX_ASSERT(position_footer <= (1U << (25 - 8)) - 1);
1005 LZX_ASSERT(adjusted_match_len <= (1U << (8 - 0)) - 1);
1007 (position_slot << 25) |
1008 (position_footer << 8) |
1009 (adjusted_match_len);
1012 struct lzx_record_ctx {
1013 struct lzx_freqs freqs;
1014 struct lzx_lru_queue queue;
1015 struct lzx_match *matches;
1019 lzx_record_match(unsigned len, unsigned offset, void *_ctx)
1021 struct lzx_record_ctx *ctx = _ctx;
1023 (ctx->matches++)->data = lzx_tally_match(len, offset, &ctx->freqs, &ctx->queue);
1027 lzx_record_literal(u8 lit, void *_ctx)
1029 struct lzx_record_ctx *ctx = _ctx;
1031 (ctx->matches++)->data = lzx_tally_literal(lit, &ctx->freqs);
1034 /* Returns the cost, in bits, to output a literal byte using the specified cost
1037 lzx_literal_cost(u8 c, const struct lzx_costs * costs)
1039 return costs->main[c];
1042 /* Given a (length, offset) pair that could be turned into a valid LZX match as
1043 * well as costs for the codewords in the main, length, and aligned Huffman
1044 * codes, return the approximate number of bits it will take to represent this
1045 * match in the compressed output. Take into account the match offset LRU
1046 * queue and optionally update it. */
1048 lzx_match_cost(unsigned length, unsigned offset, const struct lzx_costs *costs,
1049 struct lzx_lru_queue *queue)
1051 unsigned position_slot;
1052 unsigned len_header, main_symbol;
1055 position_slot = lzx_get_position_slot(offset, queue);
1057 len_header = min(length - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
1058 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
1060 /* Account for main symbol. */
1061 cost += costs->main[main_symbol];
1063 /* Account for extra position information. */
1064 unsigned num_extra_bits = lzx_get_num_extra_bits(position_slot);
1065 if (num_extra_bits >= 3) {
1066 cost += num_extra_bits - 3;
1067 cost += costs->aligned[(offset + LZX_OFFSET_OFFSET) & 7];
1069 cost += num_extra_bits;
1072 /* Account for extra length information. */
1073 if (len_header == LZX_NUM_PRIMARY_LENS)
1074 cost += costs->len[length - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
1080 /* Fast heuristic cost evaluation to use in the inner loop of the match-finder.
1081 * Unlike lzx_match_cost() which does a true cost evaluation, this simply
1082 * prioritizes matches based on their offset. */
1084 lzx_match_cost_fast(input_idx_t length, input_idx_t offset, const void *_queue)
1086 const struct lzx_lru_queue *queue = _queue;
1088 /* It seems well worth it to take the time to give priority to recently
1090 for (input_idx_t i = 0; i < LZX_NUM_RECENT_OFFSETS; i++)
1091 if (offset == queue->R[i])
1097 /* Set the cost model @ctx->costs from the Huffman codeword lengths specified in
1100 * The cost model and codeword lengths are almost the same thing, but the
1101 * Huffman codewords with length 0 correspond to symbols with zero frequency
1102 * that still need to be assigned actual costs. The specific values assigned
1103 * are arbitrary, but they should be fairly high (near the maximum codeword
1104 * length) to take into account the fact that uses of these symbols are expected
1107 lzx_set_costs(struct lzx_compressor * ctx, const struct lzx_lens * lens)
1110 unsigned num_main_syms = ctx->num_main_syms;
1113 for (i = 0; i < num_main_syms; i++) {
1114 ctx->costs.main[i] = lens->main[i];
1115 if (ctx->costs.main[i] == 0)
1116 ctx->costs.main[i] = ctx->params.alg_params.slow.main_nostat_cost;
1120 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
1121 ctx->costs.len[i] = lens->len[i];
1122 if (ctx->costs.len[i] == 0)
1123 ctx->costs.len[i] = ctx->params.alg_params.slow.len_nostat_cost;
1126 /* Aligned offset code */
1127 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1128 ctx->costs.aligned[i] = lens->aligned[i];
1129 if (ctx->costs.aligned[i] == 0)
1130 ctx->costs.aligned[i] = ctx->params.alg_params.slow.aligned_nostat_cost;
1134 /* Tell the match-finder to skip the specified number of bytes (@n) in the
1137 lzx_lz_skip_bytes(struct lzx_compressor *ctx, input_idx_t n)
1139 LZX_ASSERT(n <= ctx->match_window_end - ctx->match_window_pos);
1140 if (ctx->matches_cached) {
1141 ctx->match_window_pos += n;
1143 ctx->cached_matches_pos +=
1144 ctx->cached_matches[ctx->cached_matches_pos].len + 1;
1148 ctx->cached_matches[ctx->cached_matches_pos++].len = 0;
1149 lz_sarray_skip_position(&ctx->lz_sarray);
1150 ctx->match_window_pos++;
1152 LZX_ASSERT(lz_sarray_get_pos(&ctx->lz_sarray) == ctx->match_window_pos);
1156 /* Retrieve a list of matches available at the next position in the input.
1158 * A pointer to the matches array is written into @matches_ret, and the return
1159 * value is the number of matches found. */
1161 lzx_lz_get_matches_caching(struct lzx_compressor *ctx,
1162 const struct lzx_lru_queue *queue,
1163 struct raw_match **matches_ret)
1166 struct raw_match *matches;
1168 LZX_ASSERT(ctx->match_window_pos <= ctx->match_window_end);
1170 matches = &ctx->cached_matches[ctx->cached_matches_pos + 1];
1172 if (ctx->matches_cached) {
1173 num_matches = matches[-1].len;
1175 LZX_ASSERT(lz_sarray_get_pos(&ctx->lz_sarray) == ctx->match_window_pos);
1176 num_matches = lz_sarray_get_matches(&ctx->lz_sarray,
1178 lzx_match_cost_fast,
1180 matches[-1].len = num_matches;
1182 ctx->cached_matches_pos += num_matches + 1;
1183 *matches_ret = matches;
1185 /* Cap the length of returned matches to the number of bytes remaining,
1186 * if it is not the whole window. */
1187 if (ctx->match_window_end < ctx->window_size) {
1188 unsigned maxlen = ctx->match_window_end - ctx->match_window_pos;
1189 for (u32 i = 0; i < num_matches; i++)
1190 if (matches[i].len > maxlen)
1191 matches[i].len = maxlen;
1194 fprintf(stderr, "Pos %u/%u: %u matches\n",
1195 ctx->match_window_pos, ctx->match_window_end, num_matches);
1196 for (unsigned i = 0; i < num_matches; i++)
1197 fprintf(stderr, "\tLen %u Offset %u\n", matches[i].len, matches[i].offset);
1200 #ifdef ENABLE_LZX_DEBUG
1201 for (u32 i = 0; i < num_matches; i++) {
1202 LZX_ASSERT(matches[i].len >= LZX_MIN_MATCH_LEN);
1203 LZX_ASSERT(matches[i].len <= LZX_MAX_MATCH_LEN);
1204 LZX_ASSERT(matches[i].len <= ctx->match_window_end - ctx->match_window_pos);
1205 LZX_ASSERT(matches[i].offset > 0);
1206 LZX_ASSERT(matches[i].offset <= ctx->match_window_pos);
1207 LZX_ASSERT(!memcmp(&ctx->window[ctx->match_window_pos],
1208 &ctx->window[ctx->match_window_pos - matches[i].offset],
1213 ctx->match_window_pos++;
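/*
 * Layout of ctx->cached_matches[] implied by the code above (one group per
 * window position processed):
 *
 *	[count = N] [match 1] [match 2] ... [match N] [count = M] [match 1] ...
 *
 * Each "count" entry is a `struct raw_match' whose `len' field holds the
 * number of matches that follow it; a skipped position is recorded as a
 * group with count 0.  On later optimization passes these groups are simply
 * replayed instead of re-running the suffix array match-finder.
 */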
1218 lzx_get_prev_literal_cost(struct lzx_compressor *ctx,
1219 struct lzx_lru_queue *queue)
1221 return lzx_literal_cost(ctx->window[ctx->match_window_pos - 1],
1226 lzx_get_match_cost(struct lzx_compressor *ctx,
1227 struct lzx_lru_queue *queue,
1228 input_idx_t length, input_idx_t offset)
1230 return lzx_match_cost(length, offset, &ctx->costs, queue);
1233 static struct raw_match
1234 lzx_lz_get_near_optimal_match(struct lzx_compressor *ctx)
1236 return lz_get_near_optimal_match(&ctx->mc,
1237 lzx_lz_get_matches_caching,
1239 lzx_get_prev_literal_cost,
1245 /* Set default symbol costs for the LZX Huffman codes. */
1247 lzx_set_default_costs(struct lzx_costs * costs, unsigned num_main_syms)
1251 /* Main code (part 1): Literal symbols */
1252 for (i = 0; i < LZX_NUM_CHARS; i++)
1255 /* Main code (part 2): Match header symbols */
1256 for (; i < num_main_syms; i++)
1257 costs->main[i] = 10;
1260 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1263 /* Aligned offset code */
1264 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1265 costs->aligned[i] = 3;
1268 /* Given the frequencies of symbols in an LZX-compressed block and the
1269 * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
1270 * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
1271 * will take fewer bits to output. */
1273 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1274 const struct lzx_codes * codes)
1276 unsigned aligned_cost = 0;
1277 unsigned verbatim_cost = 0;
1279 /* Verbatim blocks have a constant 3 bits per position footer. Aligned
1280 * offset blocks have an aligned offset symbol per position footer, plus
1281 * an extra 24 bits per block to output the lengths necessary to
1282 * reconstruct the aligned offset code itself. */
1283 for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1284 verbatim_cost += 3 * freqs->aligned[i];
1285 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1287 aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1288 if (aligned_cost < verbatim_cost)
1289 return LZX_BLOCKTYPE_ALIGNED;
1291 return LZX_BLOCKTYPE_VERBATIM;
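/*
 * For example (illustrative numbers): if a block tallied 200 aligned offset
 * symbols and the aligned offset code assigns an average of 2.5 bits per
 * symbol, the aligned block costs roughly 200 * 2.5 + 24 = 524 bits for this
 * part versus 200 * 3 = 600 bits verbatim, so the aligned type is chosen.
 * With only 20 such symbols, the 24-bit cost of transmitting the aligned
 * offset code lengths would tip the decision the other way (74 vs. 60 bits).
 */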
1294 /* Find a near-optimal sequence of matches/literals with which to output the
1295 * specified LZX block, then set the block's type to that which has the minimum
1296 * cost to output (either verbatim or aligned). */
1298 lzx_optimize_block(struct lzx_compressor *ctx, struct lzx_block_spec *spec,
1299 unsigned num_passes)
1301 const struct lzx_lru_queue orig_queue = ctx->queue;
1302 struct lzx_freqs freqs;
1304 unsigned orig_window_pos = spec->window_pos;
1305 unsigned orig_cached_pos = ctx->cached_matches_pos;
1307 LZX_ASSERT(ctx->match_window_pos == spec->window_pos);
1309 ctx->match_window_end = spec->window_pos + spec->block_size;
1310 spec->chosen_matches_start_pos = spec->window_pos;
1312 LZX_ASSERT(num_passes >= 1);
1314 /* The first optimal parsing pass is done using the cost model already
1315 * set in ctx->costs. Each later pass is done using a cost model
1316 * computed from the previous pass. */
1317 for (unsigned pass = 0; pass < num_passes; pass++) {
1319 ctx->match_window_pos = orig_window_pos;
1320 ctx->cached_matches_pos = orig_cached_pos;
1321 ctx->queue = orig_queue;
1322 spec->num_chosen_matches = 0;
1323 memset(&freqs, 0, sizeof(freqs));
1325 for (unsigned i = spec->window_pos; i < spec->window_pos + spec->block_size; ) {
1326 struct raw_match raw_match;
1327 struct lzx_match lzx_match;
1329 raw_match = lzx_lz_get_near_optimal_match(ctx);
1330 if (raw_match.len >= LZX_MIN_MATCH_LEN) {
1331 if (unlikely(raw_match.len == LZX_MIN_MATCH_LEN &&
1332 raw_match.offset == ctx->max_window_size -
1335 /* Degenerate case where the parser
1336 * generated the minimum match length
1337 * with the maximum offset. There
1338 * aren't actually enough position slots
1339 * to represent this offset, as noted in
1341 * lzx_get_num_main_syms(), so we cannot
1342 * allow it. Use literals instead.
1344 * Note that this case only occurs if
1345 * the match-finder can generate matches
1346 * to the very start of the window. The
1347 * suffix array match-finder can,
1348 * although typical hash chain and
1349 * binary tree match-finders use 0 as a
1350 * null value and therefore cannot
1351 * generate such matches. */
1352 BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2);
1353 lzx_match.data = lzx_tally_literal(ctx->window[i],
1356 ctx->chosen_matches[spec->chosen_matches_start_pos +
1357 spec->num_chosen_matches++]
1359 lzx_match.data = lzx_tally_literal(ctx->window[i],
1363 lzx_match.data = lzx_tally_match(raw_match.len,
1370 lzx_match.data = lzx_tally_literal(ctx->window[i], &freqs);
1373 ctx->chosen_matches[spec->chosen_matches_start_pos +
1374 spec->num_chosen_matches++] = lzx_match;
1377 lzx_make_huffman_codes(&freqs, &spec->codes,
1378 ctx->num_main_syms);
1379 if (pass < num_passes - 1)
1380 lzx_set_costs(ctx, &spec->codes.lens);
1381 ctx->matches_cached = true;
1383 spec->block_type = lzx_choose_verbatim_or_aligned(&freqs, &spec->codes);
1384 ctx->matches_cached = false;
1388 lzx_optimize_blocks(struct lzx_compressor *ctx)
1390 lzx_lru_queue_init(&ctx->queue);
1391 lz_match_chooser_begin(&ctx->mc);
1393 const unsigned num_passes = ctx->params.alg_params.slow.num_optim_passes;
1395 for (unsigned i = 0; i < ctx->num_blocks; i++)
1396 lzx_optimize_block(ctx, &ctx->block_specs[i], num_passes);
1399 /* Prepare the input window into one or more LZX blocks ready to be output. */
1401 lzx_prepare_blocks(struct lzx_compressor * ctx)
1403 /* Initialize the match-finder. */
1404 lz_sarray_load_window(&ctx->lz_sarray, ctx->window, ctx->window_size);
1405 ctx->cached_matches_pos = 0;
1406 ctx->matches_cached = false;
1407 ctx->match_window_pos = 0;
1409 /* Set up a default cost model. */
1410 lzx_set_default_costs(&ctx->costs, ctx->num_main_syms);
1412 /* TODO: The compression ratio could be slightly improved by performing
1413 * data-dependent block splitting instead of using fixed-size blocks.
1414 * Doing so well is a computationally hard problem, however. */
1415 ctx->num_blocks = DIV_ROUND_UP(ctx->window_size, LZX_DIV_BLOCK_SIZE);
1416 for (unsigned i = 0; i < ctx->num_blocks; i++) {
1417 unsigned pos = LZX_DIV_BLOCK_SIZE * i;
1418 ctx->block_specs[i].window_pos = pos;
1419 ctx->block_specs[i].block_size = min(ctx->window_size - pos, LZX_DIV_BLOCK_SIZE);
1422 /* Determine sequence of matches/literals to output for each block. */
1423 lzx_optimize_blocks(ctx);
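/*
 * For example, a 100000-byte window is divided here into
 * DIV_ROUND_UP(100000, 32768) = 4 blocks of sizes 32768, 32768, 32768, and
 * 1696 bytes.
 */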
1427 * This is the fast version of lzx_prepare_blocks(). This version "quickly"
1428 * prepares a single compressed block containing the entire input. See the
1429 * description of the "Fast algorithm" at the beginning of this file for more
1432 * Input --- the preprocessed data:
1437 * Output --- the block specification and the corresponding match/literal data:
1439 * ctx->block_specs[]
1441 * ctx->chosen_matches[]
1444 lzx_prepare_block_fast(struct lzx_compressor * ctx)
1446 struct lzx_record_ctx record_ctx;
1447 struct lzx_block_spec *spec;
1449 /* Parameters to hash chain LZ match finder
1450 * (lazy with 1 match lookahead) */
1451 static const struct lz_params lzx_lz_params = {
1452 /* Although LZX_MIN_MATCH_LEN == 2, length 2 matches typically
1453 * aren't worth choosing when using greedy or lazy parsing. */
1455 .max_match = LZX_MAX_MATCH_LEN,
1456 .max_offset = LZX_MAX_WINDOW_SIZE,
1457 .good_match = LZX_MAX_MATCH_LEN,
1458 .nice_match = LZX_MAX_MATCH_LEN,
1459 .max_chain_len = LZX_MAX_MATCH_LEN,
1460 .max_lazy_match = LZX_MAX_MATCH_LEN,
1464 /* Initialize symbol frequencies and match offset LRU queue. */
1465 memset(&record_ctx.freqs, 0, sizeof(struct lzx_freqs));
1466 lzx_lru_queue_init(&record_ctx.queue);
1467 record_ctx.matches = ctx->chosen_matches;
1469 /* Determine series of matches/literals to output. */
1470 lz_analyze_block(ctx->window,
1478 /* Set up block specification. */
1479 spec = &ctx->block_specs[0];
1480 spec->block_type = LZX_BLOCKTYPE_ALIGNED;
1481 spec->window_pos = 0;
1482 spec->block_size = ctx->window_size;
1483 spec->num_chosen_matches = (record_ctx.matches - ctx->chosen_matches);
1484 spec->chosen_matches_start_pos = 0;
1485 lzx_make_huffman_codes(&record_ctx.freqs, &spec->codes,
1486 ctx->num_main_syms);
1487 ctx->num_blocks = 1;
1491 do_call_insn_translation(u32 *call_insn_target, int input_pos,
1497 rel_offset = le32_to_cpu(*call_insn_target);
1498 if (rel_offset >= -input_pos && rel_offset < file_size) {
1499 if (rel_offset < file_size - input_pos) {
1500 /* "good translation" */
1501 abs_offset = rel_offset + input_pos;
1503 /* "compensating translation" */
1504 abs_offset = rel_offset - file_size;
1506 *call_insn_target = cpu_to_le32(abs_offset);
1510 /* This is the reverse of undo_call_insn_preprocessing() in lzx-decompress.c.
1511 * See the comment above that function for more information. */
1513 do_call_insn_preprocessing(u8 data[], int size)
1515 for (int i = 0; i < size - 10; i++) {
1516 if (data[i] == 0xe8) {
1517 do_call_insn_translation((u32*)&data[i + 1], i,
1518 LZX_WIM_MAGIC_FILESIZE);
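/*
 * Worked example of the translation above (illustrative numbers): an 0xe8
 * call at input position 1000 whose 32-bit operand is the relative offset
 * -200 is rewritten to the absolute target -200 + 1000 = 800 (the "good
 * translation" case).  Calls to the same target from different positions
 * then carry identical operands, which makes them more compressible; the
 * decompressor reverses the transformation in undo_call_insn_preprocessing().
 */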
1525 lzx_compress(const void *uncompressed_data, size_t uncompressed_size,
1526 void *compressed_data, size_t compressed_size_avail, void *_ctx)
1528 struct lzx_compressor *ctx = _ctx;
1529 struct output_bitstream ostream;
1530 size_t compressed_size;
1532 if (uncompressed_size < 100) {
1533 LZX_DEBUG("Too small to bother compressing.");
1537 if (uncompressed_size > ctx->max_window_size) {
1538 LZX_DEBUG("Can't compress %zu bytes using window of %u bytes!",
1539 uncompressed_size, ctx->max_window_size);
1543 LZX_DEBUG("Attempting to compress %zu bytes...",
1546 /* The input data must be preprocessed. To avoid changing the original
1547 * input, copy it to a temporary buffer. */
1548 memcpy(ctx->window, uncompressed_data, uncompressed_size);
1549 ctx->window_size = uncompressed_size;
1551 /* This line is unnecessary; it just avoids inconsequential accesses of
1552 * uninitialized memory that would show up in memory-checking tools such
1554 memset(&ctx->window[ctx->window_size], 0, 12);
1556 LZX_DEBUG("Preprocessing data...");
1558 /* Before doing any actual compression, do the call instruction (0xe8
1559 * byte) translation on the uncompressed data. */
1560 do_call_insn_preprocessing(ctx->window, ctx->window_size);
1562 LZX_DEBUG("Preparing blocks...");
1564 /* Prepare the compressed data. */
1565 if (ctx->params.algorithm == WIMLIB_LZX_ALGORITHM_FAST)
1566 lzx_prepare_block_fast(ctx);
1568 lzx_prepare_blocks(ctx);
1570 LZX_DEBUG("Writing compressed blocks...");
1572 /* Generate the compressed data. */
1573 init_output_bitstream(&ostream, compressed_data, compressed_size_avail);
1574 lzx_write_all_blocks(ctx, &ostream);
1576 LZX_DEBUG("Flushing bitstream...");
1577 compressed_size = flush_output_bitstream(&ostream);
1578 if (compressed_size == ~(input_idx_t)0) {
1579 LZX_DEBUG("Data did not compress to %zu bytes or less!",
1580 compressed_size_avail);
1584 LZX_DEBUG("Done: compressed %zu => %zu bytes.",
1585 uncompressed_size, compressed_size);
1587 /* Verify that we really get the same thing back when decompressing.
1588 * Although this could be disabled by default in all cases, it only
1589 * takes around 2-3% of the running time of the slow algorithm to do the
1591 if (ctx->params.algorithm == WIMLIB_LZX_ALGORITHM_SLOW
1592 #if defined(ENABLE_LZX_DEBUG) || defined(ENABLE_VERIFY_COMPRESSION)
1597 struct wimlib_decompressor *decompressor;
1599 if (0 == wimlib_create_decompressor(WIMLIB_COMPRESSION_TYPE_LZX,
1600 ctx->max_window_size,
1605 ret = wimlib_decompress(compressed_data,
1610 wimlib_free_decompressor(decompressor);
1613 ERROR("Failed to decompress data we "
1614 "compressed using LZX algorithm");
1618 if (memcmp(uncompressed_data, ctx->window, uncompressed_size)) {
1619 ERROR("Data we compressed using LZX algorithm "
1620 "didn't decompress to original");
1625 WARNING("Failed to create decompressor for "
1626 "data verification!");
1629 return compressed_size;
1633 lzx_free_compressor(void *_ctx)
1635 struct lzx_compressor *ctx = _ctx;
1638 FREE(ctx->chosen_matches);
1639 FREE(ctx->cached_matches);
1640 lz_match_chooser_destroy(&ctx->mc);
1641 lz_sarray_destroy(&ctx->lz_sarray);
1642 FREE(ctx->block_specs);
1643 FREE(ctx->prev_tab);
1649 static const struct wimlib_lzx_compressor_params lzx_fast_default = {
1651 .size = sizeof(struct wimlib_lzx_compressor_params),
1653 .algorithm = WIMLIB_LZX_ALGORITHM_FAST,
1660 static const struct wimlib_lzx_compressor_params lzx_slow_default = {
1662 .size = sizeof(struct wimlib_lzx_compressor_params),
1664 .algorithm = WIMLIB_LZX_ALGORITHM_SLOW,
1668 .use_len2_matches = 1,
1669 .nice_match_length = 32,
1670 .num_optim_passes = 2,
1671 .max_search_depth = 50,
1672 .max_matches_per_pos = 3,
1673 .main_nostat_cost = 15,
1674 .len_nostat_cost = 15,
1675 .aligned_nostat_cost = 7,
1680 static const struct wimlib_lzx_compressor_params *
1681 lzx_get_params(const struct wimlib_compressor_params_header *_params)
1683 const struct wimlib_lzx_compressor_params *params =
1684 (const struct wimlib_lzx_compressor_params*)_params;
1686 if (params == NULL) {
1687 LZX_DEBUG("Using default algorithm and parameters.");
1688 params = &lzx_slow_default;
1690 if (params->use_defaults) {
1691 if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW)
1692 params = &lzx_slow_default;
1694 params = &lzx_fast_default;
1701 lzx_create_compressor(size_t window_size,
1702 const struct wimlib_compressor_params_header *_params,
1705 const struct wimlib_lzx_compressor_params *params = lzx_get_params(_params);
1706 struct lzx_compressor *ctx;
1708 LZX_DEBUG("Allocating LZX context...");
1710 if (!lzx_window_size_valid(window_size))
1711 return WIMLIB_ERR_INVALID_PARAM;
1713 LZX_DEBUG("Allocating memory.");
1715 ctx = CALLOC(1, sizeof(struct lzx_compressor));
1719 ctx->num_main_syms = lzx_get_num_main_syms(window_size);
1720 ctx->max_window_size = window_size;
1721 ctx->window = MALLOC(window_size + 12);
1722 if (ctx->window == NULL)
1725 if (params->algorithm == WIMLIB_LZX_ALGORITHM_FAST) {
1726 ctx->prev_tab = MALLOC(window_size * sizeof(ctx->prev_tab[0]));
1727 if (ctx->prev_tab == NULL)
1731 size_t block_specs_length = DIV_ROUND_UP(window_size, LZX_DIV_BLOCK_SIZE);
1732 ctx->block_specs = MALLOC(block_specs_length * sizeof(ctx->block_specs[0]));
1733 if (ctx->block_specs == NULL)
1736 if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
1737 unsigned min_match_len = LZX_MIN_MATCH_LEN;
1738 if (!params->alg_params.slow.use_len2_matches)
1739 min_match_len = max(min_match_len, 3);
1741 if (!lz_sarray_init(&ctx->lz_sarray,
1745 params->alg_params.slow.max_search_depth,
1746 params->alg_params.slow.max_matches_per_pos))
1750 if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
1751 if (!lz_match_chooser_init(&ctx->mc,
1752 LZX_OPTIM_ARRAY_SIZE,
1753 params->alg_params.slow.nice_match_length,
1758 if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
1761 cache_per_pos = params->alg_params.slow.max_matches_per_pos;
1762 if (cache_per_pos > LZX_MAX_CACHE_PER_POS)
1763 cache_per_pos = LZX_MAX_CACHE_PER_POS;
1765 ctx->cached_matches = MALLOC(window_size * (cache_per_pos + 1) *
1766 sizeof(ctx->cached_matches[0]));
1767 if (ctx->cached_matches == NULL)
1771 ctx->chosen_matches = MALLOC(window_size * sizeof(ctx->chosen_matches[0]));
1772 if (ctx->chosen_matches == NULL)
1775 memcpy(&ctx->params, params, sizeof(struct wimlib_lzx_compressor_params));
1776 memset(&ctx->zero_codes, 0, sizeof(ctx->zero_codes));
1778 LZX_DEBUG("Successfully allocated new LZX context.");
1784 lzx_free_compressor(ctx);
1785 return WIMLIB_ERR_NOMEM;
1789 lzx_get_needed_memory(size_t max_block_size,
1790 const struct wimlib_compressor_params_header *_params)
1792 const struct wimlib_lzx_compressor_params *params = lzx_get_params(_params);
1796 size += sizeof(struct lzx_compressor);
1798 size += max_block_size + 12;
1800 size += DIV_ROUND_UP(max_block_size, LZX_DIV_BLOCK_SIZE) *
1801 sizeof(((struct lzx_compressor*)0)->block_specs[0]);
1803 if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
1804 size += max_block_size * sizeof(((struct lzx_compressor*)0)->chosen_matches[0]);
1805 size += lz_sarray_get_needed_memory(max_block_size);
1806 size += lz_match_chooser_get_needed_memory(LZX_OPTIM_ARRAY_SIZE,
1807 params->alg_params.slow.nice_match_length,
1811 cache_per_pos = params->alg_params.slow.max_matches_per_pos;
1812 if (cache_per_pos > LZX_MAX_CACHE_PER_POS)
1813 cache_per_pos = LZX_MAX_CACHE_PER_POS;
1815 size += max_block_size * (cache_per_pos + 1) *
1816 sizeof(((struct lzx_compressor*)0)->cached_matches[0]);
1818 size += max_block_size * sizeof(((struct lzx_compressor*)0)->prev_tab[0]);
1824 lzx_params_valid(const struct wimlib_compressor_params_header *_params)
1826 const struct wimlib_lzx_compressor_params *params =
1827 (const struct wimlib_lzx_compressor_params*)_params;
1829 if (params->hdr.size != sizeof(struct wimlib_lzx_compressor_params)) {
1830 LZX_DEBUG("Invalid parameter structure size!");
1834 if (params->algorithm != WIMLIB_LZX_ALGORITHM_SLOW &&
1835 params->algorithm != WIMLIB_LZX_ALGORITHM_FAST)
1837 LZX_DEBUG("Invalid algorithm.");
1841 if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW &&
1842 !params->use_defaults)
1844 if (params->alg_params.slow.num_optim_passes < 1)
1846 LZX_DEBUG("Invalid number of optimization passes!");
1850 if (params->alg_params.slow.main_nostat_cost < 1 ||
1851 params->alg_params.slow.main_nostat_cost > 16)
1853 LZX_DEBUG("Invalid main_nostat_cost!");
1857 if (params->alg_params.slow.len_nostat_cost < 1 ||
1858 params->alg_params.slow.len_nostat_cost > 16)
1860 LZX_DEBUG("Invalid len_nostat_cost!");
1864 if (params->alg_params.slow.aligned_nostat_cost < 1 ||
1865 params->alg_params.slow.aligned_nostat_cost > 8)
1867 LZX_DEBUG("Invalid aligned_nostat_cost!");
1874 const struct compressor_ops lzx_compressor_ops = {
1875 .params_valid = lzx_params_valid,
1876 .get_needed_memory = lzx_get_needed_memory,
1877 .create_compressor = lzx_create_compressor,
1878 .compress = lzx_compress,
1879 .free_compressor = lzx_free_compressor,