4 * A compressor that produces output compatible with the LZX compression format.
8 * Copyright (C) 2012, 2013, 2014 Eric Biggers
10 * This file is part of wimlib, a library for working with WIM files.
12 * wimlib is free software; you can redistribute it and/or modify it under the
13 * terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 3 of the License, or (at your option)
17 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
18 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
19 * A PARTICULAR PURPOSE. See the GNU General Public License for more
22 * You should have received a copy of the GNU General Public License
23 * along with wimlib; if not, see http://www.gnu.org/licenses/.
28 * This file contains a compressor for the LZX ("Lempel-Ziv eXtended"?)
29 * compression format, as used in the WIM (Windows IMaging) file format. This
30 * code may need some slight modifications to be used outside of the WIM format.
31 * In particular, in other situations the LZX block header might be slightly
32 * different, and a sliding window rather than a fixed-size window might be needed.
35 * ----------------------------------------------------------------------------
39 * The primary reference for LZX is the specification released by Microsoft.
40 * However, the comments in lzx-decompress.c provide more information about LZX
41 * and note some errors in the Microsoft specification.
43 * LZX shares many similarities with DEFLATE, the format used by zlib and gzip.
44 * Both LZX and DEFLATE use LZ77 matching and Huffman coding. Certain details
45 * are quite similar, such as the method for storing Huffman codes. However,
46 * the main differences are:
48 * - LZX preprocesses the data to attempt to make x86 machine code slightly more
49 * compressible before attempting to compress it further.
51 * - LZX uses a "main" alphabet which combines literals and matches, with the
52 * match symbols containing a "length header" (giving all or part of the match
53 * length) and a "position slot" (giving, roughly speaking, the order of
54 * magnitude of the match offset).
56 * - LZX does not have static Huffman blocks (that is, the kind with preset
57 * Huffman codes); however it does have two types of dynamic Huffman blocks
58 * ("verbatim" and "aligned").
60 * - LZX has a minimum match length of 2 rather than 3.
62 * - In LZX, match offsets 0 through 2 actually represent entries in an LRU
63 * queue of match offsets. This is very useful for certain types of files,
64 * such as binary files that have repeating records.
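 *
 *   (For example, if a match with offset 100 was just coded, a later match at
 *   the same offset can be coded with "offset" 0, which selects the most
 *   recent queue entry and needs no extra offset bits.)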
66 * ----------------------------------------------------------------------------
68 * Algorithmic Overview
70 * At a high level, any implementation of LZX compression must operate as
73 * 1. Preprocess the input data to translate the targets of 32-bit x86 call
74 * instructions to absolute offsets. (Actually, this is required for WIM,
75 * but might not be required in other places where LZX is used.)
77 * 2. Find a sequence of LZ77-style matches and literal bytes that expands to
78 * the preprocessed data.
80 * 3. Divide the match/literal sequence into one or more LZX blocks, each of
81 * which may be "uncompressed", "verbatim", or "aligned".
83 * 4. Output each LZX block.
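 *
 * In code form, the overall flow might look roughly like this (the function
 * names here are illustrative only, not the actual ones defined later in this
 * file):
 *
 *	do_e8_preprocessing(window, window_size);              // step 1
 *	num_items = find_matches_and_literals(window, items);  // step 2
 *	num_blocks = divide_items_into_blocks(items, blocks);  // step 3
 *	for (i = 0; i < num_blocks; i++)                       // step 4
 *		write_lzx_block(&blocks[i], ostream);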
85 * Step (1) is fairly straightforward. It requires looking for 0xe8 bytes in
86 * the input data and performing a translation on the 4 bytes following each one.
89 * Step (4) is complicated, but it is mostly determined by the LZX format. The
90 * only real choice we have is what algorithm to use to build the length-limited
91 * canonical Huffman codes. See lzx_write_all_blocks() for details.
93 * That leaves steps (2) and (3) as where all the hard stuff happens. Focusing
94 * on step (2), we need to do LZ77-style parsing on the input data, or "window",
95 * to divide it into a sequence of matches and literals. Each position in the
96 * window might have multiple matches associated with it, and we need to choose
97 * which one, if any, to actually use. Therefore, the problem can really be
98 * divided into two areas of concern: (a) finding matches at a given position,
99 * which we shall call "match-finding", and (b) choosing whether to use a
100 * match or a literal at a given position, and if using a match, which one (if
101 * there is more than one available). We shall call this "match-choosing". We
102 * first consider match-finding, then match-choosing.
104 * ----------------------------------------------------------------------------
108 * Given a position in the window, we want to find LZ77-style "matches" with
109 * that position at previous positions in the window. With LZX, the minimum
110 * match length is 2 and the maximum match length is 257. The only restriction
111 * on offsets is that LZX does not allow the last 2 bytes of the window to match
112 * the beginning of the window.
114 * There are a number of algorithms that can be used for this, including hash
115 * chains, binary trees, and suffix arrays. Binary trees generally work well
116 * for LZX compression since it uses medium-size windows (2^15 to 2^21 bytes).
117 * However, when compressing in a fast mode where many positions are skipped
118 * (not searched for matches), hash chains are faster.
120 * Since the match-finders are not specific to LZX, I will not explain them in
121 * detail here. Instead, see lz_hash_chains.c and lz_binary_trees.c.
123 * ----------------------------------------------------------------------------
127 * Usually, choosing the longest match is best because it encodes the most data
128 * in that one item. However, sometimes the longest match is not optimal
129 * because (a) choosing a long match now might prevent using an even longer
130 * match later, or (b) more generally, what we actually care about is the number
131 * of bits it will ultimately take to output each match or literal, which is
132 * actually dependent on the entropy encoding used by the underlying
133 * compression format. Consequently, a longer match usually, but not always,
134 * takes fewer bits to encode than multiple shorter matches or literals that
135 * cover the same data.
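 *
 * (As a simple example of (b): a length-2 match whose offset falls in a high
 * position slot requires many extra offset bits, so it can easily cost more
 * bits to encode than the two literal bytes it would replace.)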
137 * This problem of choosing the truly best match/literal sequence is probably
138 * impossible to solve efficiently when combined with entropy encoding. If we
139 * knew how many bits it takes to output each match/literal, then we could
140 * choose the optimal sequence using shortest-path search a la Dijkstra's
141 * algorithm. However, with entropy encoding, the chosen match/literal sequence
142 * affects its own encoding. Therefore, we can't know how many bits it will
143 * take to actually output any one match or literal until we have actually
144 * chosen the full sequence of matches and literals.
146 * Notwithstanding the entropy encoding problem, we also aren't guaranteed to
147 * choose the optimal match/literal sequence unless the match-finder (see
148 * section "Match-finder") provides the match-chooser with all possible matches
149 * at each position. However, this is not computationally efficient. For
150 * example, there might be many matches of the same length, and usually (but not
151 * always) the best choice is the one with the smallest offset. So in practice,
152 * it's fine to only consider the smallest offset for a given match length at a
153 * given position. (Actually, for LZX, it's also worth considering repeat offsets.)
156 * In addition, as mentioned earlier, in LZX we have the choice of using
157 * multiple blocks, each of which resets the Huffman codes. This expands the
158 * search space even further. Therefore, to simplify the problem, we currently
159 * don't attempt to actually choose the LZX blocks based on the data.
160 * Instead, we just divide the data into fixed-size blocks of LZX_DIV_BLOCK_SIZE
161 * bytes each, and always use verbatim or aligned blocks (never uncompressed).
162 * A previous version of this code recursively split the input data into
163 * equal-sized blocks, up to a maximum depth, and chose the lowest-cost block
164 * divisions. However, this made compression much slower and did not actually
165 * help very much. It remains an open question whether a sufficiently fast and
166 * useful block-splitting algorithm is possible for LZX. Essentially the same
167 * problem also applies to DEFLATE. The Microsoft LZX compressor seemingly does
168 * do block splitting, although I don't know how fast or useful it is.
171 * Now, back to the entropy encoding problem. The "solution" is to use an
172 * iterative approach to compute a good, but not necessarily optimal,
173 * match/literal sequence. Start with a fixed assignment of symbol costs and
174 * choose an "optimal" match/literal sequence based on those costs, using
175 * shortest-path search a la Dijkstra's algorithm. Then, for each iteration of
176 * the optimization, update the costs based on the entropy encoding of the
177 * current match/literal sequence, then choose a new match/literal sequence
178 * based on the updated costs. Usually, the actual cost to output the current
179 * match/literal sequence will decrease in each iteration until it converges on
180 * a fixed point. This result may not be the truly optimal match/literal
181 * sequence, but it usually is much better than one chosen by doing a "greedy"
182 * parse where we always choose the longest match.
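 *
 * A rough sketch of this iteration, in pseudo-C (the helper names here are
 * illustrative only):
 *
 *	set_default_symbol_costs(&costs);
 *	for (pass = 0; pass < num_optim_passes; pass++) {
 *		// Shortest-path parse of the data using the current costs.
 *		num_items = choose_items_using_costs(window, &costs, items);
 *
 *		// Re-estimate the costs from the Huffman code lengths implied
 *		// by the chosen items, then parse again on the next pass.
 *		tally_item_frequencies(items, num_items, &freqs);
 *		build_huffman_code_lengths(&freqs, &lens);
 *		set_costs_from_code_lengths(&costs, &lens);
 *	}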
184 * An alternative to both greedy parsing and iterative, near-optimal parsing is
185 * "lazy" parsing. Briefly, "lazy" parsing considers just the longest match at
186 * each position, but it waits to choose that match until it has also examined
187 * the next position. This is actually a useful approach; it's used by zlib,
188 * for example. Therefore, for fast compression we combine lazy parsing with
189 * the hash chain match-finder. For normal/high compression we combine
190 * near-optimal parsing with the binary tree match-finder.
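 *
 * As a concrete example of lazy parsing: if the longest match at position i
 * has length 5 but the longest match at position i + 1 has length 8, the lazy
 * parser emits a literal at position i and takes the length-8 match at i + 1,
 * whereas a greedy parser would have committed to the length-5 match at i.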
197 #include "wimlib/compressor_ops.h"
198 #include "wimlib/compress_common.h"
199 #include "wimlib/error.h"
200 #include "wimlib/lz_mf.h"
201 #include "wimlib/lzx.h"
202 #include "wimlib/util.h"
205 #define LZX_OPTIM_ARRAY_LENGTH 4096
207 #define LZX_DIV_BLOCK_SIZE 32768
209 #define LZX_CACHE_PER_POS 8
211 #define LZX_MAX_MATCHES_PER_POS (LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1)
213 #define LZX_CACHE_LEN (LZX_DIV_BLOCK_SIZE * (LZX_CACHE_PER_POS + 1))
215 /* Codewords for the LZX main, length, and aligned offset Huffman codes */
216 struct lzx_codewords {
217 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
218 u32 len[LZX_LENCODE_NUM_SYMBOLS];
219 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
222 /* Codeword lengths (in bits) for the LZX main, length, and aligned offset Huffman codes.
225 * A 0 length means the codeword has zero frequency.
228 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
229 u8 len[LZX_LENCODE_NUM_SYMBOLS];
230 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
233 /* Costs for the LZX main, length, and aligned offset Huffman symbols.
235 * If a codeword has zero frequency, it must still be assigned some nonzero cost
236 * --- generally a high cost, since even if it gets used in the next iteration,
237 * it probably will not be used very many times. */
239 u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
240 u8 len[LZX_LENCODE_NUM_SYMBOLS];
241 u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
244 /* The LZX main, length, and aligned offset Huffman codes */
246 struct lzx_codewords codewords;
247 struct lzx_lens lens;
250 /* Tables for tallying symbol frequencies in the three LZX alphabets. */
252 u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
253 u32 len[LZX_LENCODE_NUM_SYMBOLS];
254 u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
257 /* LZX intermediate match/literal format */
261 * 31 1 if a match, 0 if a literal.
263 * 30-25 position slot. This can be at most 50, so it will fit in 6 bits.
266 * 8-24 position footer. This is the offset of the real formatted
267 * offset from the position base. This can be at most 17 bits
268 * (since lzx_extra_bits[LZX_MAX_POSITION_SLOTS - 1] is 17).
270 * 0-7 length of match, minus 2. This can be at most
271 * (LZX_MAX_MATCH_LEN - 2) == 255, so it will fit in 8 bits. */
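
/* Illustrative sketch only (not part of the original code): how the fields
 * described above could be unpacked from the 32-bit intermediate
 * representation of a match.  The helper name is made up for this example, and
 * it assumes the high (match/literal) bit has already been tested. */
static inline void
lzx_item_unpack_match_sketch(u32 data, unsigned *position_slot,
			     unsigned *position_footer, unsigned *len_minus_2)
{
	*position_slot = (data >> 25) & 0x3f;		/* bits 30-25 */
	*position_footer = (data >> 8) & 0x1ffff;	/* bits 24-8  */
	*len_minus_2 = data & 0xff;			/* bits 7-0   */
}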
275 /* Specification for an LZX block. */
276 struct lzx_block_spec {
278 /* One of the LZX_BLOCKTYPE_* constants indicating the type of this block. */
282 /* 0-based position in the window at which this block starts. */
285 /* The number of bytes of uncompressed data this block represents. */
288 /* The match/literal sequence for this block. */
289 struct lzx_item *chosen_items;
291 /* The length of the @chosen_items sequence. */
292 u32 num_chosen_items;
294 /* Huffman codes for this block. */
295 struct lzx_codes codes;
298 struct lzx_compressor;
300 struct lzx_compressor_params {
301 struct lz_match (*choose_item_func)(struct lzx_compressor *);
302 enum lz_mf_algo mf_algo;
303 u32 num_optim_passes;
304 u32 min_match_length;
305 u32 nice_match_length;
306 u32 max_search_depth;
309 /* State of the LZX compressor. */
310 struct lzx_compressor {
312 /* The buffer of data to be compressed.
314 * 0xe8 byte preprocessing is done directly on the data here before
315 * further compression.
317 * Note that this compressor does *not* use a real sliding window!!!!
318 * It's not needed in the WIM format, since every chunk is compressed
319 * independently. This is by design, to allow random access to the WIM. */
323 /* Number of bytes of data to be compressed, which is the number of
324 * bytes of data in @cur_window that are actually valid. */
327 /* Allocated size of @cur_window. */
330 /* Compression parameters. */
331 struct lzx_compressor_params params;
333 unsigned (*get_matches_func)(struct lzx_compressor *, const struct lz_match **);
334 void (*skip_bytes_func)(struct lzx_compressor *, unsigned n);
336 /* Number of symbols in the main alphabet (depends on the
337 * @max_window_size since it determines the maximum allowed offset). */
338 unsigned num_main_syms;
340 /* The current match offset LRU queue. */
341 struct lzx_lru_queue queue;
343 /* Space for the sequences of matches/literals that were chosen for each block. */
345 struct lzx_item *chosen_items;
347 /* Information about the LZX blocks the preprocessed input was divided into. */
349 struct lzx_block_spec *block_specs;
351 /* Number of LZX blocks the input was divided into; a.k.a. the number of
352 * elements of @block_specs that are valid. */
355 /* This is simply filled in with zeroes and used to avoid special-casing
356 * the output of the first compressed Huffman code, which conceptually
357 * has a delta taken from a code with all symbols having zero-length codewords. */
359 struct lzx_codes zero_codes;
361 /* The current cost model. */
362 struct lzx_costs costs;
364 /* Lempel-Ziv match-finder. */
367 /* Position in window of next match to return. */
368 u32 match_window_pos;
370 /* The end-of-block position. We can't allow any matches to span this position. */
372 u32 match_window_end;
374 /* When doing more than one match-choosing pass over the data, matches
375 * found by the match-finder are cached in the following array to
376 * achieve a slight speedup when the same matches are needed on
377 * subsequent passes. This is suboptimal because different matches may
378 * be preferred with different cost models, but seems to be a worthwhile tradeoff. */
380 struct lz_match *cached_matches;
381 struct lz_match *cache_ptr;
382 struct lz_match *cache_limit;
384 /* Match-chooser state, used when doing near-optimal parsing.
386 * When matches have been chosen, optimum_cur_idx is set to the position
387 * in the window of the next match/literal to return and optimum_end_idx
388 * is set to the position in the window at the end of the last
389 * match/literal to return. */
390 struct lzx_mc_pos_data *optimum;
391 unsigned optimum_cur_idx;
392 unsigned optimum_end_idx;
394 /* Previous match, used when doing lazy parsing. */
395 struct lz_match prev_match;
399 * Match chooser position data:
401 * An array of these structures is used during the match-choosing algorithm.
402 * They correspond to consecutive positions in the window and are used to keep
403 * track of the cost to reach each position, and the match/literal choices that
404 * need to be made to reach that position.
406 struct lzx_mc_pos_data {
407 /* The approximate minimum cost, in bits, found so far to reach this
408 * position in the window. */
410 #define MC_INFINITE_COST ((u32)~0UL)
412 /* The union here is just for clarity, since the fields are used in two
413 * slightly different ways. Initially, the @prev structure is filled in
414 * first, and links go from later in the window to earlier in the
415 * window. Later, @next structure is filled in and links go from
416 * earlier in the window to later in the window. */
419 /* Position of the start of the match or literal that
420 * was taken to get to this position in the approximate
421 * minimum-cost parse. */
424 /* Offset (as in an LZ (length, offset) pair) of the
425 * match or literal that was taken to get to this
426 * position in the approximate minimum-cost parse. */
430 /* Position at which the match or literal starting at
431 * this position ends in the minimum-cost parse. */
434 /* Offset (as in an LZ (length, offset) pair) of the
435 * match or literal starting at this position in the
436 * approximate minimum-cost parse. */
441 /* Adaptive state that exists after an approximate minimum-cost path to
442 * reach this position is taken. */
443 struct lzx_lru_queue queue;
446 /* Returns the LZX position slot that corresponds to a given match offset,
447 * taking into account the recent offset queue and updating it if the offset is found. */
450 lzx_get_position_slot(u32 offset, struct lzx_lru_queue *queue)
452 unsigned position_slot;
454 /* See if the offset was recently used. */
455 for (int i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
456 if (offset == queue->R[i]) {
459 /* Bring the repeat offset to the front of the
460 * queue. Note: this is, in fact, not a real
461 * LRU queue because repeat matches are simply
462 * swapped to the front. */
463 swap(queue->R[0], queue->R[i]);
465 /* The resulting position slot is simply the first index
466 * at which the offset was found in the queue. */
471 /* The offset was not recently used; look up its real position slot. */
472 position_slot = lzx_get_position_slot_raw(offset + LZX_OFFSET_OFFSET);
474 /* Bring the new offset to the front of the queue. */
475 for (int i = LZX_NUM_RECENT_OFFSETS - 1; i > 0; i--)
476 queue->R[i] = queue->R[i - 1];
477 queue->R[0] = offset;
479 return position_slot;
482 /* Build the main, length, and aligned offset Huffman codes used in LZX.
484 * This takes as input the frequency tables for each code and produces as output
485 * a set of tables that map symbols to codewords and codeword lengths. */
487 lzx_make_huffman_codes(const struct lzx_freqs *freqs,
488 struct lzx_codes *codes,
489 unsigned num_main_syms)
491 make_canonical_huffman_code(num_main_syms,
492 LZX_MAX_MAIN_CODEWORD_LEN,
495 codes->codewords.main);
497 make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
498 LZX_MAX_LEN_CODEWORD_LEN,
501 codes->codewords.len);
503 make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
504 LZX_MAX_ALIGNED_CODEWORD_LEN,
507 codes->codewords.aligned);
511 * Output a precomputed LZX match.
514 * The bitstream to which to write the match.
516 * The type of the LZX block (LZX_BLOCKTYPE_ALIGNED or
517 * LZX_BLOCKTYPE_VERBATIM)
521 * Pointer to a structure that contains the codewords for the main, length,
522 * and aligned offset Huffman codes for the current LZX compressed block.
525 lzx_write_match(struct output_bitstream *out, int block_type,
526 struct lzx_item match, const struct lzx_codes *codes)
528 /* low 8 bits are the match length minus 2 */
529 unsigned match_len_minus_2 = match.data & 0xff;
530 /* Next 17 bits are the position footer */
531 unsigned position_footer = (match.data >> 8) & 0x1ffff; /* 17 bits */
532 /* Next 6 bits are the position slot. */
533 unsigned position_slot = (match.data >> 25) & 0x3f; /* 6 bits */
536 unsigned main_symbol;
537 unsigned num_extra_bits;
538 unsigned verbatim_bits;
539 unsigned aligned_bits;
541 /* If the match length is less than MIN_MATCH_LEN (= 2) +
542 * NUM_PRIMARY_LENS (= 7), the length header contains
543 * the match length minus MIN_MATCH_LEN, and there is no length footer.
546 * Otherwise, the length header contains
547 * NUM_PRIMARY_LENS, and the length footer contains
548 * the match length minus NUM_PRIMARY_LENS minus MIN_MATCH_LEN. */
550 if (match_len_minus_2 < LZX_NUM_PRIMARY_LENS) {
551 len_header = match_len_minus_2;
553 len_header = LZX_NUM_PRIMARY_LENS;
554 len_footer = match_len_minus_2 - LZX_NUM_PRIMARY_LENS;
557 /* Combine the position slot with the length header into a single symbol
558 * that will be encoded with the main code.
560 * The actual main symbol is offset by LZX_NUM_CHARS because values
561 * under LZX_NUM_CHARS are used to indicate a literal byte rather than a match. */
563 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
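	/* For example, position slot 5 combined with length header 3 yields
	 * main symbol ((5 << 3) | 3) + 256 = 299, given LZX_NUM_CHARS == 256. */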
565 /* Output main symbol. */
566 bitstream_put_bits(out, codes->codewords.main[main_symbol],
567 codes->lens.main[main_symbol]);
569 /* If there is a length footer, output it using the
570 * length Huffman code. */
571 if (len_header == LZX_NUM_PRIMARY_LENS)
572 bitstream_put_bits(out, codes->codewords.len[len_footer],
573 codes->lens.len[len_footer]);
575 num_extra_bits = lzx_get_num_extra_bits(position_slot);
577 /* For aligned offset blocks with at least 3 extra bits, output the
578 * verbatim bits literally, then the aligned bits encoded using the
579 * aligned offset code. Otherwise, only the verbatim bits need to be output. */
581 if ((block_type == LZX_BLOCKTYPE_ALIGNED) && (num_extra_bits >= 3)) {
583 verbatim_bits = position_footer >> 3;
584 bitstream_put_bits(out, verbatim_bits,
587 aligned_bits = (position_footer & 7);
588 bitstream_put_bits(out,
589 codes->codewords.aligned[aligned_bits],
590 codes->lens.aligned[aligned_bits]);
592 /* The verbatim bits are the same as the position
593 * footer, in this case. */
594 bitstream_put_bits(out, position_footer, num_extra_bits);
598 /* Output an LZX literal (encoded with the main Huffman code). */
600 lzx_write_literal(struct output_bitstream *out, u8 literal,
601 const struct lzx_codes *codes)
603 bitstream_put_bits(out,
604 codes->codewords.main[literal],
605 codes->lens.main[literal]);
609 lzx_build_precode(const u8 lens[restrict],
610 const u8 prev_lens[restrict],
611 const unsigned num_syms,
612 u32 precode_freqs[restrict LZX_PRECODE_NUM_SYMBOLS],
613 u8 output_syms[restrict num_syms],
614 u8 precode_lens[restrict LZX_PRECODE_NUM_SYMBOLS],
615 u32 precode_codewords[restrict LZX_PRECODE_NUM_SYMBOLS],
616 unsigned *num_additional_bits_ret)
618 memset(precode_freqs, 0,
619 LZX_PRECODE_NUM_SYMBOLS * sizeof(precode_freqs[0]));
621 /* Since the codeword lengths use a form of RLE encoding, the goal here
622 * is to find each run of identical lengths when going through them in
623 * symbol order (including runs of length 1). For each run, as many
624 * lengths are encoded using RLE as possible, and the rest are output literally.
627 * output_syms[] will be filled in with the length symbols that will be
628 * output, including RLE codes, not yet encoded using the precode.
630 * cur_run_len keeps track of how many code word lengths are in the
631 * current run of identical lengths. */
632 unsigned output_syms_idx = 0;
633 unsigned cur_run_len = 1;
634 unsigned num_additional_bits = 0;
635 for (unsigned i = 1; i <= num_syms; i++) {
637 if (i != num_syms && lens[i] == lens[i - 1]) {
638 /* Still in a run--- keep going. */
643 /* Run ended! Check if it is a run of zeroes or a run of nonzero lengths. */
646 /* The symbol that was repeated in the run--- not to be confused
647 * with the length *of* the run (cur_run_len) */
648 unsigned len_in_run = lens[i - 1];
650 if (len_in_run == 0) {
651 /* A run of 0's. Encode it in as few length
652 * codes as we can. */
654 /* The magic length 18 indicates a run of 20 + n zeroes,
655 * where n is an uncompressed literal 5-bit integer that
656 * follows the magic length. */
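			/* For example, a run of 23 zero lengths is encoded as
			 * the symbol 18 followed by the 5-bit value 3, since
			 * 23 = 20 + 3. */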
657 while (cur_run_len >= 20) {
658 unsigned additional_bits;
660 additional_bits = min(cur_run_len - 20, 0x1f);
661 num_additional_bits += 5;
663 output_syms[output_syms_idx++] = 18;
664 output_syms[output_syms_idx++] = additional_bits;
665 cur_run_len -= 20 + additional_bits;
668 /* The magic length 17 indicates a run of 4 + n zeroes,
669 * where n is an uncompressed literal 4-bit integer that
670 * follows the magic length. */
671 while (cur_run_len >= 4) {
672 unsigned additional_bits;
674 additional_bits = min(cur_run_len - 4, 0xf);
675 num_additional_bits += 4;
677 output_syms[output_syms_idx++] = 17;
678 output_syms[output_syms_idx++] = additional_bits;
679 cur_run_len -= 4 + additional_bits;
684 /* A run of nonzero lengths. */
686 /* The magic length 19 indicates a run of 4 + n
687 * nonzeroes, where n is a literal bit that follows the
688 * magic length, and where the value of the lengths in
689 * the run is given by an extra length symbol, encoded
690 * with the precode, that follows the literal bit.
692 * The extra length symbol is encoded as a difference
693 * from the length of the codeword for the first symbol
694 * in the run in the previous code.
696 while (cur_run_len >= 4) {
697 unsigned additional_bits;
700 additional_bits = (cur_run_len > 4);
701 num_additional_bits += 1;
702 delta = (signed char)prev_lens[i - cur_run_len] -
703 (signed char)len_in_run;
707 precode_freqs[(unsigned char)delta]++;
708 output_syms[output_syms_idx++] = 19;
709 output_syms[output_syms_idx++] = additional_bits;
710 output_syms[output_syms_idx++] = delta;
711 cur_run_len -= 4 + additional_bits;
715 /* Any remaining lengths in the run are output without RLE,
716 * as a difference from the length of that codeword in the previous code. */
718 while (cur_run_len > 0) {
721 delta = (signed char)prev_lens[i - cur_run_len] -
722 (signed char)len_in_run;
726 precode_freqs[(unsigned char)delta]++;
727 output_syms[output_syms_idx++] = delta;
734 /* Build the precode from the frequencies of the length symbols. */
736 make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
737 LZX_MAX_PRE_CODEWORD_LEN,
738 precode_freqs, precode_lens,
741 *num_additional_bits_ret = num_additional_bits;
743 return output_syms_idx;
747 * Output a Huffman code in the compressed form used in LZX.
749 * The Huffman code is represented in the output as a logical series of codeword
750 * lengths from which the Huffman code, which must be in canonical form, can be reconstructed.
753 * The codeword lengths are themselves compressed using a separate Huffman code,
754 * the "precode", which contains a symbol for each possible codeword length in
755 * the larger code as well as several special symbols to represent repeated
756 * codeword lengths (a form of run-length encoding). The precode is itself
757 * constructed in canonical form, and its codeword lengths are represented
758 * literally in 20 4-bit fields that immediately precede the compressed codeword
759 * lengths of the larger code.
761 * Furthermore, the codeword lengths of the larger code are actually represented
762 * as deltas from the codeword lengths of the corresponding code in the previous block.
766 * Bitstream to which to write the compressed Huffman code.
768 * The codeword lengths, indexed by symbol, in the Huffman code.
770 * The codeword lengths, indexed by symbol, in the corresponding Huffman
771 * code in the previous block, or all zeroes if this is the first block.
773 * The number of symbols in the Huffman code.
776 lzx_write_compressed_code(struct output_bitstream *out,
777 const u8 lens[restrict],
778 const u8 prev_lens[restrict],
781 u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
782 u8 output_syms[num_syms];
783 u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
784 u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
786 unsigned num_output_syms;
790 num_output_syms = lzx_build_precode(lens,
799 /* Write the lengths of the precode codewords to the output. */
800 for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
801 bitstream_put_bits(out, precode_lens[i],
802 LZX_PRECODE_ELEMENT_SIZE);
804 /* Write the length symbols, encoded with the precode, to the output. */
806 for (i = 0; i < num_output_syms; ) {
807 precode_sym = output_syms[i++];
809 bitstream_put_bits(out, precode_codewords[precode_sym],
810 precode_lens[precode_sym]);
811 switch (precode_sym) {
813 bitstream_put_bits(out, output_syms[i++], 4);
816 bitstream_put_bits(out, output_syms[i++], 5);
819 bitstream_put_bits(out, output_syms[i++], 1);
820 bitstream_put_bits(out,
821 precode_codewords[output_syms[i]],
822 precode_lens[output_syms[i]]);
832 * Write all matches and literal bytes (which were precomputed) in an LZX
833 * compressed block to the output bitstream in the final compressed representation.
837 * The output bitstream.
839 * The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
840 * LZX_BLOCKTYPE_VERBATIM).
842 * The array of matches/literals to output.
844 * Number of matches/literals to output (length of @items).
846 * The main, length, and aligned offset Huffman codes for the current
847 * LZX compressed block.
850 lzx_write_items(struct output_bitstream *ostream, int block_type,
851 const struct lzx_item items[], u32 num_items,
852 const struct lzx_codes *codes)
854 for (u32 i = 0; i < num_items; i++) {
855 /* The high bit of the 32-bit intermediate representation
856 * indicates whether the item is an actual LZ-style match (1) or
857 * a literal byte (0). */
858 if (items[i].data & 0x80000000)
859 lzx_write_match(ostream, block_type, items[i], codes);
861 lzx_write_literal(ostream, items[i].data, codes);
865 /* Write an LZX aligned offset or verbatim block to the output. */
867 lzx_write_compressed_block(int block_type,
869 unsigned max_window_size,
870 unsigned num_main_syms,
871 struct lzx_item * chosen_items,
872 unsigned num_chosen_items,
873 const struct lzx_codes * codes,
874 const struct lzx_codes * prev_codes,
875 struct output_bitstream * ostream)
879 LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
880 block_type == LZX_BLOCKTYPE_VERBATIM);
882 /* The first three bits indicate the type of block and are one of the
883 * LZX_BLOCKTYPE_* constants. */
884 bitstream_put_bits(ostream, block_type, 3);
886 /* Output the block size.
888 * The original LZX format seemed to always encode the block size in 3
889 * bytes. However, the implementation in WIMGAPI, as used in WIM files,
890 * uses the first bit to indicate whether the block is the default size
891 * (32768) or a different size given explicitly by the next 16 bits.
893 * By default, this compressor uses a window size of 32768 and therefore
894 * follows the WIMGAPI behavior. However, this compressor also supports
895 * window sizes greater than 32768 bytes, which do not appear to be
896 * supported by WIMGAPI. In such cases, we retain the default size bit
897 * to mean a size of 32768 bytes but output non-default block size in 24
898 * bits rather than 16. The compatibility of this behavior is unknown
899 * because WIMs created with chunk size greater than 32768 can seemingly
900 * only be opened by wimlib anyway. */
901 if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
902 bitstream_put_bits(ostream, 1, 1);
904 bitstream_put_bits(ostream, 0, 1);
906 if (max_window_size >= 65536)
907 bitstream_put_bits(ostream, block_size >> 16, 8);
909 bitstream_put_bits(ostream, block_size, 16);
912 /* Write out lengths of the main code. Note that the LZX specification
913 * incorrectly states that the aligned offset code comes after the
914 * length code, but in fact it is the very first code to be written
915 * (before the main code). */
916 if (block_type == LZX_BLOCKTYPE_ALIGNED)
917 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
918 bitstream_put_bits(ostream, codes->lens.aligned[i],
919 LZX_ALIGNEDCODE_ELEMENT_SIZE);
921 /* Write the precode and lengths for the first LZX_NUM_CHARS symbols in
922 * the main code, which are the codewords for literal bytes. */
923 lzx_write_compressed_code(ostream,
925 prev_codes->lens.main,
928 /* Write the precode and lengths for the rest of the main code, which
929 * are the codewords for match headers. */
930 lzx_write_compressed_code(ostream,
931 codes->lens.main + LZX_NUM_CHARS,
932 prev_codes->lens.main + LZX_NUM_CHARS,
933 num_main_syms - LZX_NUM_CHARS);
935 /* Write the precode and lengths for the length code. */
936 lzx_write_compressed_code(ostream,
938 prev_codes->lens.len,
939 LZX_LENCODE_NUM_SYMBOLS);
941 /* Write the actual matches and literals. */
942 lzx_write_items(ostream, block_type,
943 chosen_items, num_chosen_items, codes);
946 /* Write out the LZX blocks that were computed. */
948 lzx_write_all_blocks(struct lzx_compressor *c, struct output_bitstream *ostream)
951 const struct lzx_codes *prev_codes = &c->zero_codes;
952 for (unsigned i = 0; i < c->num_blocks; i++) {
953 const struct lzx_block_spec *spec = &c->block_specs[i];
955 LZX_DEBUG("Writing block %u/%u (type=%d, size=%u, num_chosen_items=%u)...",
956 i + 1, c->num_blocks,
957 spec->block_type, spec->block_size,
958 spec->num_chosen_items);
960 lzx_write_compressed_block(spec->block_type,
965 spec->num_chosen_items,
970 prev_codes = &spec->codes;
974 /* Constructs an LZX item from a literal byte and updates the main code symbol frequencies. */
977 lzx_tally_literal(u8 lit, struct lzx_freqs *freqs)
983 /* Constructs an LZX match from an offset and a length, and updates the LRU
984 * queue and the frequency of symbols in the main, length, and aligned offset
985 * alphabets. The return value is a 32-bit number that provides the match in an
986 * intermediate representation documented in `struct lzx_item' above. */
988 lzx_tally_match(unsigned match_len, u32 match_offset,
989 struct lzx_freqs *freqs, struct lzx_lru_queue *queue)
991 unsigned position_slot;
992 unsigned position_footer;
994 unsigned main_symbol;
996 unsigned adjusted_match_len;
998 LZX_ASSERT(match_len >= LZX_MIN_MATCH_LEN && match_len <= LZX_MAX_MATCH_LEN);
1000 /* The match offset shall be encoded as a position slot (itself encoded
1001 * as part of the main symbol) and a position footer. */
1002 position_slot = lzx_get_position_slot(match_offset, queue);
1003 position_footer = (match_offset + LZX_OFFSET_OFFSET) &
1004 ((1U << lzx_get_num_extra_bits(position_slot)) - 1);
1006 /* The match length shall be encoded as a length header (itself encoded
1007 * as part of the main symbol) and an optional length footer. */
1008 adjusted_match_len = match_len - LZX_MIN_MATCH_LEN;
1009 if (adjusted_match_len < LZX_NUM_PRIMARY_LENS) {
1010 /* No length footer needed. */
1011 len_header = adjusted_match_len;
1013 /* Length footer needed. It will be encoded using the length
1015 len_header = LZX_NUM_PRIMARY_LENS;
1016 len_footer = adjusted_match_len - LZX_NUM_PRIMARY_LENS;
1017 freqs->len[len_footer]++;
1020 /* Account for the main symbol. */
1021 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
1023 freqs->main[main_symbol]++;
1025 /* In an aligned offset block, 3 bits of the position footer are output
1026 * as an aligned offset symbol. Account for this, although we may
1027 * ultimately decide to output the block as verbatim. */
1029 /* The following check is equivalent to:
1031 * if (lzx_extra_bits[position_slot] >= 3)
1033 * Note that this correctly excludes position slots that correspond to
1034 * recent offsets. */
1035 if (position_slot >= 8)
1036 freqs->aligned[position_footer & 7]++;
1038 /* Pack the position slot, position footer, and match length into an
1039 * intermediate representation. See `struct lzx_item' for details.
1041 LZX_ASSERT(LZX_MAX_POSITION_SLOTS <= 64);
1042 LZX_ASSERT(lzx_get_num_extra_bits(LZX_MAX_POSITION_SLOTS - 1) <= 17);
1043 LZX_ASSERT(LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1 <= 256);
1045 LZX_ASSERT(position_slot <= (1U << (31 - 25)) - 1);
1046 LZX_ASSERT(position_footer <= (1U << (25 - 8)) - 1);
1047 LZX_ASSERT(adjusted_match_len <= (1U << (8 - 0)) - 1);
1049 (position_slot << 25) |
1050 (position_footer << 8) |
1051 (adjusted_match_len);
1054 /* Returns the cost, in bits, to output a literal byte using the specified cost model. */
1057 lzx_literal_cost(u8 c, const struct lzx_costs * costs)
1059 return costs->main[c];
1062 /* Given a (length, offset) pair that could be turned into a valid LZX match as
1063 * well as costs for the codewords in the main, length, and aligned Huffman
1064 * codes, return the approximate number of bits it will take to represent this
1065 * match in the compressed output. This takes into account the match offset
1066 * LRU queue and also updates it. */
1068 lzx_match_cost(unsigned length, u32 offset, const struct lzx_costs *costs,
1069 struct lzx_lru_queue *queue)
1071 unsigned position_slot;
1072 unsigned len_header, main_symbol;
1073 unsigned num_extra_bits;
1076 position_slot = lzx_get_position_slot(offset, queue);
1078 len_header = min(length - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
1079 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
1081 /* Account for main symbol. */
1082 cost += costs->main[main_symbol];
1084 /* Account for extra position information. */
1085 num_extra_bits = lzx_get_num_extra_bits(position_slot);
1086 if (num_extra_bits >= 3) {
1087 cost += num_extra_bits - 3;
1088 cost += costs->aligned[(offset + LZX_OFFSET_OFFSET) & 7];
1090 cost += num_extra_bits;
1093 /* Account for extra length information. */
1094 if (len_header == LZX_NUM_PRIMARY_LENS)
1095 cost += costs->len[length - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
1102 /* Set the cost model @c->costs from the Huffman codeword lengths specified in @lens.
1105 * The cost model and codeword lengths are almost the same thing, but the
1106 * Huffman codewords with length 0 correspond to symbols with zero frequency
1107 * that still need to be assigned actual costs. The specific values assigned
1108 * are arbitrary, but they should be fairly high (near the maximum codeword
1109 * length) to take into account the fact that uses of these symbols are expected to be rare. */
1112 lzx_set_costs(struct lzx_compressor *c, const struct lzx_lens * lens,
1118 for (i = 0; i < c->num_main_syms; i++)
1119 c->costs.main[i] = lens->main[i] ? lens->main[i] : nostat;
1122 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1123 c->costs.len[i] = lens->len[i] ? lens->len[i] : nostat;
1125 /* Aligned offset code */
1126 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1127 c->costs.aligned[i] = lens->aligned[i] ? lens->aligned[i] : nostat / 2;
1130 /* Don't allow matches to span the end of an LZX block. */
1132 maybe_truncate_matches(struct lz_match matches[], u32 num_matches,
1133 struct lzx_compressor *c)
1135 if (c->match_window_end < c->cur_window_size && num_matches != 0) {
1136 u32 limit = c->match_window_end - c->match_window_pos;
1138 if (limit >= LZX_MIN_MATCH_LEN) {
1140 u32 i = num_matches - 1;
1142 if (matches[i].len >= limit) {
1143 matches[i].len = limit;
1145 /* Truncation might produce multiple
1146 * matches with length 'limit'. Keep at most one. */
1148 num_matches = i + 1;
1159 lzx_get_matches_fillcache_singleblock(struct lzx_compressor *c,
1160 const struct lz_match **matches_ret)
1162 struct lz_match *cache_ptr;
1163 struct lz_match *matches;
1164 unsigned num_matches;
1166 cache_ptr = c->cache_ptr;
1167 matches = cache_ptr + 1;
1168 if (likely(cache_ptr <= c->cache_limit)) {
1169 num_matches = lz_mf_get_matches(c->mf, matches);
1170 cache_ptr->len = num_matches;
1171 c->cache_ptr = matches + num_matches;
1175 c->match_window_pos++;
1176 *matches_ret = matches;
1181 lzx_get_matches_fillcache_multiblock(struct lzx_compressor *c,
1182 const struct lz_match **matches_ret)
1184 struct lz_match *cache_ptr;
1185 struct lz_match *matches;
1186 unsigned num_matches;
1188 cache_ptr = c->cache_ptr;
1189 matches = cache_ptr + 1;
1190 if (likely(cache_ptr <= c->cache_limit)) {
1191 num_matches = lz_mf_get_matches(c->mf, matches);
1192 num_matches = maybe_truncate_matches(matches, num_matches, c);
1193 cache_ptr->len = num_matches;
1194 c->cache_ptr = matches + num_matches;
1198 c->match_window_pos++;
1199 *matches_ret = matches;
1204 lzx_get_matches_usecache(struct lzx_compressor *c,
1205 const struct lz_match **matches_ret)
1207 struct lz_match *cache_ptr;
1208 struct lz_match *matches;
1209 unsigned num_matches;
1211 cache_ptr = c->cache_ptr;
1212 matches = cache_ptr + 1;
1213 if (cache_ptr <= c->cache_limit) {
1214 num_matches = cache_ptr->len;
1215 c->cache_ptr = matches + num_matches;
1219 c->match_window_pos++;
1220 *matches_ret = matches;
1225 lzx_get_matches_usecache_nocheck(struct lzx_compressor *c,
1226 const struct lz_match **matches_ret)
1228 struct lz_match *cache_ptr;
1229 struct lz_match *matches;
1230 unsigned num_matches;
1232 cache_ptr = c->cache_ptr;
1233 matches = cache_ptr + 1;
1234 num_matches = cache_ptr->len;
1235 c->cache_ptr = matches + num_matches;
1236 c->match_window_pos++;
1237 *matches_ret = matches;
1242 lzx_get_matches_nocache_singleblock(struct lzx_compressor *c,
1243 const struct lz_match **matches_ret)
1245 struct lz_match *matches;
1246 unsigned num_matches;
1248 matches = c->cache_ptr;
1249 num_matches = lz_mf_get_matches(c->mf, matches);
1250 c->match_window_pos++;
1251 *matches_ret = matches;
1256 lzx_get_matches_nocache_multiblock(struct lzx_compressor *c,
1257 const struct lz_match **matches_ret)
1259 struct lz_match *matches;
1260 unsigned num_matches;
1262 matches = c->cache_ptr;
1263 num_matches = lz_mf_get_matches(c->mf, matches);
1264 num_matches = maybe_truncate_matches(matches, num_matches, c);
1265 c->match_window_pos++;
1266 *matches_ret = matches;
1271 * Find matches at the next position in the window.
1273 * Returns the number of matches found and sets *matches_ret to point to the
1274 * matches array. The matches will be sorted by strictly increasing length and
1277 static inline unsigned
1278 lzx_get_matches(struct lzx_compressor *c,
1279 const struct lz_match **matches_ret)
1281 return (*c->get_matches_func)(c, matches_ret);
1285 lzx_skip_bytes_fillcache(struct lzx_compressor *c, unsigned n)
1287 struct lz_match *cache_ptr;
1289 cache_ptr = c->cache_ptr;
1290 c->match_window_pos += n;
1291 lz_mf_skip_positions(c->mf, n);
1292 if (cache_ptr <= c->cache_limit) {
1296 } while (--n && cache_ptr <= c->cache_limit);
1298 c->cache_ptr = cache_ptr;
1302 lzx_skip_bytes_usecache(struct lzx_compressor *c, unsigned n)
1304 struct lz_match *cache_ptr;
1306 cache_ptr = c->cache_ptr;
1307 c->match_window_pos += n;
1308 if (cache_ptr <= c->cache_limit) {
1310 cache_ptr += 1 + cache_ptr->len;
1311 } while (--n && cache_ptr <= c->cache_limit);
1313 c->cache_ptr = cache_ptr;
1317 lzx_skip_bytes_usecache_nocheck(struct lzx_compressor *c, unsigned n)
1319 struct lz_match *cache_ptr;
1321 cache_ptr = c->cache_ptr;
1322 c->match_window_pos += n;
1324 cache_ptr += 1 + cache_ptr->len;
1326 c->cache_ptr = cache_ptr;
1330 lzx_skip_bytes_nocache(struct lzx_compressor *c, unsigned n)
1332 c->match_window_pos += n;
1333 lz_mf_skip_positions(c->mf, n);
1337 * Skip the specified number of positions in the window (don't search for matches in them). */
1341 lzx_skip_bytes(struct lzx_compressor *c, unsigned n)
1343 return (*c->skip_bytes_func)(c, n);
1347 * Reverse the linked list of near-optimal matches so that they can be returned
1348 * in forwards order.
1350 * Returns the first match in the list.
1352 static struct lz_match
1353 lzx_match_chooser_reverse_list(struct lzx_compressor *c, unsigned cur_pos)
1355 unsigned prev_link, saved_prev_link;
1356 unsigned prev_match_offset, saved_prev_match_offset;
1358 c->optimum_end_idx = cur_pos;
1360 saved_prev_link = c->optimum[cur_pos].prev.link;
1361 saved_prev_match_offset = c->optimum[cur_pos].prev.match_offset;
1364 prev_link = saved_prev_link;
1365 prev_match_offset = saved_prev_match_offset;
1367 saved_prev_link = c->optimum[prev_link].prev.link;
1368 saved_prev_match_offset = c->optimum[prev_link].prev.match_offset;
1370 c->optimum[prev_link].next.link = cur_pos;
1371 c->optimum[prev_link].next.match_offset = prev_match_offset;
1373 cur_pos = prev_link;
1374 } while (cur_pos != 0);
1376 c->optimum_cur_idx = c->optimum[0].next.link;
1378 return (struct lz_match)
1379 { .len = c->optimum_cur_idx,
1380 .offset = c->optimum[0].next.match_offset,
1385 * lzx_choose_near_optimal_item() -
1387 * Choose an approximately optimal match or literal to use at the next position
1388 * in the string, or "window", being LZ-encoded.
1390 * This is based on algorithms used in 7-Zip, including the DEFLATE encoder
1391 * and the LZMA encoder, written by Igor Pavlov.
1393 * Unlike a greedy parser that always takes the longest match, or even a "lazy"
1394 * parser with one match/literal look-ahead like zlib, the algorithm used here
1395 * may look ahead many matches/literals to determine the approximately optimal
1396 * match/literal to code next. The motivation is that the compression ratio is
1397 * improved if the compressor can do things like use a shorter-than-possible
1398 * match in order to allow a longer match later, and also take into account the
1399 * estimated real cost of coding each match/literal based on the underlying
1402 * Still, this is not a true optimal parser for several reasons:
1404 * - Real compression formats use entropy encoding of the literal/match
1405 * sequence, so the real cost of coding each match or literal is unknown until
1406 * the parse is fully determined. It can be approximated based on iterative
1407 * parses, but the end result is not guaranteed to be globally optimal.
1409 * - Very long matches are chosen immediately. This is because locations with
1410 * long matches are likely to have many possible alternatives that would cause
1411 * slow optimal parsing, but also such locations are already highly
1412 * compressible so it is not too harmful to just grab the longest match.
1414 * - Not all possible matches at each location are considered because the
1415 * underlying match-finder limits the number and type of matches produced at
1416 * each position. For example, for a given match length it's usually not
1417 * worth it to consider matches other than the lowest-offset match,
1418 * except in the case of a repeat offset.
1420 * - Although we take into account the adaptive state (in LZX, the recent offset
1421 * queue), coding decisions made with respect to the adaptive state will be
1422 * locally optimal but will not necessarily be globally optimal. This is
1423 * because the algorithm only keeps the least-costly path to get to a given
1424 * location and does not take into account that a slightly more costly path
1425 * could result in a different adaptive state that ultimately results in a
1426 * lower global cost.
1428 * - The array space used by this function is bounded, so in degenerate cases it
1429 * is forced to start returning matches/literals before the algorithm has really finished.
1432 * Each call to this function does one of two things:
1434 * 1. Build a sequence of near-optimal matches/literals, up to some point, that
1435 * will be returned by subsequent calls to this function, then return the first one.
1440 * 2. Return the next match/literal previously computed by a call to this function.
1443 * The return value is a (length, offset) pair specifying the match or literal
1444 * chosen. For literals, the length is 0 or 1 and the offset is meaningless.
1446 static struct lz_match
1447 lzx_choose_near_optimal_item(struct lzx_compressor *c)
1449 unsigned num_matches;
1450 const struct lz_match *matches;
1451 struct lz_match match;
1452 unsigned longest_len;
1453 unsigned longest_rep_len;
1454 u32 longest_rep_offset;
1458 if (c->optimum_cur_idx != c->optimum_end_idx) {
1459 /* Case 2: Return the next match/literal already found. */
1460 match.len = c->optimum[c->optimum_cur_idx].next.link -
1462 match.offset = c->optimum[c->optimum_cur_idx].next.match_offset;
1464 c->optimum_cur_idx = c->optimum[c->optimum_cur_idx].next.link;
1468 /* Case 1: Compute a new list of matches/literals to return. */
1470 c->optimum_cur_idx = 0;
1471 c->optimum_end_idx = 0;
1473 /* Search for matches at recent offsets. Only keep the one with the
1474 * longest match length. */
1475 longest_rep_len = LZX_MIN_MATCH_LEN - 1;
1476 if (c->match_window_pos >= 1) {
1477 unsigned limit = min(LZX_MAX_MATCH_LEN,
1478 c->match_window_end - c->match_window_pos);
1479 for (int i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
1480 u32 offset = c->queue.R[i];
1481 const u8 *strptr = &c->cur_window[c->match_window_pos];
1482 const u8 *matchptr = strptr - offset;
1484 while (len < limit && strptr[len] == matchptr[len])
1486 if (len > longest_rep_len) {
1487 longest_rep_len = len;
1488 longest_rep_offset = offset;
1493 /* If there's a long match with a recent offset, take it. */
1494 if (longest_rep_len >= c->params.nice_match_length) {
1495 lzx_skip_bytes(c, longest_rep_len);
1496 return (struct lz_match) {
1497 .len = longest_rep_len,
1498 .offset = longest_rep_offset,
1502 /* Search other matches. */
1503 num_matches = lzx_get_matches(c, &matches);
1505 /* If there's a long match, take it. */
1507 longest_len = matches[num_matches - 1].len;
1508 if (longest_len >= c->params.nice_match_length) {
1509 lzx_skip_bytes(c, longest_len - 1);
1510 return matches[num_matches - 1];
1516 /* Calculate the cost to reach the next position by coding a literal.
1518 c->optimum[1].queue = c->queue;
1519 c->optimum[1].cost = lzx_literal_cost(c->cur_window[c->match_window_pos - 1],
1521 c->optimum[1].prev.link = 0;
1523 /* Calculate the cost to reach any position up to and including that
1524 * reached by the longest match.
1526 * Note: We consider only the lowest-offset match that reaches each length.
1529 * Note: Some of the cost calculation stays the same for each offset,
1530 * regardless of how many lengths it gets used for. Therefore, to
1531 * improve performance, we hand-code the cost calculation instead of
1532 * calling lzx_match_cost() to do a from-scratch cost evaluation at each length. */
1534 for (unsigned i = 0, len = 2; i < num_matches; i++) {
1536 struct lzx_lru_queue queue;
1538 unsigned position_slot;
1539 unsigned num_extra_bits;
1541 offset = matches[i].offset;
1545 position_slot = lzx_get_position_slot(offset, &queue);
1546 num_extra_bits = lzx_get_num_extra_bits(position_slot);
1547 if (num_extra_bits >= 3) {
1548 position_cost += num_extra_bits - 3;
1549 position_cost += c->costs.aligned[(offset + LZX_OFFSET_OFFSET) & 7];
1551 position_cost += num_extra_bits;
1555 unsigned len_header;
1556 unsigned main_symbol;
1559 cost = position_cost;
1561 len_header = min(len - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
1562 main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
1563 cost += c->costs.main[main_symbol];
1564 if (len_header == LZX_NUM_PRIMARY_LENS)
1565 cost += c->costs.len[len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
1567 c->optimum[len].queue = queue;
1568 c->optimum[len].prev.link = 0;
1569 c->optimum[len].prev.match_offset = offset;
1570 c->optimum[len].cost = cost;
1571 } while (++len <= matches[i].len);
1573 end_pos = longest_len;
1575 if (longest_rep_len >= LZX_MIN_MATCH_LEN) {
1576 struct lzx_lru_queue queue;
1579 while (end_pos < longest_rep_len)
1580 c->optimum[++end_pos].cost = MC_INFINITE_COST;
1583 cost = lzx_match_cost(longest_rep_len, longest_rep_offset,
1585 if (cost <= c->optimum[longest_rep_len].cost) {
1586 c->optimum[longest_rep_len].queue = queue;
1587 c->optimum[longest_rep_len].prev.link = 0;
1588 c->optimum[longest_rep_len].prev.match_offset = longest_rep_offset;
1589 c->optimum[longest_rep_len].cost = cost;
1593 /* Step forward, calculating the estimated minimum cost to reach each
1594 * position. The algorithm may find multiple paths to reach each
1595 * position; only the lowest-cost path is saved.
1597 * The progress of the parse is tracked in the @c->optimum array, which
1598 * for each position contains the minimum cost to reach that position,
1599 * the index of the start of the match/literal taken to reach that
1600 * position through the minimum-cost path, the offset of the match taken
1601 * (not relevant for literals), and the adaptive state that will exist
1602 * at that position after the minimum-cost path is taken. The @cur_pos
1603 * variable stores the position at which the algorithm is currently
1604 * considering coding choices, and the @end_pos variable stores the
1605 * greatest position at which the costs of coding choices have been
1606 * saved. (Actually, the algorithm guarantees that all positions up to
1607 * and including @end_pos are reachable by at least one path.)
1609 * The loop terminates when any one of the following conditions occurs:
1611 * 1. A match with length greater than or equal to @nice_match_length is
1612 * found. When this occurs, the algorithm chooses this match
1613 * unconditionally, and consequently the near-optimal match/literal
1614 * sequence up to and including that match is fully determined and it
1615 * can begin returning the match/literal list.
1617 * 2. @cur_pos reaches a position not overlapped by a preceding match.
1618 * In such cases, the near-optimal match/literal sequence up to
1619 * @cur_pos is fully determined and it can begin returning the
1620 * match/literal list.
1622 * 3. Failing either of the above in a degenerate case, the loop
1623 * terminates when space in the @c->optimum array is exhausted.
1624 * This terminates the algorithm and forces it to start returning
1625 * matches/literals even though they may not be globally optimal.
1627 * Upon loop termination, a nonempty list of matches/literals will have
1628 * been produced and stored in the @optimum array. These
1629 * matches/literals are linked in reverse order, so the last thing this
1630 * function does is reverse this list and return the first
1631 * match/literal, leaving the rest to be returned immediately by
1632 * subsequent calls to this function.
1638 /* Advance to next position. */
1641 /* Check termination conditions (2) and (3) noted above. */
1642 if (cur_pos == end_pos || cur_pos == LZX_OPTIM_ARRAY_LENGTH)
1643 return lzx_match_chooser_reverse_list(c, cur_pos);
1645 /* Search for matches at recent offsets. */
1646 longest_rep_len = LZX_MIN_MATCH_LEN - 1;
1647 unsigned limit = min(LZX_MAX_MATCH_LEN,
1648 c->match_window_end - c->match_window_pos);
1649 for (int i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
1650 u32 offset = c->optimum[cur_pos].queue.R[i];
1651 const u8 *strptr = &c->cur_window[c->match_window_pos];
1652 const u8 *matchptr = strptr - offset;
1654 while (len < limit && strptr[len] == matchptr[len])
1656 if (len > longest_rep_len) {
1657 longest_rep_len = len;
1658 longest_rep_offset = offset;
1662 /* If we found a long match at a recent offset, choose it immediately. */
1664 if (longest_rep_len >= c->params.nice_match_length) {
1666 /* Build the list of matches to return and get the first one. */
1667 match = lzx_match_chooser_reverse_list(c, cur_pos);
1669 /* Append the long match to the end of the list. */
1670 c->optimum[cur_pos].next.match_offset = longest_rep_offset;
1671 c->optimum[cur_pos].next.link = cur_pos + longest_rep_len;
1672 c->optimum_end_idx = cur_pos + longest_rep_len;
1674 /* Skip over the remaining bytes of the long match. */
1675 lzx_skip_bytes(c, longest_rep_len);
1677 /* Return first match in the list. */
1681 /* Search other matches. */
1682 num_matches = lzx_get_matches(c, &matches);
1684 /* If there's a long match, take it. */
1686 longest_len = matches[num_matches - 1].len;
1687 if (longest_len >= c->params.nice_match_length) {
1688 /* Build the list of matches to return and get the first one. */
1690 match = lzx_match_chooser_reverse_list(c, cur_pos);
1692 /* Append the long match to the end of the list. */
1693 c->optimum[cur_pos].next.match_offset =
1694 matches[num_matches - 1].offset;
1695 c->optimum[cur_pos].next.link = cur_pos + longest_len;
1696 c->optimum_end_idx = cur_pos + longest_len;
1698 /* Skip over the remaining bytes of the long match. */
1699 lzx_skip_bytes(c, longest_len - 1);
1701 /* Return first match in the list. */
1708 while (end_pos < cur_pos + longest_len)
1709 c->optimum[++end_pos].cost = MC_INFINITE_COST;
1711 /* Consider coding a literal. */
1712 cost = c->optimum[cur_pos].cost +
1713 lzx_literal_cost(c->cur_window[c->match_window_pos - 1],
1715 if (cost < c->optimum[cur_pos + 1].cost) {
1716 c->optimum[cur_pos + 1].queue = c->optimum[cur_pos].queue;
1717 c->optimum[cur_pos + 1].cost = cost;
1718 c->optimum[cur_pos + 1].prev.link = cur_pos;
1721 /* Consider coding a match.
1723 * The hard-coded cost calculation is done for the same reason
1724 * stated in the comment for the similar loop earlier.
1725 * Actually, it is *this* one that has the biggest effect on
1726 * performance; overall LZX compression is > 10% faster with
1727 * this code compared to calling lzx_match_cost() with each length. */
1729 for (unsigned i = 0, len = 2; i < num_matches; i++) {
1731 struct lzx_lru_queue queue;
1733 unsigned position_slot;
1734 unsigned num_extra_bits;
1736 offset = matches[i].offset;
1737 queue = c->optimum[cur_pos].queue;
1738 position_cost = c->optimum[cur_pos].cost;
1740 position_slot = lzx_get_position_slot(offset, &queue);
1741 num_extra_bits = lzx_get_num_extra_bits(position_slot);
1742 if (num_extra_bits >= 3) {
1743 position_cost += num_extra_bits - 3;
1744 position_cost += c->costs.aligned[
1745 (offset + LZX_OFFSET_OFFSET) & 7];
1747 position_cost += num_extra_bits;
1751 unsigned len_header;
1752 unsigned main_symbol;
1755 cost = position_cost;
1757 len_header = min(len - LZX_MIN_MATCH_LEN,
1758 LZX_NUM_PRIMARY_LENS);
1759 main_symbol = ((position_slot << 3) | len_header) +
1761 cost += c->costs.main[main_symbol];
1762 if (len_header == LZX_NUM_PRIMARY_LENS) {
1763 cost += c->costs.len[len -
1765 LZX_NUM_PRIMARY_LENS];
1767 if (cost < c->optimum[cur_pos + len].cost) {
1768 c->optimum[cur_pos + len].queue = queue;
1769 c->optimum[cur_pos + len].prev.link = cur_pos;
1770 c->optimum[cur_pos + len].prev.match_offset = offset;
1771 c->optimum[cur_pos + len].cost = cost;
1773 } while (++len <= matches[i].len);
1776 if (longest_rep_len >= LZX_MIN_MATCH_LEN) {
1777 struct lzx_lru_queue queue;
1779 while (end_pos < cur_pos + longest_rep_len)
1780 c->optimum[++end_pos].cost = MC_INFINITE_COST;
1782 queue = c->optimum[cur_pos].queue;
1784 cost = c->optimum[cur_pos].cost +
1785 lzx_match_cost(longest_rep_len, longest_rep_offset,
1787 if (cost <= c->optimum[cur_pos + longest_rep_len].cost) {
1788 c->optimum[cur_pos + longest_rep_len].queue =
1790 c->optimum[cur_pos + longest_rep_len].prev.link =
1792 c->optimum[cur_pos + longest_rep_len].prev.match_offset =
1794 c->optimum[cur_pos + longest_rep_len].cost =
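/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the back-link reversal described in the comments above.  Each
 * position records which position it was reached from; walking those
 * back-links from the end of the block and flipping them produces the
 * forward order in which matches/literals are handed out by later calls.
 * The structure and names below are simplified assumptions, not the exact
 * fields used by lzx_match_chooser_reverse_list().
 */
struct sketch_pos {
	unsigned prev_link;	/* position this one was reached from */
	unsigned next_link;	/* filled in by the reversal below */
};

static void
sketch_reverse_links(struct sketch_pos *opt, unsigned end_pos)
{
	unsigned cur = end_pos;

	while (cur != 0) {
		unsigned prev = opt[cur].prev_link;
		opt[prev].next_link = cur;	/* flip the edge forward */
		cur = prev;
	}
	/* opt[0].next_link now names the first chosen item; following
	 * next_link repeatedly visits the items in output order. */
}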
1801 static struct lz_match
1802 lzx_choose_lazy_item(struct lzx_compressor *c)
1804 const struct lz_match *matches;
1805 struct lz_match cur_match;
1806 struct lz_match next_match;
1809 if (c->prev_match.len) {
1810 cur_match = c->prev_match;
1811 c->prev_match.len = 0;
1813 num_matches = lzx_get_matches(c, &matches);
1814 if (num_matches == 0 ||
1815 (matches[num_matches - 1].len <= 3 &&
1816 (matches[num_matches - 1].len <= 2 ||
1817 matches[num_matches - 1].offset > 4096)))
1819 return (struct lz_match) { };
1822 cur_match = matches[num_matches - 1];
1825 if (cur_match.len >= c->params.nice_match_length) {
1826 lzx_skip_bytes(c, cur_match.len - 1);
1830 num_matches = lzx_get_matches(c, &matches);
1831 if (num_matches == 0 ||
1832 (matches[num_matches - 1].len <= 3 &&
1833 (matches[num_matches - 1].len <= 2 ||
1834 matches[num_matches - 1].offset > 4096)))
1836 lzx_skip_bytes(c, cur_match.len - 2);
1840 next_match = matches[num_matches - 1];
1842 if (next_match.len <= cur_match.len) {
1843 lzx_skip_bytes(c, cur_match.len - 2);
1846 c->prev_match = next_match;
1847 return (struct lz_match) { };
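/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the core "lazy" rule applied by lzx_choose_lazy_item() above.
 * If the best match starting at the very next byte is longer than the best
 * match starting here, emitting one literal now and taking the later match
 * tends to give a shorter encoding.  This helper is a hypothetical
 * simplification; the real function also filters out short matches at
 * large offsets and carries the deferred match in c->prev_match.
 */
static int
sketch_prefer_literal(unsigned cur_match_len, unsigned next_match_len)
{
	/* Nonzero means: emit a literal here and reconsider the next
	 * position's (longer) match on the following call. */
	return next_match_len > cur_match_len;
}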
1852 * Return the next match or literal to use, delegating to the currently selected
1853 * match-choosing algorithm.
1855 * If the length of the returned 'struct lz_match' is less than
1856 * LZX_MIN_MATCH_LEN, then it is really a literal.
1858 static inline struct lz_match
1859 lzx_choose_item(struct lzx_compressor *c)
1861 return (*c->params.choose_item_func)(c);
1864 /* Set default symbol costs for the LZX Huffman codes. */
1866 lzx_set_default_costs(struct lzx_costs * costs, unsigned num_main_syms)
1870 /* Main code (part 1): Literal symbols */
1871 for (i = 0; i < LZX_NUM_CHARS; i++)
1874 /* Main code (part 2): Match header symbols */
1875 for (; i < num_main_syms; i++)
1876 costs->main[i] = 10;
1879 for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
1882 /* Aligned offset code */
1883 for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
1884 costs->aligned[i] = 3;
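/*
 * Note added for exposition: a cost of N in this model stands for a symbol
 * that would be coded in roughly N bits.  The aligned offset code has 8
 * symbols, so the 3-bit default above is exact for a uniform code over it
 * (2^3 == 8) and matches the 3 bits a verbatim block spends on each
 * position footer; later passes replace these starting guesses with costs
 * derived from real Huffman code lengths.
 */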
1887 /* Given the frequencies of symbols in an LZX-compressed block and the
1888 * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
1889 * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
1890 * will take fewer bits to output. */
1892 lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
1893 const struct lzx_codes * codes)
1895 unsigned aligned_cost = 0;
1896 unsigned verbatim_cost = 0;
1898 /* Verbatim blocks have a constant 3 bits per position footer. Aligned
1899 * offset blocks have an aligned offset symbol per position footer, plus
1900 * an extra 24 bits per block to output the lengths necessary to
1901 * reconstruct the aligned offset code itself. */
1902 for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
1903 verbatim_cost += 3 * freqs->aligned[i];
1904 aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
1906 aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
1907 if (aligned_cost < verbatim_cost)
1908 return LZX_BLOCKTYPE_ALIGNED;
1910 return LZX_BLOCKTYPE_VERBATIM;
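/*
 * Worked example for the comparison above (illustrative numbers only):
 * suppose a block has 1000 position footers and the aligned offset code
 * built from their low 3 bits averages 2.5 bits per symbol.  Then
 *
 *	verbatim_cost = 3 * 1000        = 3000 bits
 *	aligned_cost  = 2.5 * 1000 + 24 = 2524 bits
 *
 * so the aligned block type wins.  If the low 3 bits are close to
 * uniformly distributed, the aligned code cannot do better than 3 bits
 * per footer, and the extra 24 bits of header make the verbatim type
 * the cheaper choice.
 */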
1913 /* Find a sequence of matches/literals with which to output the specified LZX
1914 * block, then set the block's type to that which has the minimum cost to output
1915 * (either verbatim or aligned). */
1917 lzx_choose_items_for_block(struct lzx_compressor *c, struct lzx_block_spec *spec)
1919 const struct lzx_lru_queue orig_queue = c->queue;
1920 u32 num_passes_remaining = c->params.num_optim_passes;
1921 struct lzx_freqs freqs;
1922 const u8 *window_ptr;
1923 const u8 *window_end;
1924 struct lzx_item *next_chosen_item;
1925 struct lz_match lz_match;
1926 struct lzx_item lzx_item;
1928 LZX_ASSERT(num_passes_remaining >= 1);
1929 LZX_ASSERT(lz_mf_get_position(c->mf) == spec->window_pos);
1931 c->match_window_end = spec->window_pos + spec->block_size;
1933 if (c->params.num_optim_passes > 1) {
1934 if (spec->block_size == c->cur_window_size)
1935 c->get_matches_func = lzx_get_matches_fillcache_singleblock;
1937 c->get_matches_func = lzx_get_matches_fillcache_multiblock;
1938 c->skip_bytes_func = lzx_skip_bytes_fillcache;
1940 if (spec->block_size == c->cur_window_size)
1941 c->get_matches_func = lzx_get_matches_nocache_singleblock;
1943 c->get_matches_func = lzx_get_matches_nocache_multiblock;
1944 c->skip_bytes_func = lzx_skip_bytes_nocache;
1947 /* The first optimal parsing pass is done using the cost model already
1948 * set in c->costs. Each later pass is done using a cost model
1949 * computed from the previous pass.
1951 * To improve performance we only generate the array containing the
1952 * matches and literals in intermediate form on the final pass. */
1954 while (--num_passes_remaining) {
1955 c->match_window_pos = spec->window_pos;
1956 c->cache_ptr = c->cached_matches;
1957 memset(&freqs, 0, sizeof(freqs));
1958 window_ptr = &c->cur_window[spec->window_pos];
1959 window_end = window_ptr + spec->block_size;
1961 while (window_ptr != window_end) {
1963 lz_match = lzx_choose_item(c);
1965 LZX_ASSERT(!(lz_match.len == LZX_MIN_MATCH_LEN &&
1966 lz_match.offset == c->max_window_size -
1967 LZX_MIN_MATCH_LEN));
1968 if (lz_match.len >= LZX_MIN_MATCH_LEN) {
1969 lzx_tally_match(lz_match.len, lz_match.offset,
1971 window_ptr += lz_match.len;
1973 lzx_tally_literal(*window_ptr, &freqs);
1977 lzx_make_huffman_codes(&freqs, &spec->codes, c->num_main_syms);
1978 lzx_set_costs(c, &spec->codes.lens, 15);
1979 c->queue = orig_queue;
1980 if (c->cache_ptr <= c->cache_limit) {
1981 c->get_matches_func = lzx_get_matches_usecache_nocheck;
1982 c->skip_bytes_func = lzx_skip_bytes_usecache_nocheck;
1984 c->get_matches_func = lzx_get_matches_usecache;
1985 c->skip_bytes_func = lzx_skip_bytes_usecache;
1989 c->match_window_pos = spec->window_pos;
1990 c->cache_ptr = c->cached_matches;
1991 memset(&freqs, 0, sizeof(freqs));
1992 window_ptr = &c->cur_window[spec->window_pos];
1993 window_end = window_ptr + spec->block_size;
1995 spec->chosen_items = &c->chosen_items[spec->window_pos];
1996 next_chosen_item = spec->chosen_items;
1998 unsigned unseen_cost = 9;
1999 while (window_ptr != window_end) {
2001 lz_match = lzx_choose_item(c);
2003 LZX_ASSERT(!(lz_match.len == LZX_MIN_MATCH_LEN &&
2004 lz_match.offset == c->max_window_size -
2005 LZX_MIN_MATCH_LEN));
2006 if (lz_match.len >= LZX_MIN_MATCH_LEN) {
2007 lzx_item.data = lzx_tally_match(lz_match.len,
2010 window_ptr += lz_match.len;
2012 lzx_item.data = lzx_tally_literal(*window_ptr, &freqs);
2015 *next_chosen_item++ = lzx_item;
2017 /* When doing one-pass "near-optimal" parsing, update the cost
2018 * model occasionally. */
2019 if (unlikely((next_chosen_item - spec->chosen_items) % 2048 == 0) &&
2020 c->params.choose_item_func == lzx_choose_near_optimal_item &&
2021 c->params.num_optim_passes == 1)
2023 lzx_make_huffman_codes(&freqs, &spec->codes, c->num_main_syms);
2024 lzx_set_costs(c, &spec->codes.lens, unseen_cost);
2025 if (unseen_cost < 15)
2029 spec->num_chosen_items = next_chosen_item - spec->chosen_items;
2030 lzx_make_huffman_codes(&freqs, &spec->codes, c->num_main_syms);
2031 spec->block_type = lzx_choose_verbatim_or_aligned(&freqs, &spec->codes);
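/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the pass structure of lzx_choose_items_for_block() above, reduced
 * to its control flow.  The 'sketch_' functions are hypothetical stand-ins
 * for the real parsing, lzx_make_huffman_codes() and lzx_set_costs() steps.
 */
struct sketch_cost_model { unsigned bits_per_symbol[1]; };

static void sketch_parse_block(struct sketch_cost_model *m) { (void)m; }
static void sketch_rebuild_costs(struct sketch_cost_model *m) { (void)m; }

static void
sketch_multi_pass(struct sketch_cost_model *m, unsigned num_passes)
{
	/* Pass 1 runs with the default (or inherited) cost model; every
	 * later pass runs with costs rebuilt from what the previous pass
	 * actually chose, so the choices converge toward the final codes. */
	for (unsigned pass = 0; pass < num_passes; pass++) {
		sketch_parse_block(m);		/* choose matches/literals */
		sketch_rebuild_costs(m);	/* refine costs for next pass */
	}
}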
2034 /* Prepare the input window into one or more LZX blocks ready to be output. */
2036 lzx_prepare_blocks(struct lzx_compressor *c)
2038 /* Set up a default cost model. */
2039 if (c->params.choose_item_func == lzx_choose_near_optimal_item)
2040 lzx_set_default_costs(&c->costs, c->num_main_syms);
2042 /* Set up the block specifications.
2043 * TODO: The compression ratio could be slightly improved by performing
2044 * data-dependent block splitting instead of using fixed-size blocks.
2045 * Doing so well is a computationally hard problem, however. */
2046 c->num_blocks = DIV_ROUND_UP(c->cur_window_size, LZX_DIV_BLOCK_SIZE);
2047 for (unsigned i = 0; i < c->num_blocks; i++) {
2048 u32 pos = LZX_DIV_BLOCK_SIZE * i;
2049 c->block_specs[i].window_pos = pos;
2050 c->block_specs[i].block_size = min(c->cur_window_size - pos,
2051 LZX_DIV_BLOCK_SIZE);
2054 /* Load the window into the match-finder. */
2055 lz_mf_load_window(c->mf, c->cur_window, c->cur_window_size);
2057 /* Determine sequence of matches/literals to output for each block. */
2058 lzx_lru_queue_init(&c->queue);
2059 c->optimum_cur_idx = 0;
2060 c->optimum_end_idx = 0;
2061 c->prev_match.len = 0;
2062 for (unsigned i = 0; i < c->num_blocks; i++)
2063 lzx_choose_items_for_block(c, &c->block_specs[i]);
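/*
 * Example of the fixed-size splitting above (illustrative; assumes
 * LZX_DIV_BLOCK_SIZE is 32768): a 100000-byte window gives
 * DIV_ROUND_UP(100000, 32768) == 4 block specs covering [0, 32768),
 * [32768, 65536), [65536, 98304) and a final short block of
 * 100000 - 98304 == 1696 bytes.
 */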
2067 lzx_build_params(unsigned int compression_level,
2068 u32 max_window_size,
2069 struct lzx_compressor_params *lzx_params)
2071 if (compression_level < 25) {
2072 lzx_params->choose_item_func = lzx_choose_lazy_item;
2073 lzx_params->num_optim_passes = 1;
2074 if (max_window_size <= 262144)
2075 lzx_params->mf_algo = LZ_MF_HASH_CHAINS;
2077 lzx_params->mf_algo = LZ_MF_BINARY_TREES;
2078 lzx_params->min_match_length = 3;
2079 lzx_params->nice_match_length = 25 + compression_level * 2;
2080 lzx_params->max_search_depth = 25 + compression_level;
2082 lzx_params->choose_item_func = lzx_choose_near_optimal_item;
2083 lzx_params->num_optim_passes = compression_level / 20;
2084 if (max_window_size <= 32768 && lzx_params->num_optim_passes == 1)
2085 lzx_params->mf_algo = LZ_MF_HASH_CHAINS;
2087 lzx_params->mf_algo = LZ_MF_BINARY_TREES;
2088 lzx_params->min_match_length = (compression_level >= 45) ? 2 : 3;
2089 lzx_params->nice_match_length = min(((u64)compression_level * 32) / 50,
2091 lzx_params->max_search_depth = min(((u64)compression_level * 50) / 50,
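/*
 * Example of the level-to-parameter mapping above (values computed from
 * the code; the min() bounds whose second operands are not shown in this
 * excerpt are ignored here): at compression_level == 20 the lazy parser is
 * selected with nice_match_length == 25 + 20*2 == 65 and
 * max_search_depth == 25 + 20 == 45; at compression_level == 60 the
 * near-optimal parser is selected with num_optim_passes == 60/20 == 3,
 * min_match_length == 2, nice_match_length == (60*32)/50 == 38 and
 * max_search_depth == (60*50)/50 == 60.
 */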
2097 lzx_build_mf_params(const struct lzx_compressor_params *lzx_params,
2098 u32 max_window_size, struct lz_mf_params *mf_params)
2100 memset(mf_params, 0, sizeof(*mf_params));
2102 mf_params->algorithm = lzx_params->mf_algo;
2103 mf_params->max_window_size = max_window_size;
2104 mf_params->min_match_len = lzx_params->min_match_length;
2105 mf_params->max_match_len = LZX_MAX_MATCH_LEN;
2106 mf_params->max_search_depth = lzx_params->max_search_depth;
2107 mf_params->nice_match_len = lzx_params->nice_match_length;
2111 lzx_free_compressor(void *_c);
2114 lzx_get_needed_memory(size_t max_window_size, unsigned int compression_level)
2116 struct lzx_compressor_params params;
2119 if (!lzx_window_size_valid(max_window_size))
2122 lzx_build_params(compression_level, max_window_size, &params);
2124 size += sizeof(struct lzx_compressor);
2126 size += max_window_size;
2128 size += DIV_ROUND_UP(max_window_size, LZX_DIV_BLOCK_SIZE) *
2129 sizeof(struct lzx_block_spec);
2131 size += max_window_size * sizeof(struct lzx_item);
2133 size += lz_mf_get_needed_memory(params.mf_algo, max_window_size);
2134 if (params.choose_item_func == lzx_choose_near_optimal_item) {
2135 size += (LZX_OPTIM_ARRAY_LENGTH + params.nice_match_length) *
2136 sizeof(struct lzx_mc_pos_data);
2138 if (params.num_optim_passes > 1)
2139 size += LZX_CACHE_LEN * sizeof(struct lz_match);
2141 size += LZX_MAX_MATCHES_PER_POS * sizeof(struct lz_match);
2146 lzx_create_compressor(size_t max_window_size, unsigned int compression_level,
2149 struct lzx_compressor *c;
2150 struct lzx_compressor_params params;
2151 struct lz_mf_params mf_params;
2153 if (!lzx_window_size_valid(max_window_size))
2154 return WIMLIB_ERR_INVALID_PARAM;
2156 lzx_build_params(compression_level, max_window_size, &params);
2157 lzx_build_mf_params(&params, max_window_size, &mf_params);
2158 if (!lz_mf_params_valid(&mf_params))
2159 return WIMLIB_ERR_INVALID_PARAM;
2161 c = CALLOC(1, sizeof(struct lzx_compressor));
2166 c->num_main_syms = lzx_get_num_main_syms(max_window_size);
2167 c->max_window_size = max_window_size;
2169 c->cur_window = ALIGNED_MALLOC(max_window_size, 16);
2173 c->block_specs = MALLOC(DIV_ROUND_UP(max_window_size,
2174 LZX_DIV_BLOCK_SIZE) *
2175 sizeof(struct lzx_block_spec));
2176 if (!c->block_specs)
2179 c->chosen_items = MALLOC(max_window_size * sizeof(struct lzx_item));
2180 if (!c->chosen_items)
2183 c->mf = lz_mf_alloc(&mf_params);
2187 if (params.choose_item_func == lzx_choose_near_optimal_item) {
2188 c->optimum = MALLOC((LZX_OPTIM_ARRAY_LENGTH +
2189 params.nice_match_length) *
2190 sizeof(struct lzx_mc_pos_data));
2195 if (params.num_optim_passes > 1) {
2196 c->cached_matches = MALLOC(LZX_CACHE_LEN *
2197 sizeof(struct lz_match));
2198 if (!c->cached_matches)
2200 c->cache_limit = c->cached_matches + LZX_CACHE_LEN -
2201 (LZX_MAX_MATCHES_PER_POS + 1);
2203 c->cached_matches = MALLOC(LZX_MAX_MATCHES_PER_POS *
2204 sizeof(struct lz_match));
2205 if (!c->cached_matches)
2213 lzx_free_compressor(c);
2214 return WIMLIB_ERR_NOMEM;
2218 lzx_compress(const void *uncompressed_data, size_t uncompressed_size,
2219 void *compressed_data, size_t compressed_size_avail, void *_c)
2221 struct lzx_compressor *c = _c;
2222 struct output_bitstream ostream;
2223 size_t compressed_size;
2225 if (uncompressed_size < 100) {
2226 LZX_DEBUG("Too small to bother compressing.");
2230 LZX_DEBUG("Attempting to compress %zu bytes...",
2233 /* The input data must be preprocessed. To avoid changing the original
2234 * input, copy it to a temporary buffer. */
2235 memcpy(c->cur_window, uncompressed_data, uncompressed_size);
2236 c->cur_window_size = uncompressed_size;
2238 /* Before doing any actual compression, do the call instruction (0xe8
2239 * byte) translation on the uncompressed data. */
2240 lzx_do_e8_preprocessing(c->cur_window, c->cur_window_size);
2242 /* Prepare the compressed data. */
2243 lzx_prepare_blocks(c);
2245 /* Generate the compressed data. */
2246 init_output_bitstream(&ostream, compressed_data, compressed_size_avail);
2247 lzx_write_all_blocks(c, &ostream);
2249 compressed_size = flush_output_bitstream(&ostream);
2250 if (compressed_size == (u32)~0UL) {
2251 LZX_DEBUG("Data did not compress to %zu bytes or less!",
2252 compressed_size_avail);
2256 LZX_DEBUG("Done: compressed %zu => %zu bytes.",
2257 uncompressed_size, compressed_size);
2259 return compressed_size;
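/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the idea behind the 0xe8 preprocessing step mentioned above.  The
 * 32-bit displacement following an x86 CALL opcode is relative to the
 * instruction, so identical calls to the same function encode differently
 * at different positions; rewriting the displacement as an absolute target
 * makes them byte-identical and therefore more compressible.  This is a
 * simplified model only: the range checks and the exact rules required by
 * the LZX/WIM format (and the matching inverse transform performed by the
 * decompressor) are not reproduced here; see lzx_do_e8_preprocessing() and
 * lzx-decompress.c for the real behavior.
 */
static void
sketch_e8_translate(u8 *disp, u32 input_pos)
{
	/* 'disp' points at the 4 little-endian displacement bytes that
	 * follow an 0xe8 opcode located at offset 'input_pos'. */
	u32 rel = (u32)disp[0] | ((u32)disp[1] << 8) |
		  ((u32)disp[2] << 16) | ((u32)disp[3] << 24);
	u32 abs_target = rel + input_pos;	/* wraps modulo 2^32 */

	disp[0] = (u8)(abs_target >> 0);
	disp[1] = (u8)(abs_target >> 8);
	disp[2] = (u8)(abs_target >> 16);
	disp[3] = (u8)(abs_target >> 24);
}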
2263 lzx_free_compressor(void *_c)
2265 struct lzx_compressor *c = _c;
2268 ALIGNED_FREE(c->cur_window);
2269 FREE(c->block_specs);
2270 FREE(c->chosen_items);
2273 FREE(c->cached_matches);
2278 const struct compressor_ops lzx_compressor_ops = {
2279 .get_needed_memory = lzx_get_needed_memory,
2280 .create_compressor = lzx_create_compressor,
2281 .compress = lzx_compress,
2282 .free_compressor = lzx_free_compressor,
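/*
 * Illustrative usage sketch (added for exposition, not part of the
 * original file): driving this compressor through the ops table above.
 * It assumes that create_compressor's third parameter (not shown in this
 * excerpt) is an output pointer for the new compressor, that a return
 * value of 0 means success, and that compress() returns the compressed
 * size or 0 if the data did not compress into the space available.
 */
static size_t
sketch_compress_buffer(const void *in, size_t in_size,
		       void *out, size_t out_size_avail)
{
	void *c;
	size_t csize = 0;

	if (lzx_compressor_ops.create_compressor(32768, 50, &c) == 0) {
		csize = lzx_compressor_ops.compress(in, in_size,
						    out, out_size_avail, c);
		lzx_compressor_ops.free_compressor(c);
	}
	return csize;	/* 0 means the data was not compressed */
}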