6 * Copyright (C) 2013, 2014 Eric Biggers
8 * This file is free software; you can redistribute it and/or modify it under
9 * the terms of the GNU Lesser General Public License as published by the Free
10 * Software Foundation; either version 3 of the License, or (at your option) any
13 * This file is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
15 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this file; if not, see http://www.gnu.org/licenses/.
23 * This is a decompressor for the LZMS compression format used by Microsoft.
24 * This format is not documented, but it is one of the formats supported by the
25 * compression API available in Windows 8, and as of Windows 8 it is one of the
26 * formats that can be used in WIM files.
28 * This decompressor only implements "raw" decompression, which decompresses a
29 * single LZMS-compressed block. This behavior is the same as that of
30 * Decompress() in the Windows 8 compression API when using a compression handle
31 * created with CreateDecompressor() with the Algorithm parameter specified as
32 * COMPRESS_ALGORITHM_LZMS | COMPRESS_RAW. Presumably, non-raw LZMS data
33 * is a container format from which the locations and sizes (both compressed and
34 * uncompressed) of the constituent blocks can be determined.
36 * An LZMS-compressed block must be read in 16-bit little endian units from both
37 * directions. One logical bitstream starts at the front of the block and
38 * proceeds forwards. Another logical bitstream starts at the end of the block
39 * and proceeds backwards. Bits read from the forwards bitstream constitute
40 * range-encoded data, whereas bits read from the backwards bitstream constitute
41 * Huffman-encoded symbols or verbatim bits. For both bitstreams, the ordering
42 * of the bits within the 16-bit coding units is such that the first bit is the
43 * high-order bit and the last bit is the low-order bit.
45 * From these two logical bitstreams, an LZMS decompressor can reconstitute the
46 * series of items that make up the LZMS data representation. Each such item
47 * may be a literal byte or a match. Matches may be either traditional LZ77
48 * matches or "delta" matches, either of which can have its offset encoded
49 * explicitly or encoded via a reference to a recently used (repeat) offset.
51 * A traditional LZ match consists of a length and offset; it asserts that the
52 * sequence of bytes beginning at the current position and extending for the
53 * length is exactly equal to the equal-length sequence of bytes at the offset
54 * back in the window. On the other hand, a delta match consists of a length,
55 * raw offset, and power. It asserts that the sequence of bytes beginning at
56 * the current position and extending for the length is equal to the bytewise
57 * sum of the two equal-length sequences of bytes (2**power) and (raw_offset *
58 * 2**power) bytes before the current position, minus bytewise the sequence of
59 * bytes beginning at (2**power + raw_offset * 2**power) bytes before the
60 * current position. Although not generally as useful as traditional LZ
61 * matches, delta matches can be helpful on some types of data. Both LZ and
62 * delta matches may overlap with the current position; in fact, the minimum
63 * offset is 1, regardless of match length.
65 * For LZ matches, up to 3 repeat offsets are allowed, similar to some other
66 * LZ-based formats such as LZX and LZMA. They must be updated in an LRU fashion,
67 * except for a quirk: inserting anything to the front of the queue must be
68 * delayed by one LZMS item. The reason for this is presumably that there is
69 * almost no reason to code the same match offset twice in a row, since you
70 * might as well have coded a longer match at that offset. For this same
71 * reason, it also is a requirement that when an offset in the queue is used,
72 * that offset is removed from the queue immediately (and made pending for
73 * front-insertion after the following decoded item), and everything to the
74 * right is shifted left one queue slot. This creates a need for an "overflow"
75 * fourth entry in the queue, even though it is only possible to decode
76 * references to the first 3 entries at any given time. The queue must be
77 * initialized to the offsets {1, 2, 3, 4}.
79 * Repeat delta matches are handled similarly, but for them there are two queues
80 * updated in lock-step: one for powers and one for raw offsets. The power
81 * queue must be initialized to {0, 0, 0, 0}, and the raw offset queue must be
82 * initialized to {1, 2, 3, 4}.
84 * Bits from the range decoder must be used to disambiguate item types. The
85 * range decoder must hold two state variables: the range, which must initially
86 * be set to 0xffffffff, and the current code, which must initially be set to
87 * the first 32 bits read from the forwards bitstream. The range must be
88 * maintained above 0xffff; when it falls below 0xffff, both the range and code
89 * must be left-shifted by 16 bits and the low 16 bits of the code must be
90 * filled in with the next 16 bits from the forwards bitstream.
92 * To decode each bit, the range decoder requires a probability that is
93 * logically a real number between 0 and 1. Multiplying this probability by the
94 * current range and taking the floor gives the bound between the 0-bit region
95 * of the range and the 1-bit region of the range. However, in LZMS,
96 * probabilities are restricted to values of n/64 where n is an integer
97 * between 1 and 63 inclusively, so the implementation may use integer
98 * operations instead. Following calculation of the bound, if the current code
99 * is in the 0-bit region, the new range becomes the current code and the
100 * decoded bit is 0; otherwise, the bound must be subtracted from both the range
101 * and the code, and the decoded bit is 1. More information about range coding
102 * can be found at https://en.wikipedia.org/wiki/Range_encoding. Furthermore,
103 * note that the LZMA format also uses range coding and has public domain code
106 * The probability used to range-decode each bit must be taken from a table, of
107 * which one instance must exist for each distinct context in which a
108 * range-decoded bit is needed. At each call of the range decoder, the
109 * appropriate probability must be obtained by indexing the appropriate
110 * probability table with the last 4 (in the context disambiguating literals
111 * from matches), 5 (in the context disambiguating LZ matches from delta
112 * matches), or 6 (in all other contexts) bits recently range-decoded in that
113 * context, ordered such that the most recently decoded bit is the low-order bit
116 * Furthermore, each probability entry itself is variable, as its value must be
117 * maintained as n/64 where n is the number of 0 bits in the most recently
118 * decoded 64 bits with that same entry. This allows the compressed
119 * representation to adapt to the input and use fewer bits to represent the most
120 * likely data; note that LZMA uses a similar scheme. Initially, the most
121 * recently decoded 64 bits for each probability entry are assumed to be
122 * 0x0000000055555555 (high order to low order); therefore, all probabilities
123 * are initially 48/64. During the course of decoding, each probability may be
124 * updated to as low as 0/64 (as a result of reading many consecutive 1 bits
125 * with that entry) or as high as 64/64 (as a result of reading many consecutive
126 * 0 bits with that entry); however, probabilities of 0/64 and 64/64 cannot be
127 * used as-is but rather must be adjusted to 1/64 and 63/64, respectively,
128 * before being used for range decoding.
130 * Representations of the LZMS items themselves must be read from the backwards
131 * bitstream. For this, there are 5 different Huffman codes used:
133 * - The literal code, used for decoding literal bytes. Each of the 256
134 * symbols represents a literal byte. This code must be rebuilt whenever
135 * 1024 symbols have been decoded with it.
137 * - The LZ offset code, used for decoding the offsets of standard LZ77
138 * matches. Each symbol represents an offset slot, which corresponds to a
139 * base value and some number of extra bits which must be read and added to
140 * the base value to reconstitute the full offset. The number of symbols in
141 * this code is the number of offset slots needed to represent all possible
142 * offsets in the uncompressed block. This code must be rebuilt whenever
143 * 1024 symbols have been decoded with it.
145 * - The length code, used for decoding length symbols. Each of the 54 symbols
146 * represents a length slot, which corresponds to a base value and some
147 * number of extra bits which must be read and added to the base value to
148 * reconstitute the full length. This code must be rebuilt whenever 512
149 * symbols have been decoded with it.
151 * - The delta offset code, used for decoding the offsets of delta matches.
152 * Each symbol corresponds to an offset slot, which corresponds to a base
153 * value and some number of extra bits which must be read and added to the
154 * base value to reconstitute the full offset. The number of symbols in this
155 * code is equal to the number of symbols in the LZ offset code. This code
156 * must be rebuilt whenever 1024 symbols have been decoded with it.
158 * - The delta power code, used for decoding the powers of delta matches. Each
159 * of the 8 symbols corresponds to a power. This code must be rebuilt
160 * whenever 512 symbols have been decoded with it.
162 * All the LZMS Huffman codes must be built adaptively based on symbol
163 * frequencies. Initially, each code must be built assuming that all symbols
164 * have equal frequency. Following that, each code must be rebuilt whenever a
165 * certain number of symbols has been decoded with it.
167 * Like other compression formats such as XPRESS, LZX, and DEFLATE, the LZMS
168 * format requires that all Huffman codes be constructed in canonical form.
169 * This form requires that same-length codewords be lexicographically ordered
170 * the same way as the corresponding symbols and that all shorter codewords
171 * lexicographically precede longer codewords. Such a code can be constructed
172 * directly from codeword lengths, although in LZMS this is not actually
173 * necessary because the codes are built using adaptive symbol frequencies.
175 * Even with the canonical code restriction, the same frequencies can be used to
176 * construct multiple valid Huffman codes. Therefore, the decompressor needs to
177 * construct the right one. Specifically, the LZMS format requires that the
178 * Huffman code be constructed as if the well-known priority queue algorithm is
179 * used and frequency ties are always broken in favor of leaf nodes. See
180 * make_canonical_huffman_code() in compress_common.c for more information.
182 * Codewords in LZMS are guaranteed to not exceed 15 bits. The format otherwise
183 * places no restrictions on codeword length. Therefore, the Huffman code
184 * construction algorithm that a correct LZMS decompressor uses need not
185 * implement length-limited code construction. But if it does (e.g. by virtue
186 * of being shared among multiple compression algorithms), the details of how it
187 * does so are unimportant, provided that the maximum codeword length parameter
188 * is set to at least 15 bits.
190 * An LZMS-compressed block seemingly cannot have a compressed size greater than
191 * or equal to the uncompressed size. In such cases the block must be stored
194 * After all LZMS items have been decoded, the data must be postprocessed to
195 * translate absolute addresses encoded in x86 instructions into their original
196 * relative addresses.
198 * Details omitted above can be found in the code. Note that in the absence of
199 * an official specification there is no guarantee that this decompressor
200 * handles all possible cases.
207 #include "wimlib/compress_common.h"
208 #include "wimlib/decompressor_ops.h"
209 #include "wimlib/decompress_common.h"
210 #include "wimlib/error.h"
211 #include "wimlib/lzms.h"
212 #include "wimlib/unaligned.h"
213 #include "wimlib/util.h"
217 #define LZMS_DECODE_TABLE_BITS 10
219 /* Structure used for range decoding, reading bits forwards. This is the first
220 * logical bitstream mentioned above. */
221 struct lzms_range_decoder_raw {
222 /* The relevant part of the current range. Although the logical range
223 * for range decoding is a very large integer, only a small portion
224 * matters at any given time, and it can be normalized (shifted left)
225 * whenever it gets too small. */
228 /* The current position in the range encoded by the portion of the input
232 /* Pointer to the next little-endian 16-bit integer in the compressed
233 * input data (reading forwards). */
236 /* Number of 16-bit integers remaining in the compressed input data
237 * (reading forwards). */
238 size_t num_le16_remaining;
241 /* Structure used for reading raw bits backwards. This is the second logical
242 * bitstream mentioned above. */
243 struct lzms_input_bitstream {
244 /* Holding variable for bits that have been read from the compressed
245 * data. The bits are ordered from high-order to low-order. */
246 /* XXX: Without special-case code to handle reading more than 17 bits
247 * at a time, this needs to be 64 bits rather than 32 bits. */
250 /* Number of bits in @bitbuf that are used. */
251 unsigned num_filled_bits;
253 /* Pointer to the one past the next little-endian 16-bit integer in the
254 * compressed input data (reading backwards). */
257 /* Number of 16-bit integers remaining in the compressed input data
258 * (reading backwards). */
259 size_t num_le16_remaining;
262 /* Structure used for range decoding. This wraps around `struct
263 * lzms_range_decoder_raw' to use and maintain probability entries. */
264 struct lzms_range_decoder {
265 /* Pointer to the raw range decoder, which has no persistent knowledge
266 * of probabilities. Multiple lzms_range_decoder's share the same
267 * lzms_range_decoder_raw. */
268 struct lzms_range_decoder_raw *rd;
270 /* Bits recently decoded by this range decoder. This are used as in
271 * index into @prob_entries. */
274 /* Bitmask for @state to prevent its value from exceeding the number of
275 * probability entries. */
278 /* Probability entries being used for this range decoder. */
279 struct lzms_probability_entry prob_entries[LZMS_MAX_NUM_STATES];
282 /* Structure used for Huffman decoding, optionally using the decoded symbols as
283 * slots into a base table to determine how many extra bits need to be read to
284 * reconstitute the full value. */
285 struct lzms_huffman_decoder {
287 /* Bitstream to read Huffman-encoded symbols and verbatim bits from.
288 * Multiple lzms_huffman_decoder's share the same lzms_input_bitstream.
290 struct lzms_input_bitstream *is;
292 /* Pointer to the slot base table to use. It is indexed by the decoded
293 * Huffman symbol that specifies the slot. The entry specifies the base
294 * value to use, and the position of its high bit is the number of
295 * additional bits that must be read to reconstitute the full value.
297 * This member need not be set if only raw Huffman symbols are being
298 * read using this decoder. */
299 const u32 *slot_base_tab;
301 const u8 *extra_bits_tab;
303 /* Number of symbols that have been read using this code far. Reset to
304 * 0 whenever the code is rebuilt. */
307 /* When @num_syms_read reaches this number, the Huffman code must be
311 /* Number of symbols in the represented Huffman code. */
314 /* Running totals of symbol frequencies. These are diluted slightly
315 * whenever the code is rebuilt. */
316 u32 sym_freqs[LZMS_MAX_NUM_SYMS];
318 /* The length, in bits, of each symbol in the Huffman code. */
319 u8 lens[LZMS_MAX_NUM_SYMS];
321 /* The codeword of each symbol in the Huffman code. */
322 u32 codewords[LZMS_MAX_NUM_SYMS];
324 /* A table for quickly decoding symbols encoded using the Huffman code.
326 u16 decode_table[(1U << LZMS_DECODE_TABLE_BITS) + 2 * LZMS_MAX_NUM_SYMS]
327 _aligned_attribute(DECODE_TABLE_ALIGNMENT);
330 /* State of the LZMS decompressor. */
331 struct lzms_decompressor {
333 /* Pointer to the beginning of the uncompressed data buffer. */
336 /* Pointer to the next position in the uncompressed data buffer. */
339 /* Pointer to one past the end of the uncompressed data buffer. */
342 /* Range decoder, which reads bits from the beginning of the compressed
343 * block, going forwards. */
344 struct lzms_range_decoder_raw rd;
346 /* Input bitstream, which reads from the end of the compressed block,
347 * going backwards. */
348 struct lzms_input_bitstream is;
350 /* Range decoders. */
351 struct lzms_range_decoder main_range_decoder;
352 struct lzms_range_decoder match_range_decoder;
353 struct lzms_range_decoder lz_match_range_decoder;
354 struct lzms_range_decoder lz_repeat_match_range_decoders[LZMS_NUM_RECENT_OFFSETS - 1];
355 struct lzms_range_decoder delta_match_range_decoder;
356 struct lzms_range_decoder delta_repeat_match_range_decoders[LZMS_NUM_RECENT_OFFSETS - 1];
358 /* Huffman decoders. */
359 struct lzms_huffman_decoder literal_decoder;
360 struct lzms_huffman_decoder lz_offset_decoder;
361 struct lzms_huffman_decoder length_decoder;
362 struct lzms_huffman_decoder delta_power_decoder;
363 struct lzms_huffman_decoder delta_offset_decoder;
365 /* LRU (least-recently-used) queues for match information. */
366 struct lzms_lru_queues lru;
368 /* Used for postprocessing. */
369 s32 last_target_usages[65536];
372 /* Initialize the input bitstream @is to read forwards from the specified
373 * compressed data buffer @in that is @in_limit 16-bit integers long. */
375 lzms_input_bitstream_init(struct lzms_input_bitstream *is,
376 const le16 *in, size_t in_limit)
379 is->num_filled_bits = 0;
380 is->in = in + in_limit;
381 is->num_le16_remaining = in_limit;
384 /* Ensures that @num_bits bits are buffered in the input bitstream. */
386 lzms_input_bitstream_ensure_bits(struct lzms_input_bitstream *is,
389 while (is->num_filled_bits < num_bits) {
392 LZMS_ASSERT(is->num_filled_bits + 16 <= sizeof(is->bitbuf) * 8);
394 if (unlikely(is->num_le16_remaining == 0))
397 next = get_unaligned_u16_le(--is->in);
398 is->num_le16_remaining--;
400 is->bitbuf |= next << (sizeof(is->bitbuf) * 8 - is->num_filled_bits - 16);
401 is->num_filled_bits += 16;
407 /* Returns the next @num_bits bits that are buffered in the input bitstream. */
409 lzms_input_bitstream_peek_bits(struct lzms_input_bitstream *is,
412 LZMS_ASSERT(is->num_filled_bits >= num_bits);
413 return is->bitbuf >> (sizeof(is->bitbuf) * 8 - num_bits);
416 /* Removes the next @num_bits bits that are buffered in the input bitstream. */
418 lzms_input_bitstream_remove_bits(struct lzms_input_bitstream *is,
421 LZMS_ASSERT(is->num_filled_bits >= num_bits);
422 is->bitbuf <<= num_bits;
423 is->num_filled_bits -= num_bits;
426 /* Removes and returns the next @num_bits bits that are buffered in the input
429 lzms_input_bitstream_pop_bits(struct lzms_input_bitstream *is,
432 u32 bits = lzms_input_bitstream_peek_bits(is, num_bits);
433 lzms_input_bitstream_remove_bits(is, num_bits);
437 /* Reads the next @num_bits from the input bitstream. */
439 lzms_input_bitstream_read_bits(struct lzms_input_bitstream *is,
442 if (unlikely(lzms_input_bitstream_ensure_bits(is, num_bits)))
444 return lzms_input_bitstream_pop_bits(is, num_bits);
447 /* Initialize the range decoder @rd to read forwards from the specified
448 * compressed data buffer @in that is @in_limit 16-bit integers long. */
450 lzms_range_decoder_raw_init(struct lzms_range_decoder_raw *rd,
451 const le16 *in, size_t in_limit)
453 rd->range = 0xffffffff;
454 rd->code = ((u32)get_unaligned_u16_le(&in[0]) << 16) |
455 ((u32)get_unaligned_u16_le(&in[1]) << 0);
457 rd->num_le16_remaining = in_limit - 2;
460 /* Ensures the current range of the range decoder has at least 16 bits of
463 lzms_range_decoder_raw_normalize(struct lzms_range_decoder_raw *rd)
465 if (rd->range <= 0xffff) {
467 if (unlikely(rd->num_le16_remaining == 0))
469 rd->code = (rd->code << 16) | get_unaligned_u16_le(rd->in++);
470 rd->num_le16_remaining--;
475 /* Decode and return the next bit from the range decoder (raw version).
477 * @prob is the chance out of LZMS_PROBABILITY_MAX that the next bit is 0.
480 lzms_range_decoder_raw_decode_bit(struct lzms_range_decoder_raw *rd, u32 prob)
484 /* Ensure the range has at least 16 bits of precision. */
485 lzms_range_decoder_raw_normalize(rd);
487 /* Based on the probability, calculate the bound between the 0-bit
488 * region and the 1-bit region of the range. */
489 bound = (rd->range >> LZMS_PROBABILITY_BITS) * prob;
491 if (rd->code < bound) {
492 /* Current code is in the 0-bit region of the range. */
496 /* Current code is in the 1-bit region of the range. */
503 /* Decode and return the next bit from the range decoder. This wraps around
504 * lzms_range_decoder_raw_decode_bit() to handle using and updating the
505 * appropriate probability table. */
507 lzms_range_decode_bit(struct lzms_range_decoder *dec)
509 struct lzms_probability_entry *prob_entry;
513 /* Load the probability entry corresponding to the current state. */
514 prob_entry = &dec->prob_entries[dec->state];
516 /* Get the probability that the next bit is 0. */
517 prob = lzms_get_probability(prob_entry);
519 /* Decode the next bit. */
520 bit = lzms_range_decoder_raw_decode_bit(dec->rd, prob);
522 /* Update the state and probability entry based on the decoded bit. */
523 dec->state = (((dec->state << 1) | bit) & dec->mask);
524 lzms_update_probability_entry(prob_entry, bit);
526 /* Return the decoded bit. */
531 /* Build the decoding table for a new adaptive Huffman code using the alphabet
532 * used in the specified Huffman decoder, with the symbol frequencies
535 lzms_rebuild_adaptive_huffman_code(struct lzms_huffman_decoder *dec)
538 /* XXX: This implementation makes use of code already implemented for
539 * the XPRESS and LZX compression formats. However, since for the
540 * adaptive codes used in LZMS we don't actually need the explicit codes
541 * themselves, only the decode tables, it may be possible to optimize
542 * this by somehow directly building or updating the Huffman decode
543 * table. This may be a worthwhile optimization because the adaptive
544 * codes change many times throughout a decompression run. */
545 LZMS_DEBUG("Rebuilding adaptive Huffman code (num_syms=%u)",
547 make_canonical_huffman_code(dec->num_syms, LZMS_MAX_CODEWORD_LEN,
548 dec->sym_freqs, dec->lens, dec->codewords);
549 #if defined(ENABLE_LZMS_DEBUG)
552 make_huffman_decode_table(dec->decode_table, dec->num_syms,
553 LZMS_DECODE_TABLE_BITS, dec->lens,
554 LZMS_MAX_CODEWORD_LEN);
555 LZMS_ASSERT(ret == 0);
558 /* Decode and return the next Huffman-encoded symbol from the LZMS-compressed
559 * block using the specified Huffman decoder. */
561 lzms_huffman_decode_symbol(struct lzms_huffman_decoder *dec)
563 const u16 *decode_table = dec->decode_table;
564 struct lzms_input_bitstream *is = dec->is;
569 /* The Huffman codes used in LZMS are adaptive and must be rebuilt
570 * whenever a certain number of symbols have been read. Each such
571 * rebuild uses the current symbol frequencies, but the format also
572 * requires that the symbol frequencies be halved after each code
573 * rebuild. This diminishes the effect of old symbols on the current
574 * Huffman codes, thereby causing the Huffman codes to be more locally
576 if (dec->num_syms_read == dec->rebuild_freq) {
577 lzms_rebuild_adaptive_huffman_code(dec);
578 for (unsigned i = 0; i < dec->num_syms; i++) {
579 dec->sym_freqs[i] >>= 1;
580 dec->sym_freqs[i] += 1;
582 dec->num_syms_read = 0;
585 /* XXX: Copied from read_huffsym() (decompress_common.h), since this
586 * uses a different input bitstream type. Should unify the
587 * implementations. */
588 lzms_input_bitstream_ensure_bits(is, LZMS_MAX_CODEWORD_LEN);
590 /* Index the decode table by the next table_bits bits of the input. */
591 key_bits = lzms_input_bitstream_peek_bits(is, LZMS_DECODE_TABLE_BITS);
592 entry = decode_table[key_bits];
593 if (likely(entry < 0xC000)) {
594 /* Fast case: The decode table directly provided the symbol and
595 * codeword length. The low 11 bits are the symbol, and the
596 * high 5 bits are the codeword length. */
597 lzms_input_bitstream_remove_bits(is, entry >> 11);
600 /* Slow case: The codeword for the symbol is longer than
601 * table_bits, so the symbol does not have an entry directly in
602 * the first (1 << table_bits) entries of the decode table.
603 * Traverse the appropriate binary tree bit-by-bit in order to
604 * decode the symbol. */
605 lzms_input_bitstream_remove_bits(is, LZMS_DECODE_TABLE_BITS);
607 key_bits = (entry & 0x3FFF) + lzms_input_bitstream_pop_bits(is, 1);
608 } while ((entry = decode_table[key_bits]) >= 0xC000);
612 /* Tally and return the decoded symbol. */
613 ++dec->sym_freqs[sym];
614 ++dec->num_syms_read;
618 /* Decode a number from the LZMS bitstream, encoded as a Huffman-encoded symbol
619 * specifying a "slot" (whose corresponding value is looked up in a static
620 * table) plus the number specified by a number of extra bits depending on the
623 lzms_decode_value(struct lzms_huffman_decoder *dec)
626 unsigned num_extra_bits;
629 LZMS_ASSERT(dec->slot_base_tab != NULL);
630 LZMS_ASSERT(dec->extra_bits_tab != NULL);
632 /* Read the slot (offset slot, length slot, etc.), which is encoded as a
634 slot = lzms_huffman_decode_symbol(dec);
636 /* Get the number of extra bits needed to represent the range of values
637 * that share the slot. */
638 num_extra_bits = dec->extra_bits_tab[slot];
640 /* Read the number of extra bits and add them to the slot base to form
641 * the final decoded value. */
642 extra_bits = lzms_input_bitstream_read_bits(dec->is, num_extra_bits);
643 return dec->slot_base_tab[slot] + extra_bits;
646 /* Copy a literal to the output buffer. */
648 lzms_copy_literal(struct lzms_decompressor *ctx, u8 literal)
650 *ctx->out_next++ = literal;
654 /* Validate an LZ match and copy it to the output buffer. */
656 lzms_copy_lz_match(struct lzms_decompressor *ctx, u32 length, u32 offset)
660 if (length > ctx->out_end - ctx->out_next) {
661 LZMS_DEBUG("Match overrun!");
664 if (offset > ctx->out_next - ctx->out_begin) {
665 LZMS_DEBUG("Match underrun!");
669 out_next = ctx->out_next;
671 lz_copy(out_next, length, offset, ctx->out_end, 1);
672 ctx->out_next = out_next + length;
677 /* Validate a delta match and copy it to the output buffer. */
679 lzms_copy_delta_match(struct lzms_decompressor *ctx, u32 length,
680 u32 power, u32 raw_offset)
682 u32 offset1 = 1U << power;
683 u32 offset2 = raw_offset << power;
684 u32 offset = offset1 + offset2;
690 if (length > ctx->out_end - ctx->out_next) {
691 LZMS_DEBUG("Match overrun!");
694 if (offset > ctx->out_next - ctx->out_begin) {
695 LZMS_DEBUG("Match underrun!");
699 out_next = ctx->out_next;
700 matchptr1 = out_next - offset1;
701 matchptr2 = out_next - offset2;
702 matchptr = out_next - offset;
705 *out_next++ = *matchptr1++ + *matchptr2++ - *matchptr++;
707 ctx->out_next = out_next;
711 /* Decode a (length, offset) pair from the input. */
713 lzms_decode_lz_match(struct lzms_decompressor *ctx)
718 /* Decode the match offset. The next range-encoded bit indicates
719 * whether it's a repeat offset or an explicit offset. */
721 bit = lzms_range_decode_bit(&ctx->lz_match_range_decoder);
723 /* Explicit offset. */
724 offset = lzms_decode_value(&ctx->lz_offset_decoder);
729 for (i = 0; i < LZMS_NUM_RECENT_OFFSETS - 1; i++)
730 if (!lzms_range_decode_bit(&ctx->lz_repeat_match_range_decoders[i]))
733 offset = ctx->lru.lz.recent_offsets[i];
735 for (; i < LZMS_NUM_RECENT_OFFSETS; i++)
736 ctx->lru.lz.recent_offsets[i] = ctx->lru.lz.recent_offsets[i + 1];
739 /* Decode match length, which is always given explicitly (there is no
740 * LRU queue for repeat lengths). */
741 length = lzms_decode_value(&ctx->length_decoder);
743 ctx->lru.lz.upcoming_offset = offset;
745 LZMS_DEBUG("Decoded %s LZ match: length=%u, offset=%u",
746 (bit ? "repeat" : "explicit"), length, offset);
748 /* Validate the match and copy it to the output. */
749 return lzms_copy_lz_match(ctx, length, offset);
752 /* Decodes a "delta" match from the input. */
754 lzms_decode_delta_match(struct lzms_decompressor *ctx)
757 u32 length, power, raw_offset;
759 /* Decode the match power and raw offset. The next range-encoded bit
760 * indicates whether these data are a repeat, or given explicitly. */
762 bit = lzms_range_decode_bit(&ctx->delta_match_range_decoder);
764 power = lzms_huffman_decode_symbol(&ctx->delta_power_decoder);
765 raw_offset = lzms_decode_value(&ctx->delta_offset_decoder);
769 for (i = 0; i < LZMS_NUM_RECENT_OFFSETS - 1; i++)
770 if (!lzms_range_decode_bit(&ctx->delta_repeat_match_range_decoders[i]))
773 power = ctx->lru.delta.recent_powers[i];
774 raw_offset = ctx->lru.delta.recent_offsets[i];
776 for (; i < LZMS_NUM_RECENT_OFFSETS; i++) {
777 ctx->lru.delta.recent_powers[i] = ctx->lru.delta.recent_powers[i + 1];
778 ctx->lru.delta.recent_offsets[i] = ctx->lru.delta.recent_offsets[i + 1];
782 length = lzms_decode_value(&ctx->length_decoder);
784 ctx->lru.delta.upcoming_power = power;
785 ctx->lru.delta.upcoming_offset = raw_offset;
787 LZMS_DEBUG("Decoded %s delta match: length=%u, power=%u, raw_offset=%u",
788 (bit ? "repeat" : "explicit"), length, power, raw_offset);
790 /* Validate the match and copy it to the output. */
791 return lzms_copy_delta_match(ctx, length, power, raw_offset);
794 /* Decode an LZ or delta match. */
796 lzms_decode_match(struct lzms_decompressor *ctx)
798 if (!lzms_range_decode_bit(&ctx->match_range_decoder))
799 return lzms_decode_lz_match(ctx);
801 return lzms_decode_delta_match(ctx);
804 /* Decode a literal byte encoded using the literal Huffman code. */
806 lzms_decode_literal(struct lzms_decompressor *ctx)
808 u8 literal = lzms_huffman_decode_symbol(&ctx->literal_decoder);
809 LZMS_DEBUG("Decoded literal: 0x%02x", literal);
810 return lzms_copy_literal(ctx, literal);
813 /* Decode the next LZMS match or literal. */
815 lzms_decode_item(struct lzms_decompressor *ctx)
819 ctx->lru.lz.upcoming_offset = 0;
820 ctx->lru.delta.upcoming_power = 0;
821 ctx->lru.delta.upcoming_offset = 0;
823 if (lzms_range_decode_bit(&ctx->main_range_decoder))
824 ret = lzms_decode_match(ctx);
826 ret = lzms_decode_literal(ctx);
831 lzms_update_lru_queues(&ctx->lru);
836 lzms_init_range_decoder(struct lzms_range_decoder *dec,
837 struct lzms_range_decoder_raw *rd, u32 num_states)
841 dec->mask = num_states - 1;
842 for (u32 i = 0; i < num_states; i++) {
843 dec->prob_entries[i].num_recent_zero_bits = LZMS_INITIAL_PROBABILITY;
844 dec->prob_entries[i].recent_bits = LZMS_INITIAL_RECENT_BITS;
849 lzms_init_huffman_decoder(struct lzms_huffman_decoder *dec,
850 struct lzms_input_bitstream *is,
851 const u32 *slot_base_tab,
852 const u8 *extra_bits_tab,
854 unsigned rebuild_freq)
857 dec->slot_base_tab = slot_base_tab;
858 dec->extra_bits_tab = extra_bits_tab;
859 dec->num_syms = num_syms;
860 dec->num_syms_read = rebuild_freq;
861 dec->rebuild_freq = rebuild_freq;
862 for (unsigned i = 0; i < num_syms; i++)
863 dec->sym_freqs[i] = 1;
866 /* Prepare to decode items from an LZMS-compressed block. */
868 lzms_init_decompressor(struct lzms_decompressor *ctx,
869 const void *cdata, unsigned clen,
870 void *ubuf, unsigned ulen)
872 unsigned num_offset_slots;
874 LZMS_DEBUG("Initializing decompressor (clen=%u, ulen=%u)", clen, ulen);
876 /* Initialize output pointers. */
877 ctx->out_begin = ubuf;
878 ctx->out_next = ubuf;
879 ctx->out_end = (u8*)ubuf + ulen;
881 /* Initialize the raw range decoder (reading forwards). */
882 lzms_range_decoder_raw_init(&ctx->rd, cdata, clen / 2);
884 /* Initialize the input bitstream for Huffman symbols (reading
886 lzms_input_bitstream_init(&ctx->is, cdata, clen / 2);
888 /* Calculate the number of offset slots needed for this compressed
890 num_offset_slots = lzms_get_offset_slot(ulen - 1) + 1;
892 LZMS_DEBUG("Using %u offset slots", num_offset_slots);
894 /* Initialize Huffman decoders for each alphabet used in the compressed
896 lzms_init_huffman_decoder(&ctx->literal_decoder, &ctx->is,
897 NULL, NULL, LZMS_NUM_LITERAL_SYMS,
898 LZMS_LITERAL_CODE_REBUILD_FREQ);
900 lzms_init_huffman_decoder(&ctx->lz_offset_decoder, &ctx->is,
901 lzms_offset_slot_base,
902 lzms_extra_offset_bits,
904 LZMS_LZ_OFFSET_CODE_REBUILD_FREQ);
906 lzms_init_huffman_decoder(&ctx->length_decoder, &ctx->is,
907 lzms_length_slot_base,
908 lzms_extra_length_bits,
910 LZMS_LENGTH_CODE_REBUILD_FREQ);
912 lzms_init_huffman_decoder(&ctx->delta_offset_decoder, &ctx->is,
913 lzms_offset_slot_base,
914 lzms_extra_offset_bits,
916 LZMS_DELTA_OFFSET_CODE_REBUILD_FREQ);
918 lzms_init_huffman_decoder(&ctx->delta_power_decoder, &ctx->is,
919 NULL, NULL, LZMS_NUM_DELTA_POWER_SYMS,
920 LZMS_DELTA_POWER_CODE_REBUILD_FREQ);
923 /* Initialize range decoders, all of which wrap around the same
924 * lzms_range_decoder_raw. */
925 lzms_init_range_decoder(&ctx->main_range_decoder,
926 &ctx->rd, LZMS_NUM_MAIN_STATES);
928 lzms_init_range_decoder(&ctx->match_range_decoder,
929 &ctx->rd, LZMS_NUM_MATCH_STATES);
931 lzms_init_range_decoder(&ctx->lz_match_range_decoder,
932 &ctx->rd, LZMS_NUM_LZ_MATCH_STATES);
934 for (size_t i = 0; i < ARRAY_LEN(ctx->lz_repeat_match_range_decoders); i++)
935 lzms_init_range_decoder(&ctx->lz_repeat_match_range_decoders[i],
936 &ctx->rd, LZMS_NUM_LZ_REPEAT_MATCH_STATES);
938 lzms_init_range_decoder(&ctx->delta_match_range_decoder,
939 &ctx->rd, LZMS_NUM_DELTA_MATCH_STATES);
941 for (size_t i = 0; i < ARRAY_LEN(ctx->delta_repeat_match_range_decoders); i++)
942 lzms_init_range_decoder(&ctx->delta_repeat_match_range_decoders[i],
943 &ctx->rd, LZMS_NUM_DELTA_REPEAT_MATCH_STATES);
945 /* Initialize LRU match information. */
946 lzms_init_lru_queues(&ctx->lru);
948 LZMS_DEBUG("Decompressor successfully initialized");
951 /* Decode the series of literals and matches from the LZMS-compressed data.
952 * Returns 0 on success; nonzero if the compressed data is invalid. */
954 lzms_decode_items(const u8 *cdata, size_t clen, u8 *ubuf, size_t ulen,
955 struct lzms_decompressor *ctx)
957 /* Initialize the LZMS decompressor. */
958 lzms_init_decompressor(ctx, cdata, clen, ubuf, ulen);
960 /* Decode the sequence of items. */
961 while (ctx->out_next != ctx->out_end) {
962 LZMS_DEBUG("Position %u", ctx->out_next - ctx->out_begin);
963 if (lzms_decode_item(ctx))
970 lzms_decompress(const void *compressed_data, size_t compressed_size,
971 void *uncompressed_data, size_t uncompressed_size, void *_ctx)
973 struct lzms_decompressor *ctx = _ctx;
975 /* The range decoder requires that a minimum of 4 bytes of compressed
976 * data be initially available. */
977 if (compressed_size < 4) {
978 LZMS_DEBUG("Compressed size too small (got %zu, expected >= 4)",
983 /* An LZMS-compressed data block should be evenly divisible into 16-bit
985 if (compressed_size % 2 != 0) {
986 LZMS_DEBUG("Compressed size not divisible by 2 (got %zu)",
991 /* Handle the trivial case where nothing needs to be decompressed.
992 * (Necessary because a window of size 0 does not have a valid offset
994 if (uncompressed_size == 0)
997 /* Decode the literals and matches. */
998 if (lzms_decode_items(compressed_data, compressed_size,
999 uncompressed_data, uncompressed_size, ctx))
1002 /* Postprocess the data. */
1003 lzms_x86_filter(uncompressed_data, uncompressed_size,
1004 ctx->last_target_usages, true);
1006 LZMS_DEBUG("Decompression successful.");
/* API function: free a decompressor context previously returned by
 * lzms_create_decompressor().  The context was obtained with ALIGNED_MALLOC,
 * so it must be released with the matching aligned free.  */
static void
lzms_free_decompressor(void *_ctx)
{
	struct lzms_decompressor *ctx = _ctx;

	ALIGNED_FREE(ctx);
}
1019 lzms_create_decompressor(size_t max_block_size, void **ctx_ret)
1021 struct lzms_decompressor *ctx;
1023 /* The x86 post-processor requires that the uncompressed length fit into
1024 * a signed 32-bit integer. Also, the offset slot table cannot be
1025 * searched for an offset of INT32_MAX or greater. */
1026 if (max_block_size >= INT32_MAX)
1027 return WIMLIB_ERR_INVALID_PARAM;
1029 ctx = ALIGNED_MALLOC(sizeof(struct lzms_decompressor),
1030 DECODE_TABLE_ALIGNMENT);
1032 return WIMLIB_ERR_NOMEM;
1034 /* Initialize offset and length slot data if not done already. */
1041 const struct decompressor_ops lzms_decompressor_ops = {
1042 .create_decompressor = lzms_create_decompressor,
1043 .decompress = lzms_decompress,
1044 .free_decompressor = lzms_free_decompressor,