/*
 * Header for decompression code shared by multiple compression formats.
 *
 * The author dedicates this file to the public domain.
 * You can do whatever you want with this file.
 */
10 #ifndef _WIMLIB_DECOMPRESS_COMMON_H
11 #define _WIMLIB_DECOMPRESS_COMMON_H
13 #include "wimlib/assert.h"
14 #include "wimlib/compiler.h"
15 #include "wimlib/endianness.h"
16 #include "wimlib/types.h"
18 /* Structure to encapsulate a block of in-memory data that is being interpreted
19 * as a stream of bits.
21 * This is geared specifically towards the XPRESS and LZX compression formats
22 * with regards to the actual ordering the bits within the byte sequence. */
23 struct input_bitstream {
25 /* A variable of length at least 32 bits that is used to hold bits that
26 * have been read from the stream. The bits are ordered from high-order
27 * to low-order, and the next bit is always the high-order bit. */
30 /* Number of bits in @bitbuf that are valid. */
33 /* Pointer to the next byte to be retrieved from the input. */
36 /* Number of bytes of data that are left. */
40 /* Initializes a bitstream to receive its input from @data. */
42 init_input_bitstream(struct input_bitstream *istream,
43 const void *data, u32 num_data_bytes)
46 istream->bitsleft = 0;
48 istream->data_bytes_left = num_data_bytes;
51 /* Ensures the bit buffer variable for the bitstream contains at least @num_bits
52 * bits. Following this, bitstream_peek_bits() and/or bitstream_remove_bits()
53 * may be called on the bitstream to peek or remove up to @num_bits bits.
55 * If the input data is exhausted, any further bits are assumed to be 0. */
57 bitstream_ensure_bits(struct input_bitstream *istream, unsigned num_bits)
62 /* This currently works for at most 17 bits. */
63 wimlib_assert2(num_bits <= 17);
65 if (istream->bitsleft >= num_bits)
68 nextword = le16_to_cpu(*(const le16*)istream->data);
69 shift = sizeof(istream->bitbuf) * 8 - 16 - istream->bitsleft;
70 istream->bitbuf |= (u32)nextword << shift;
72 istream->bitsleft += 16;
73 istream->data_bytes_left -= 2;
75 /* Help the compiler: If it's known at compile-time that num_bits <= 16,
76 * a second word will never be needed. */
77 if (!(is_constant(num_bits) && num_bits <= 16) &&
78 unlikely(istream->bitsleft < num_bits))
80 nextword = le16_to_cpu(*(const le16*)istream->data);
81 shift = sizeof(istream->bitbuf) * 8 - 16 - istream->bitsleft;
82 istream->bitbuf |= (u32)nextword << shift;
84 istream->bitsleft += 16;
85 istream->data_bytes_left -= 2;
89 /* Returns the next @num_bits bits from the bitstream, without removing them.
90 * There must be at least @num_bits remaining in the buffer variable, from a
91 * previous call to bitstream_ensure_bits(). */
93 bitstream_peek_bits(const struct input_bitstream *istream, unsigned num_bits)
95 if (unlikely(num_bits == 0))
97 return istream->bitbuf >> (sizeof(istream->bitbuf) * 8 - num_bits);
100 /* Removes @num_bits from the bitstream. There must be at least @num_bits
101 * remaining in the buffer variable, from a previous call to
102 * bitstream_ensure_bits(). */
104 bitstream_remove_bits(struct input_bitstream *istream, unsigned num_bits)
106 istream->bitbuf <<= num_bits;
107 istream->bitsleft -= num_bits;
110 /* Removes and returns @num_bits bits from the bitstream. There must be at
111 * least @num_bits remaining in the buffer variable, from a previous call to
112 * bitstream_ensure_bits(). */
114 bitstream_pop_bits(struct input_bitstream *istream, unsigned num_bits)
116 u32 n = bitstream_peek_bits(istream, num_bits);
117 bitstream_remove_bits(istream, num_bits);
121 /* Reads and returns the next @num_bits bits from the bitstream.
122 * If the input data is exhausted, the bits are assumed to be 0. */
124 bitstream_read_bits(struct input_bitstream *istream, unsigned num_bits)
126 bitstream_ensure_bits(istream, num_bits);
127 return bitstream_pop_bits(istream, num_bits);
130 /* Reads and returns the next literal byte embedded in the bitstream.
131 * If the input data is exhausted, the byte is assumed to be 0. */
133 bitstream_read_byte(struct input_bitstream *istream)
135 if (unlikely(istream->data_bytes_left == 0))
137 istream->data_bytes_left--;
138 return *istream->data++;
/* Needed alignment of decode_table parameter to make_huffman_decode_table().
 *
 * Reason: We may fill the entries with SSE instructions without worrying
 * about dealing with the unaligned case.  */
#define DECODE_TABLE_ALIGNMENT 16

/* Maximum supported symbol count for make_huffman_decode_table().
 *
 * Reason: In direct mapping entries, we store the symbol in 11 bits.  */
#define DECODE_TABLE_MAX_SYMBOLS 2048

/* Maximum supported table bits for make_huffman_decode_table().
 *
 * Reason: In internal binary tree nodes, offsets are encoded in 14 bits.
 * But the real limit is 13, because we allocate entries past the end of
 * the direct lookup part of the table for binary tree nodes.  (Note: if
 * needed this limit could be removed by encoding the offsets relative to
 * &decode_table[1 << table_bits].)  */
#define DECODE_TABLE_MAX_TABLE_BITS 13

/* Maximum supported codeword length for make_huffman_decode_table().
 *
 * Reason: In direct mapping entries, we encode the codeword length in 5
 * bits, and the top 2 bits can't both be set because that has special
 * meaning.  */
#define DECODE_TABLE_MAX_CODEWORD_LEN 23
169 /* Reads and returns the next Huffman-encoded symbol from a bitstream. If the
170 * input data is exhausted, the Huffman symbol is decoded as if the missing bits
173 * XXX: This is mostly duplicated in lzms_huffman_decode_symbol() in
174 * lzms-decompress.c. */
176 read_huffsym(struct input_bitstream *istream, const u16 decode_table[],
177 unsigned table_bits, unsigned max_codeword_len)
182 bitstream_ensure_bits(istream, max_codeword_len);
184 /* Index the decode table by the next table_bits bits of the input. */
185 key_bits = bitstream_peek_bits(istream, table_bits);
186 entry = decode_table[key_bits];
187 if (likely(entry < 0xC000)) {
188 /* Fast case: The decode table directly provided the
189 * symbol and codeword length. The low 11 bits are the
190 * symbol, and the high 5 bits are the codeword length. */
191 bitstream_remove_bits(istream, entry >> 11);
192 return entry & 0x7FF;
194 /* Slow case: The codeword for the symbol is longer than
195 * table_bits, so the symbol does not have an entry
196 * directly in the first (1 << table_bits) entries of the
197 * decode table. Traverse the appropriate binary tree
198 * bit-by-bit to decode the symbol. */
199 bitstream_remove_bits(istream, table_bits);
201 key_bits = (entry & 0x3FFF) + bitstream_pop_bits(istream, 1);
202 } while ((entry = decode_table[key_bits]) >= 0xC000);
208 make_huffman_decode_table(u16 decode_table[], unsigned num_syms,
209 unsigned num_bits, const u8 lens[],
210 unsigned max_codeword_len);
214 * Copy a LZ77 match at (dst - offset) to dst.
216 * The length and offset must be already validated --- that is, (dst - offset)
217 * can't underrun the output buffer, and (dst + length) can't overrun the output
218 * buffer. Also, the length cannot be 0.
220 * @winend points to the byte past the end of the output buffer.
221 * This function won't write any data beyond this position.
224 lz_copy(u8 *dst, unsigned length, unsigned offset, const u8 *winend)
226 const u8 *src = dst - offset;
227 #if defined(__x86_64__) || defined(__i386__)
228 /* Copy one 'unsigned long' at a time. On i386 and x86_64 this is
229 * faster than copying one byte at a time, unless the data is
230 * near-random and all the matches have very short lengths. Note that
231 * since this requires unaligned memory accesses, it won't necessarily
232 * be faster on every architecture.
234 * Also note that we might copy more than the length of the match. For
235 * example, if an 'unsigned long' is 8 bytes and the match is of length
236 * 5, then we'll simply copy 8 bytes. This is okay as long as we don't
237 * write beyond the end of the output buffer, hence the check for
238 * (winend - (dst + length) >= sizeof(unsigned long) - 1). */
239 if (offset >= sizeof(unsigned long) &&
240 winend - (dst + length) >= sizeof(unsigned long) - 1)
242 /* Access memory through a packed struct. This tricks the
243 * compiler into allowing unaligned memory accesses. */
244 struct ulong_wrapper {
248 const u8 *end = dst + length;
250 unsigned long v = ((struct ulong_wrapper *)src)->v;
251 ((struct ulong_wrapper *)dst)->v = v;
252 dst += sizeof(unsigned long);
253 src += sizeof(unsigned long);
264 #endif /* _WIMLIB_DECOMPRESS_COMMON_H */