4 * Functions used for decompression.
8 * Copyright (C) 2012, 2013 Eric Biggers
10 * This file is part of wimlib, a library for working with WIM files.
12 * wimlib is free software; you can redistribute it and/or modify it under the
13 * terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 3 of the License, or (at your option)
17 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
18 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
19 * A PARTICULAR PURPOSE. See the GNU General Public License for more
22 * You should have received a copy of the GNU General Public License
23 * along with wimlib; if not, see http://www.gnu.org/licenses/.
30 #include "wimlib/decompress.h"
31 #include "wimlib/util.h"
37 # define USE_SSE2_FILL
38 # include <emmintrin.h>
40 # define USE_LONG_FILL
45 * make_huffman_decode_table: - Builds a fast huffman decoding table from an
46 * array that gives the length of the codeword for each symbol in the alphabet.
47 * Originally based on code written by David Tritscher (taken from the original LZX
48 * decompression code); also heavily modified to add some optimizations used in
49 * the zlib code, as well as more comments; also added some optimizations to
50 * make filling in the decode table entries faster (may not help significantly
53 * @decode_table: The array in which to create the fast huffman decoding
54 * table. It must have a length of at least
55 * (2**table_bits) + 2 * num_syms to guarantee
56 * that there is enough space.
58 * @num_syms: Number of symbols in the alphabet, including symbols
59 * that do not appear in this particular input chunk.
61 * @table_bits: Any symbols with a code length of table_bits or less can
62 * be decoded in one lookup of the table. 2**table_bits
63 * must be greater than or equal to @num_syms if there are
64 * any Huffman codes longer than @table_bits.
66 * @lens: An array of length @num_syms, indexable by symbol, that
67 * gives the length of the Huffman codeword for that
68 * symbol. Because the Huffman tree is in canonical form,
69 * it can be reconstructed by only knowing the length of
70 * the codeword for each symbol. It is assumed, but not
71 * checked, that every length is less than
74 * @max_codeword_len: The longest codeword length allowed in the compression
77 * Returns 0 on success; returns -1 if the length values do not correspond to a
80 * The format of the Huffman decoding table is as follows. The first (1 <<
81 * table_bits) entries of the table are indexed by chunks of the input of size
82 * @table_bits. If the next Huffman codeword in the input happens to have a
83 * length of exactly @table_bits, the symbol is simply read directly from the
84 * decoding table. Alternatively, if the next Huffman codeword has length _less
85 * than_ @table_bits, the symbol is also read directly from the decode table;
86 * this is possible because every entry in the table that is indexed by an
87 * integer that has the shorter codeword as a binary prefix is filled in with
88 * the appropriate symbol. If a codeword has length n <= table_bits, it will
89 * have 2**(table_bits - n) possible suffixes, and thus that many entries in the
92 * It's a bit more complicated if the next Huffman codeword has length of more
93 * than @table_bits. The table entry indexed by the first @table_bits of that
94 * codeword cannot give the appropriate symbol directly, because that entry is
95 * guaranteed to be referenced by the Huffman codewords of multiple symbols.
96 * And while the LZX compression format does not allow codes longer than 16
97 * bits, a table of size (2 ** 16) = 65536 entries would be too slow to create.
99 * There are several different ways to make it possible to look up the symbols
100 * for codewords longer than @table_bits. One way is to make the entries for
101 * the prefixes of length @table_bits of those entries be pointers to additional
102 * decoding tables that are indexed by some number of additional bits of the
103 * codeword. The technique used here is a bit simpler, however: just store the
104 * needed subtrees of the Huffman tree in the decoding table after the lookup
105 * entries, beginning at index (2**table_bits). Real pointers are replaced by
106 * indices into the decoding table, and symbol entries are distinguished from
107 * pointers by the fact that values less than @num_syms must be symbol values.
110 make_huffman_decode_table(u16 *decode_table, unsigned num_syms,
111 unsigned table_bits, const u8 *lens,
112 unsigned max_codeword_len)
/* NOTE(review): this listing has lines elided (the embedded original line
 * numbers skip), so several declarations and statements referenced below
 * (e.g. 'left', 'sym_idx', 'j', loop bodies, closing braces) are not
 * visible here.  Comments describe only what the visible lines show. */
114 unsigned len_counts[max_codeword_len + 1]; /* codeword count per length, index 0..max_codeword_len */
115 u16 sorted_syms[num_syms]; /* symbols ordered by (codeword length, symbol value) */
116 unsigned offsets[max_codeword_len + 1]; /* next write position in sorted_syms for each length */
117 const unsigned table_num_entries = 1 << table_bits;
119 unsigned decode_table_pos;
120 void *decode_table_ptr; /* fill cursor into decode_table; advanced after each store loop */
122 unsigned codeword_len;
123 unsigned stores_per_loop; /* table entries per codeword at the current length, in store units */
126 const unsigned entries_per_long = sizeof(unsigned long) / sizeof(decode_table[0]);
130 const unsigned entries_per_xmm = sizeof(__m128i) / sizeof(decode_table[0]);
/* The vector/long fill paths below require an aligned table. */
133 wimlib_assert2((uintptr_t)decode_table % DECODE_TABLE_ALIGNMENT == 0);
135 /* accumulate lengths for codes */
136 for (unsigned i = 0; i <= max_codeword_len; i++)
139 for (unsigned sym = 0; sym < num_syms; sym++) {
140 wimlib_assert2(lens[sym] <= max_codeword_len);
141 len_counts[lens[sym]]++;
144 /* check for an over-subscribed or incomplete set of lengths */
/* NOTE(review): 'left' is declared and initialized on an elided line;
 * the loop subtracts each length's count from the remaining codeword
 * space (a Kraft-inequality style check) — confirm against full source. */
146 for (unsigned len = 1; len <= max_codeword_len; len++) {
148 left -= len_counts[len];
149 if (unlikely(left < 0)) { /* over-subscribed */
150 DEBUG("Invalid Huffman code (over-subscribed)");
155 if (unlikely(left != 0)) /* incomplete set */{
156 if (left == 1 << max_codeword_len) {
157 /* Empty code--- okay in XPRESS and LZX */
158 memset(decode_table, 0,
159 table_num_entries * sizeof(decode_table[0]));
162 DEBUG("Invalid Huffman code (incomplete set)");
167 /* Generate offsets into symbol table for each length for sorting */
169 for (unsigned len = 1; len < max_codeword_len; len++)
170 offsets[len + 1] = offsets[len] + len_counts[len];
172 /* Sort symbols primarily by length and secondarily by symbol order.
173 * This is basically a count-sort over the codeword lengths. */
174 for (unsigned sym = 0; sym < num_syms; sym++)
176 sorted_syms[offsets[lens[sym]]++] = sym;
178 /* Fill entries for codewords short enough for a direct mapping. We can
179 * take advantage of the ordering of the codewords, since the Huffman
180 * code is canonical. It must be the case that all the codewords of
181 * some length L numerically precede all the codewords of length L + 1.
182 * Furthermore, if we have 2 symbols A and B with the same codeword
183 * length but symbol A is sorted before symbol B, then then we know that
184 * the codeword for A numerically precedes the codeword for B. */
185 decode_table_ptr = decode_table;
189 /* Fill in the Huffman decode table entries one 128-bit vector at a
190 * time. This is 8 entries per store. */
/* A codeword of length 'codeword_len' owns 2**(table_bits - codeword_len)
 * consecutive table entries; divide by the store width to get the number
 * of vector stores needed per codeword. */
191 stores_per_loop = (1 << (table_bits - codeword_len)) / entries_per_xmm;
192 for (; stores_per_loop != 0; codeword_len++, stores_per_loop >>= 1) {
193 unsigned end_sym_idx = sym_idx + len_counts[codeword_len];
194 for (; sym_idx < end_sym_idx; sym_idx++) {
195 /* Note: unlike in the 'long' version below, the __m128i
196 * type already has __attribute__((may_alias)), so using
197 * it to access the decode table, which is an array of
198 * unsigned shorts, will not violate strict aliasing. */
204 sym = sorted_syms[sym_idx];
/* Broadcast the 16-bit symbol into all 8 lanes, then store
 * (store loop body elided in this excerpt). */
206 v = _mm_set1_epi16(sym);
207 p = (__m128i*)decode_table_ptr;
212 decode_table_ptr = p;
215 #endif /* USE_SSE2_FILL */
218 /* Fill in the Huffman decode table entries one 'unsigned long' at a
219 * time. On 32-bit systems this is 2 entries per store, while on 64-bit
220 * systems this is 4 entries per store. */
221 stores_per_loop = (1 << (table_bits - codeword_len)) / entries_per_long;
222 for (; stores_per_loop != 0; codeword_len++, stores_per_loop >>= 1) {
223 unsigned end_sym_idx = sym_idx + len_counts[codeword_len];
224 for (; sym_idx < end_sym_idx; sym_idx++) {
226 /* Accessing the array of unsigned shorts as unsigned
227 * longs would violate strict aliasing and would require
228 * compiling the code with -fno-strict-aliasing to
229 * guarantee correctness. To work around this problem,
230 * use the gcc 'may_alias' extension to define a special
231 * unsigned long type that may alias any other in-memory
233 typedef unsigned long __attribute__((may_alias)) aliased_long_t;
240 sym = sorted_syms[sym_idx];
/* Only 4- and 8-byte 'unsigned long' are supported; the
 * replication below depends on it. */
242 BUILD_BUG_ON(sizeof(aliased_long_t) != 4 &&
243 sizeof(aliased_long_t) != 8);
246 if (sizeof(aliased_long_t) >= 4)
248 if (sizeof(aliased_long_t) >= 8) {
249 /* This may produce a compiler warning if an
250 * aliased_long_t is 32 bits, but this won't be
251 * executed unless an aliased_long_t is at least
256 p = (aliased_long_t *)decode_table_ptr;
262 decode_table_ptr = p;
265 #endif /* USE_LONG_FILL */
267 /* Fill in the Huffman decode table entries one 16-bit integer at a
/* Fallback path: no division by a store width here, since each store
 * writes exactly one u16 entry. */
269 stores_per_loop = (1 << (table_bits - codeword_len));
270 for (; stores_per_loop != 0; codeword_len++, stores_per_loop >>= 1) {
271 unsigned end_sym_idx = sym_idx + len_counts[codeword_len];
272 for (; sym_idx < end_sym_idx; sym_idx++) {
277 sym = sorted_syms[sym_idx];
279 p = (u16*)decode_table_ptr;
286 decode_table_ptr = p;
290 /* If we've filled in the entire table, we are done. Otherwise, there
291 * are codes longer than table bits that we need to store in the
292 * tree-like structure at the end of the table rather than directly in
293 * the main decode table itself. */
295 decode_table_pos = (u16*)decode_table_ptr - decode_table;
296 if (decode_table_pos != table_num_entries) {
298 unsigned next_free_tree_slot;
299 unsigned cur_codeword;
301 wimlib_assert2(decode_table_pos < table_num_entries);
303 /* Fill in the remaining entries, which correspond to codes
304 * longer than @table_bits.
306 * First, zero out the rest of the entries. This is necessary
307 * so that the entries appear as "unallocated" in the next part.
/* NOTE(review): the do-loop header zeroing decode_table[j] is elided;
 * only its terminating condition is visible below. */
309 j = decode_table_pos;
312 } while (++j != table_num_entries);
314 /* Assert that 2**table_bits is at least num_syms. If this
315 * wasn't the case, we wouldn't be able to distinguish pointer
316 * entries from symbol entries. */
317 wimlib_assert2(table_num_entries >= num_syms);
320 /* The tree nodes are allocated starting at decode_table[1 <<
321 * table_bits]. Remember that the full size of the table,
322 * including the extra space for the tree nodes, is actually
323 * 2**table_bits + 2 * num_syms slots, while table_num_entries
324 * is only 2**table_bits. */
325 next_free_tree_slot = table_num_entries;
327 /* The current Huffman codeword */
328 cur_codeword = decode_table_pos << 1;
330 /* Go through every codeword of length greater than @table_bits,
331 * primarily in order of codeword length and secondarily in
332 * order of symbol. */
333 wimlib_assert2(codeword_len == table_bits + 1);
334 for (; codeword_len <= max_codeword_len; codeword_len++, cur_codeword <<= 1)
336 unsigned end_sym_idx = sym_idx + len_counts[codeword_len];
337 for (; sym_idx < end_sym_idx; sym_idx++, cur_codeword++) {
338 unsigned sym = sorted_syms[sym_idx];
339 unsigned extra_bits = codeword_len - table_bits;
341 /* index of the current node; find it from the
342 * prefix of the current Huffman codeword. */
343 unsigned node_idx = cur_codeword >> extra_bits;
344 wimlib_assert2(node_idx < table_num_entries);
346 /* Go through each bit of the current Huffman
347 * codeword beyond the prefix of length
348 * @table_bits and walk the tree, allocating any
349 * slots that have not yet been allocated. */
/* NOTE(review): the do-loop header (and the --extra_bits step)
 * is elided; only the loop body and its while-condition are
 * visible below. */
352 /* If the current tree node points to
353 * nowhere but we need to follow it,
354 * allocate a new node for it to point
356 if (decode_table[node_idx] == 0) {
/* Allocate a fresh node: two child slots, both initially
 * "unallocated" (0). */
357 decode_table[node_idx] = next_free_tree_slot;
358 decode_table[next_free_tree_slot++] = 0;
359 decode_table[next_free_tree_slot++] = 0;
360 wimlib_assert2(next_free_tree_slot <=
361 table_num_entries + 2 * num_syms);
364 /* Set node_idx to left child */
365 node_idx = decode_table[node_idx];
367 /* Is the next bit 0 or 1? If 0, go left
368 * (already done). If 1, go right by
369 * incrementing node_idx. */
371 node_idx += (cur_codeword >> extra_bits) & 1;
372 } while (extra_bits != 0);
374 /* node_idx is now the index of the leaf entry
375 * into which the actual symbol will go. */
376 decode_table[node_idx] = sym;
378 /* Note: cur_codeword is always incremented at
379 * the end of this loop because this is how
380 * canonical Huffman codes are generated (add 1
381 * for each code, then left shift whenever the
382 * code length increases) */
389 /* Reads a Huffman-encoded symbol from the bitstream when the number of remaining
390 * bits is less than the maximum codeword length. */
392 read_huffsym_near_end_of_input(struct input_bitstream *istream,
393 const u16 decode_table[],
/* NOTE(review): the remaining parameters (lens, num_syms, table_bits) and
 * the locals 'key_size', 'key_bits', and 'sym' are declared on lines
 * elided from this excerpt, as is the function's return statement. */
399 unsigned bitsleft = istream->bitsleft; /* bits still buffered in the stream */
404 if (table_bits > bitsleft) {
/* Fewer than table_bits bits remain: peek only what is left
 * ('key_size' is presumably set to 'bitsleft' on an elided line —
 * confirm against full source) and shift it up so the partial key
 * still indexes the correct region of the direct-lookup table. */
407 key_bits = bitstream_peek_bits(istream, key_size) <<
408 (table_bits - key_size);
/* Else branch (brace elided): a full table_bits-sized key is available. */
410 key_size = table_bits;
411 bitsleft -= table_bits;
412 key_bits = bitstream_peek_bits(istream, table_bits);
/* Direct lookup: values < num_syms are symbols; values >= num_syms are
 * indices of tree nodes for codewords longer than table_bits. */
415 sym = decode_table[key_bits];
416 if (sym >= num_syms) {
417 bitstream_remove_bits(istream, key_size);
/* NOTE(review): the exhaustion check guarding this DEBUG (and the
 * do-loop header for the tree walk below) are elided here. */
420 DEBUG("Input stream exhausted");
/* Walk the tree one bit at a time: 'sym' currently holds the node
 * index; add the next input bit to pick the left/right child. */
423 key_bits = sym + bitstream_peek_bits(istream, 1);
424 bitstream_remove_bits(istream, 1);
426 } while ((sym = decode_table[key_bits]) >= num_syms);
/* Short codeword resolved directly: consume exactly the codeword's
 * length (lens[sym]) from the stream. */
428 bitstream_remove_bits(istream, lens[sym]);