4 * Make a canonical Huffman code from symbol frequencies; reconstruct a
5 * canonical Huffman code from codeword lengths, making it into a table for fast
6 * decoding of the input.
8 * Copyright (C) 2012 Eric Biggers
9 * Copyright (C) 2002 Matthew T. Russotto
11 * wimlib - Library for working with WIM files
13 * This library is free software; you can redistribute it and/or modify it under
14 * the terms of the GNU Lesser General Public License as published by the Free
15 * Software Foundation; either version 2.1 of the License, or (at your option)
18 * This library is distributed in the hope that it will be useful, but WITHOUT
19 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
20 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
23 * You should have received a copy of the GNU Lesser General Public License
24 * along with this library; if not, write to the Free Software Foundation, Inc.,
25 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 /* Intermediate (non-leaf) node in a Huffman tree. */
34 typedef struct HuffmanNode {
41 struct HuffmanNode *left_child;
42 struct HuffmanNode *right_child;
45 /* Leaf node in a Huffman tree. The fields are in the same order as the
46 * HuffmanNode, so it can be cast to a HuffmanNode. There are no pointers to
47 * the children in the leaf node. */
57 /* Comparator function for HuffmanLeafNodes. Sorts primarily by symbol
58 * frequency and secondarily by symbol value. */
59 static int cmp_leaves_by_freq(const void *__leaf1, const void *__leaf2)
61 const HuffmanLeafNode *leaf1 = __leaf1;
62 const HuffmanLeafNode *leaf2 = __leaf2;
64 int freq_diff = (int)leaf1->freq - (int)leaf2->freq;
67 return (int)leaf1->sym - (int)leaf2->sym;
72 /* Comparator function for HuffmanLeafNodes. Sorts primarily by code length and
73 * secondarily by symbol value. */
74 static int cmp_leaves_by_code_len(const void *__leaf1, const void *__leaf2)
76 const HuffmanLeafNode *leaf1 = __leaf1;
77 const HuffmanLeafNode *leaf2 = __leaf2;
79 int code_len_diff = (int)leaf1->path_len - (int)leaf2->path_len;
81 if (code_len_diff == 0)
82 return (int)leaf1->sym - (int)leaf2->sym;
87 /* Recursive function to calculate the depth of the leaves in a Huffman tree.
89 static void huffman_tree_compute_path_lengths(HuffmanNode *node, u16 cur_len)
91 if (node->sym == (u16)(-1)) {
92 /* Intermediate node. */
93 huffman_tree_compute_path_lengths(node->left_child, cur_len + 1);
94 huffman_tree_compute_path_lengths(node->right_child, cur_len + 1);
97 node->path_len = cur_len;
/* Creates a canonical Huffman code from an array of symbol frequencies.
 *
 * The algorithm used is similar to the well-known algorithm that builds a
 * Huffman tree using a minheap.  In that algorithm, the leaf nodes are
 * initialized and inserted into the minheap with the frequency as the key.
 * Repeatedly, the top two nodes (nodes with the lowest frequency) are taken out
 * of the heap and made the children of a new node that has a frequency equal to
 * the sum of the two frequencies of its children.  This new node is inserted
 * into the heap.  When all the nodes have been removed from the heap, what
 * remains is the Huffman tree.  The Huffman code for a symbol is given by the
 * path to it in the tree, where each left pointer is mapped to a 0 bit and each
 * right pointer is mapped to a 1 bit.
 *
 * The algorithm used here uses an optimization that removes the need to
 * actually use a heap.  The leaf nodes are first sorted by frequency, as
 * opposed to being made into a heap.  Note that this sorting step takes O(n log
 * n) time vs. O(n) time for heapifying the array, where n is the number of
 * symbols.  However, the heapless method is probably faster overall, due to the
 * time saved later.  In the heapless method, whenever an intermediate node is
 * created, it is not inserted into the sorted array.  Instead, the intermediate
 * nodes are kept in a separate array, which is easily kept sorted because every
 * time an intermediate node is initialized, it will have a frequency at least
 * as high as that of the previous intermediate node that was initialized.  So
 * whenever we want the 2 nodes, leaf or intermediate, that have the lowest
 * frequency, we check the low-frequency ends of both arrays, which is an O(1)
 * operation.
 *
 * The function builds a canonical Huffman code, not just any Huffman code.  A
 * Huffman code is canonical if the codeword for each symbol numerically
 * precedes the codeword for all other symbols of the same length that are
 * numbered higher than the symbol, and additionally, all shorter codewords,
 * 0-extended, numerically precede longer codewords.  A canonical Huffman code
 * is useful because it can be reconstructed by only knowing the path lengths in
 * the tree.  See the make_huffman_decode_table() function to see how to
 * reconstruct a canonical Huffman code from only the lengths of the codes.
 *
 * @num_syms:  The number of symbols in the alphabet.
 *
 * @max_codeword_len:  The maximum allowed length of a codeword in the code.
 * 			Note that if the code being created runs up against
 * 			this restriction, the code ultimately created will be
 * 			suboptimal, although there are some advantages for
 * 			limiting the length of the codewords.
 *
 * @freq_tab:  An array of length @num_syms that contains the frequencies
 * 		of each symbol in the uncompressed data.
 *
 * @lens:  An array of length @num_syms into which the lengths of the
 * 		codewords for each symbol will be written.
 *
 * @codewords:  An array of @num_syms short integers into which the
 * 		codewords for each symbol will be written.  The first
 * 		lens[i] bits of codewords[i] will contain the codeword
 * 		for symbol i.
 */
void make_canonical_huffman_code(uint num_syms, uint max_codeword_len,
				 const u32 freq_tab[], u8 lens[],
	/* We require at least 2 possible symbols in the alphabet to produce a
	 * valid Huffman decoding table. It is allowed that fewer than 2 symbols
	 * are actually used, though. */
	wimlib_assert(num_syms >= 2);

	/* Initialize the lengths and codewords to 0 */
	memset(lens, 0, num_syms * sizeof(lens[0]));
	memset(codewords, 0, num_syms * sizeof(codewords[0]));

	/* Calculate how many symbols have non-zero frequency. These are the
	 * symbols that actually appeared in the input. */
	uint num_used_symbols = 0;
	for (uint i = 0; i < num_syms; i++)
		if (freq_tab[i] != 0)

	/* It is impossible to make a code for num_used_symbols symbols if there
	 * aren't enough code bits to uniquely represent all of them. */
	wimlib_assert((1 << max_codeword_len) > num_used_symbols);

	/* Initialize the array of leaf nodes with the symbols and their
	 * frequencies (one leaf per used symbol; unused symbols get no leaf
	 * and keep the length 0 written above). */
	HuffmanLeafNode leaves[num_used_symbols];
	for (uint i = 0; i < num_syms; i++) {
		if (freq_tab[i] != 0) {
			leaves[leaf_idx].freq = freq_tab[i];
			leaves[leaf_idx].sym = i;
			leaves[leaf_idx].height = 0;

	/* Deal with the special cases where num_used_symbols < 2. */
	if (num_used_symbols < 2) {
		if (num_used_symbols == 0) {
			/* If num_used_symbols is 0, there are no symbols in the
			 * input, so it must be empty. This should be an error,
			 * but the LZX format expects this case to succeed. All
			 * the codeword lengths are simply marked as 0 (which
			 * was already done.) */
			/* If only one symbol is present, the LZX format
			 * requires that the Huffman code include two codewords.
			 * One is not used. Note that this doesn't make the
			 * encoded data take up more room anyway, since binary
			 * data itself has 2 symbols. */
			uint sym = leaves[0].sym;
			/* dummy symbol is 1, real symbol is 0 */
			/* dummy symbol is 0, real symbol is sym */

	/* Otherwise, there are at least 2 symbols in the input, so we need to
	 * find a real Huffman code. */

	/* Declare the array of intermediate nodes.  An intermediate node is not
	 * associated with a symbol. Instead, it represents some binary code
	 * prefix that is shared between at least 2 codewords. There can be at
	 * most num_used_symbols - 1 intermediate nodes when creating a Huffman
	 * code. This is because if there were at least num_used_symbols nodes,
	 * the code would be suboptimal because there would be at least one
	 * unnecessary intermediate node.
	 *
	 * The worst case (greatest number of intermediate nodes) would be if
	 * all the intermediate nodes were chained together. This results in
	 * num_used_symbols - 1 intermediate nodes. If num_used_symbols is at
	 * least 17, this configuration would not be allowed because the LZX
	 * format constrains codes to 16 bits or less each. However, it is
	 * still possible for there to be more than 16 intermediate nodes, as
	 * long as no leaf has a depth of more than 16. */
	HuffmanNode inodes[num_used_symbols - 1];

	/* Pointer to the leaf node of lowest frequency that hasn't already been
	 * added as the child of some intermediate node. */
	HuffmanLeafNode *cur_leaf = &leaves[0];

	/* Pointer past the end of the array of leaves. */
	HuffmanLeafNode *end_leaf = &leaves[num_used_symbols];

	/* Pointer to the intermediate node of lowest frequency. */
	HuffmanNode *cur_inode = &inodes[0];

	/* Pointer to the next unallocated intermediate node. */
	HuffmanNode *next_inode = &inodes[0];

	/* Only jump back to here if the maximum length of the codewords allowed
	 * by the LZX format (16 bits) is exceeded. */
try_building_tree_again:

	/* Sort the leaves from those that correspond to the least frequent
	 * symbol, to those that correspond to the most frequent symbol. If two
	 * leaves have the same frequency, they are sorted by symbol. */
	qsort(leaves, num_used_symbols, sizeof(leaves[0]), cmp_leaves_by_freq);

	cur_leaf = &leaves[0];
	cur_inode = &inodes[0];
	next_inode = &inodes[0];

	/* The following loop takes the two lowest frequency nodes of those
	 * remaining and makes them the children of the next available
	 * intermediate node. It continues until all the leaf nodes and
	 * intermediate nodes have been used up, or the maximum allowed length
	 * for the codewords is exceeded. For the latter case, we must adjust
	 * the frequencies to be more equal and then execute this loop again. */
		/* Lowest frequency node. */
		HuffmanNode *f1 = NULL;

		/* Second lowest frequency node. */
		HuffmanNode *f2 = NULL;

		/* Get the lowest and second lowest frequency nodes from
		 * the remaining leaves or from the intermediate nodes.
		 * A leaf wins ties (<=), keeping leaves as shallow as
		 * possible. */
		if (cur_leaf != end_leaf && (cur_inode == next_inode ||
					cur_leaf->freq <= cur_inode->freq)) {
			f1 = (HuffmanNode*)cur_leaf++;
		} else if (cur_inode != next_inode) {
		if (cur_leaf != end_leaf && (cur_inode == next_inode ||
					cur_leaf->freq <= cur_inode->freq)) {
			f2 = (HuffmanNode*)cur_leaf++;
		} else if (cur_inode != next_inode) {

		/* All nodes used up! */
		if (f1 == NULL || f2 == NULL)

		/* next_inode becomes the parent of f1 and f2. */
		next_inode->freq = f1->freq + f2->freq;
		next_inode->sym = (u16)(-1); /* Invalid symbol. */
		next_inode->left_child = f1;
		next_inode->right_child = f2;

		/* We need to keep track of the height so that we can detect if
		 * the length of a codeword has exceeded max_codeword_len. The
		 * parent node has a height one higher than the maximum height
		 * of its children. */
		next_inode->height = max(f1->height, f2->height) + 1;

		/* Check to see if the code length of the leaf farthest away
		 * from next_inode has exceeded the maximum code length. */
		if (next_inode->height > max_codeword_len) {
			/* The code lengths can be made more uniform by making
			 * the frequencies more uniform. Divide all the
			 * frequencies by 2, leaving 1 as the minimum frequency.
			 * If this keeps happening, the symbol frequencies will
			 * approach equality, which makes their Huffman
			 * codewords approach the length
			 * log_2(num_used_symbols). */
			for (uint i = 0; i < num_used_symbols; i++)
				if (leaves[i].freq > 1)
					leaves[i].freq >>= 1;
			goto try_building_tree_again;

	/* The Huffman tree is now complete, and its height is no more than
	 * max_codeword_len. */
	HuffmanNode *root = next_inode - 1;
	wimlib_assert(root->height <= max_codeword_len);

	/* Compute the path lengths for the leaf nodes. */
	huffman_tree_compute_path_lengths(root, 0);

	/* Sort the leaf nodes primarily by code length and secondarily by
	 * symbol value. */
	qsort(leaves, num_used_symbols, sizeof(leaves[0]), cmp_leaves_by_code_len);

	u16 cur_codeword = 0;
	uint cur_codeword_len = 0;
	for (uint i = 0; i < num_used_symbols; i++) {

		/* Each time a codeword becomes one longer, the current codeword
		 * is left shifted by one place. This is part of the procedure
		 * for enumerating the canonical Huffman code. Additionally,
		 * whenever a codeword is used, 1 is added to the current
		 * codeword. */
		uint len_diff = leaves[i].path_len - cur_codeword_len;
		cur_codeword <<= len_diff;
		cur_codeword_len += len_diff;

		u16 sym = leaves[i].sym;
		codewords[sym] = cur_codeword;
		lens[sym] = cur_codeword_len;
/*
 * Builds a fast huffman decoding table from a canonical huffman code lengths
 * table.  Based on code written by David Tritscher.
 *
 * @decode_table:  The array in which to create the fast huffman decoding
 * 			table. It must have a length of at least
 * 			(2**num_bits) + 2 * num_syms to guarantee
 * 			that there is enough space.
 *
 * @num_syms: 	Total number of symbols in the Huffman tree.
 *
 * @num_bits:  Any symbols with a code length of num_bits or less can be
 * 		decoded in one lookup of the table. 2**num_bits
 * 		must be greater than or equal to @num_syms if there are
 * 		any Huffman codes longer than @num_bits.
 *
 * @lens:  An array of length @num_syms, indexable by symbol, that
 * 		gives the length of that symbol. Because the Huffman
 * 		tree is in canonical form, it can be reconstructed by
 * 		only knowing the length of the code for each symbol.
 *
 * @max_code_len:  An integer that gives the longest possible codeword
 * 		length.
 *
 * Returns 0 on success; returns 1 if the length values do not correspond to a
 * valid Huffman tree, or if there are codes of length greater than @num_bits
 * but 2**num_bits < num_syms.
 *
 * What exactly is the format of the fast Huffman decoding table? The first
 * (1 << num_bits) entries of the table are indexed by chunks of the input of
 * size @num_bits. If the next Huffman code in the input happens to have a
 * length of exactly @num_bits, the symbol is simply read directly from the
 * decoding table. Alternatively, if the next Huffman code has length _less
 * than_ @num_bits, the symbol is also read directly from the decode table; this
 * is possible because every entry in the table that is indexed by an integer
 * that has the shorter code as a binary prefix is filled in with the
 * appropriate symbol. If a code has length n <= num_bits, it will have
 * 2**(num_bits - n) possible suffixes, and thus that many entries in the
 * decoding table.
 *
 * It's a bit more complicated if the next Huffman code has length of more than
 * @num_bits. The table entry indexed by the first @num_bits of that code
 * cannot give the appropriate symbol directly, because that entry is guaranteed
 * to be referenced by the Huffman codes for multiple symbols. And while the
 * LZX compression format does not allow codes longer than 16 bits, a table of
 * size (2 ** 16) = 65536 entries would be too slow to create.
 *
 * There are several different ways to make it possible to look up the symbols
 * for codes longer than @num_bits. A common way is to make the entries for the
 * prefixes of length @num_bits of those entries be pointers to additional
 * decoding tables that are indexed by some number of additional bits of the
 * code symbol. The technique used here is a bit simpler, however. We just
 * store the needed subtrees of the Huffman tree in the decoding table after the
 * lookup entries, beginning at index (2**num_bits). Real pointers are
 * replaced by indices into the decoding table, and we distinguish symbol
 * entries from pointers by the fact that values less than @num_syms must be
 * symbol values.
 */
int make_huffman_decode_table(u16 decode_table[], uint num_syms,
			      uint num_bits, const u8 lens[],
	/* Number of entries in the decode table. */
	u32 table_num_entries = 1 << num_bits;

	/* Current position in the decode table. */
	u32 decode_table_pos = 0;

	/* Fill entries for codes short enough for a direct mapping. Here we
	 * are taking advantage of the ordering of the codes, since they are for
	 * a canonical Huffman tree. It must be the case that all the codes of
	 * some length @code_len, zero-extended or one-extended, numerically
	 * precede all the codes of length @code_len + 1. Furthermore, if we
	 * have 2 symbols A and B, such that A is listed before B in the lens
	 * array, and both symbols have the same code length, then we know that
	 * the code for A numerically precedes the code for B. */
	for (uint code_len = 1; code_len <= num_bits; code_len++) {

		/* Number of entries that a code of length @code_len would
		 * require in the decode table. */
		u32 code_num_entries = 1 << (num_bits - code_len);

		/* For each symbol of length @code_len, fill in its entries in
		 * the decode table. */
		for (uint sym = 0; sym < num_syms; sym++) {

			if (lens[sym] != code_len)

			/* Check for table overrun. This can only happen if the
			 * given lengths do not correspond to a valid Huffman
			 * tree. */
			if (decode_table_pos >= table_num_entries) {
				ERROR("Huffman decoding table overrun: "
						"pos = %u, num_entries = %u\n",

			/* Fill all possible lookups of this symbol with
			 * the symbol itself. */
			for (uint i = 0; i < code_num_entries; i++)
				decode_table[decode_table_pos + i] = sym;

			/* Increment the position in the decode table by
			 * the number of entries that were just filled
			 * in. */
			decode_table_pos += code_num_entries;

	/* If all entries of the decode table have been filled in, there are no
	 * codes longer than num_bits, so we are done filling in the decode
	 * table. */
	if (decode_table_pos == table_num_entries)

	/* Otherwise, fill in the remaining entries, which correspond to codes longer
	 * than @num_bits. */

	/* First, zero out the rest of the entries; this is necessary so
	 * that the entries appear as "unallocated" in the next part. */
	for (uint i = decode_table_pos; i < table_num_entries; i++)

	/* Assert that 2**num_bits is at least num_syms. If this wasn't the
	 * case, we wouldn't be able to distinguish pointer entries from symbol
	 * entries. */
	wimlib_assert((1 << num_bits) >= num_syms);

	/* The current Huffman code. */
	uint current_code = decode_table_pos;

	/* The tree nodes are allocated starting at
	 * decode_table[table_num_entries]. Remember that the full size of the
	 * table, including the extra space for the tree nodes, is actually
	 * 2**num_bits + 2 * num_syms slots, while table_num_entries is only
	 * 2**num_bits. */
	uint next_free_tree_slot = table_num_entries;

	/* Go through every codeword of length greater than @num_bits. Note:
	 * the LZX format guarantees that the codeword length can be at most 16
	 * bits. */
	for (uint code_len = num_bits + 1; code_len <= max_code_len;

		for (uint sym = 0; sym < num_syms; sym++) {
			if (lens[sym] != code_len)

			/* i is the index of the current node; find it from the
			 * prefix of the current Huffman code. */
			uint i = current_code >> (code_len - num_bits);

			if (i >= (1 << num_bits)) {
				ERROR("Invalid canonical Huffman code!\n");

			/* Go through each bit of the current Huffman code
			 * beyond the prefix of length num_bits and walk the
			 * tree, "allocating" slots that have not yet been
			 * allocated. */
			for (int bit_num = num_bits + 1; bit_num <= code_len; bit_num++) {

				/* If the current tree node points to nowhere
				 * but we need to follow it, allocate a new node
				 * for it to point to. */
				if (decode_table[i] == 0) {
					decode_table[i] = next_free_tree_slot;
					decode_table[next_free_tree_slot++] = 0;
					decode_table[next_free_tree_slot++] = 0;

				/* Is the next bit 0 or 1? If 0, go left;
				 * otherwise, go right (by incrementing i by 1) */
				int bit_pos = code_len - bit_num;
				int bit = (current_code & (1 << bit_pos)) >>

			/* i is now the index of the leaf entry into which the
			 * actual symbol will go. */
			decode_table[i] = sym;

			/* Increment decode_table_pos only if the prefix of the
			 * Huffman code changes. */
			if (current_code >> (code_len - num_bits) !=
			    (current_code + 1) >> (code_len - num_bits))

			/* current_code is always incremented because this is
			 * how canonical Huffman codes are generated (add 1 for
			 * each code, then left shift whenever the code length
			 * increases). */

	/* If the lengths really represented a valid Huffman tree, all
	 * @table_num_entries in the table will have been filled. However, it
	 * is also possible that the tree is completely empty (as noted
	 * earlier) with all 0 lengths, and this is expected to succeed. */
	if (decode_table_pos != table_num_entries) {

		for (uint i = 0; i < num_syms; i++) {
				ERROR("Lengths do not form a valid "
					"canonical Huffman tree "
					"(only filled %u of %u decode "
					"table slots)!\n", decode_table_pos,
/* Reads a Huffman-encoded symbol when it is known there are less than
 * MAX_CODE_LEN bits remaining in the bitstream.
 *
 * NOTE(review): the function's return type and its trailing parameters (the
 * lens[] and num_syms referenced below) lie outside this excerpt — confirm
 * the full signature against the complete source. */
read_huffsym_near_end_of_input(struct input_bitstream *istream,
			       const u16 decode_table[],
	/* Number of valid bits currently buffered in the bitstream. */
	uint bitsleft = istream->bitsleft;

	if (table_bits > bitsleft) {
		/* Fewer buffered bits than the table index width: take the
		 * bits we do have and left-justify them into a table_bits-wide
		 * lookup key (the vacated low bits are zero). */
		key_bits = bitstream_peek_bits(istream, key_size) <<
				(table_bits - key_size);
		key_size = table_bits;
		bitsleft -= table_bits;
		key_bits = bitstream_peek_bits(istream, table_bits);

	sym = decode_table[key_bits];
	if (sym >= num_syms) {
		/* Table values >= num_syms are tree "pointers", not symbols:
		 * consume the lookup key, then follow the tree stored after
		 * the direct-lookup entries one bit at a time. */
		bitstream_remove_bits(istream, key_size);
			/* Ran out of input bits while walking the tree. */
			ERROR("Input stream exhausted!\n");
		key_bits = sym + bitstream_peek_bits(istream, 1);
		bitstream_remove_bits(istream, 1);
	} while ((sym = decode_table[key_bits]) >= num_syms);

	/* Consume exactly the bits of the decoded symbol's codeword. */
	bitstream_remove_bits(istream, lens[sym]);