/*
 * Code for compression shared among multiple compression formats.
 *
 * The following copying information applies to this specific source code file:
 *
 * Written in 2012-2014 by Eric Biggers <ebiggers3@gmail.com>
 *
 * To the extent possible under law, the author(s) have dedicated all copyright
 * and related and neighboring rights to this software to the public domain
 * worldwide via the Creative Commons Zero 1.0 Universal Public Domain
 * Dedication (the "CC0").
 *
 * This software is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE.  See the CC0 for more details.
 *
 * You should have received a copy of the CC0 along with this software; if not
 * see <http://creativecommons.org/publicdomain/zero/1.0/>.
 */
#include <string.h>

#include "wimlib/compress_common.h"
#include "wimlib/util.h"
/* Given the binary tree node A[subtree_idx] whose children already
 * satisfy the maxheap property, swap the node with its greater child
 * until it is greater than both its children, so that the maxheap
 * property is satisfied in the subtree rooted at A[subtree_idx].  */
static void
heapify_subtree(u32 A[], unsigned length, unsigned subtree_idx)
{
        unsigned parent_idx;
        unsigned child_idx;
        u32 v;

        v = A[subtree_idx];
        parent_idx = subtree_idx;
        while ((child_idx = parent_idx * 2) <= length) {
                if (child_idx < length && A[child_idx + 1] > A[child_idx])
                        child_idx++;
                if (v >= A[child_idx])
                        break;
                A[parent_idx] = A[child_idx];
                parent_idx = child_idx;
        }
        A[parent_idx] = v;
}
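/* Worked example (illustrative, not from the original comments): with the
 * 1-based view A[1..3] = {1, 5, 4}, heapify_subtree(A, 3, 1) sees that the
 * greater child of A[1] is A[2] = 5, moves it up into slot 1, and drops the
 * original value 1 into slot 2, yielding {5, 1, 4}, which satisfies the
 * maxheap property.  */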
/* Rearrange the array 'A' so that it satisfies the maxheap property.
 * 'A' uses 1-based indices, so the children of A[i] are A[i*2] and A[i*2 + 1].  */
static void
heapify_array(u32 A[], unsigned length)
{
        for (unsigned subtree_idx = length / 2; subtree_idx >= 1; subtree_idx--)
                heapify_subtree(A, length, subtree_idx);
}
/* Sort the array 'A', which contains 'length' unsigned 32-bit integers.  */
static void
heapsort(u32 A[], unsigned length)
{
        A--; /* Use 1-based indices  */

        heapify_array(A, length);

        while (length >= 2) {
                swap(A[1], A[length]);
                length--;
                heapify_subtree(A, length, 1);
        }
}
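/* Worked example (illustrative): heapsort() on {2, 3, 1} first heapifies the
 * 1-based view into {3, 2, 1}, then repeatedly swaps the maximum A[1] with
 * the last in-heap element and re-sifts: {3, 2, 1} -> {2, 1, 3} -> {1, 2, 3}.  */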
#define NUM_SYMBOL_BITS 10
#define SYMBOL_MASK ((1 << NUM_SYMBOL_BITS) - 1)
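/* Packing example (illustrative): a symbol 5 with frequency 60 is stored as
 * (60 << NUM_SYMBOL_BITS) | 5 = 0xF005; the frequency is recovered with
 * (entry >> NUM_SYMBOL_BITS) and the symbol with (entry & SYMBOL_MASK).  */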
/*
 * Sort the symbols primarily by frequency and secondarily by symbol
 * value.  Discard symbols with zero frequency and fill in an array with
 * the remaining symbols, along with their frequencies.  The low
 * NUM_SYMBOL_BITS bits of each array entry will contain the symbol
 * value, and the remaining bits will contain the frequency.
 *
 * @num_syms
 *      Number of symbols in the alphabet.
 *      Can't be greater than (1 << NUM_SYMBOL_BITS).
 *
 * @freqs
 *      The frequency of each symbol.
 *
 * @lens
 *      An array that eventually will hold the length of each codeword.
 *      This function only fills in the codeword lengths for symbols that
 *      have zero frequency, which are not well defined per se but will
 *      be set to 0.
 *
 * @symout
 *      The output array, described above.
 *
 * Returns the number of entries in 'symout' that were filled.  This is
 * the number of symbols that have nonzero frequency.
 */
static unsigned
sort_symbols(unsigned num_syms, const u32 freqs[restrict],
             u8 lens[restrict], u32 symout[restrict])
{
        unsigned num_used_syms;
        unsigned num_counters;
        /* We rely on heapsort, but with an added optimization.  Since
         * it's common for most symbol frequencies to be low, we first do
         * a count sort using a limited number of counters.  High
         * frequencies will be counted in the last counter, and only they
         * will be sorted with heapsort.
         *
         * Note: with more symbols, it is generally beneficial to have more
         * counters.  About 1 counter per 4 symbols seems fast.
         *
         * Note: I also tested radix sort, but even for large symbol
         * counts (> 255) and frequencies bounded at 16 bits (enabling
         * radix sort by just two base-256 digits), it didn't seem any
         * faster than the method implemented here.
         *
         * Note: I tested the optimized quicksort implementation from
         * glibc (with indirection overhead removed), but it was only
         * marginally faster than the simple heapsort implemented here.
         *
         * Tests were done with building the codes for LZX.  Results may
         * vary for different compression algorithms...!  */

        num_counters = ALIGN(DIV_ROUND_UP(num_syms, 4), 4);

        unsigned counters[num_counters];

        memset(counters, 0, sizeof(counters));

        /* Count the frequencies.  */
        for (unsigned sym = 0; sym < num_syms; sym++)
                counters[min(freqs[sym], num_counters - 1)]++;
        /* Make the counters cumulative, ignoring the zero-th, which
         * counted symbols with zero frequency.  As a side effect, this
         * calculates the number of symbols with nonzero frequency.  */
        num_used_syms = 0;
        for (unsigned i = 1; i < num_counters; i++) {
                unsigned count = counters[i];
                counters[i] = num_used_syms;
                num_used_syms += count;
        }
        /* Sort nonzero-frequency symbols using the counters.  At the
         * same time, set the codeword lengths of zero-frequency symbols
         * to 0.  */
        for (unsigned sym = 0; sym < num_syms; sym++) {
                u32 freq = freqs[sym];

                if (freq != 0) {
                        symout[counters[min(freq, num_counters - 1)]++] =
                                sym | (freq << NUM_SYMBOL_BITS);
                } else {
                        lens[sym] = 0;
                }
        }
        /* Sort the symbols counted in the last counter.  */
        heapsort(symout + counters[num_counters - 2],
                 counters[num_counters - 1] - counters[num_counters - 2]);

        return num_used_syms;
}
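/* Worked example (illustrative): for num_syms = 4 and freqs = {0, 3, 1, 1},
 * num_counters is 4, so frequencies 0, 1, 2, and >= 3 map to counters 0-3.
 * Symbol 0 has zero frequency, so lens[0] is set to 0 and the symbol is
 * discarded.  The remaining symbols are emitted in order of increasing
 * frequency, with symbol value breaking ties: symout[] receives
 * 2 | (1 << 10), 3 | (1 << 10), 1 | (3 << 10), and 3 is returned.  */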
/*
 * Build the Huffman tree.
 *
 * This is an optimized implementation that
 *      (a) takes advantage of the frequencies being already sorted;
 *      (b) only generates non-leaf nodes, since the non-leaf nodes of a
 *          Huffman tree are sufficient to generate a canonical code;
 *      (c) only stores parent pointers, not child pointers;
 *      (d) produces the nodes in the same memory used for input
 *          frequency information.
 *
 * Array 'A', which contains 'sym_count' entries, is used for both input
 * and output.  For this function, 'sym_count' must be at least 2.
 *
 * For input, the array must contain the frequencies of the symbols,
 * sorted in increasing order.  Specifically, each entry must contain a
 * frequency left shifted by NUM_SYMBOL_BITS bits.  Any data in the low
 * NUM_SYMBOL_BITS bits of the entries will be ignored by this function.
 * Although these bits will, in fact, contain the symbols that correspond
 * to the frequencies, this function is concerned with frequencies only
 * and keeps the symbols as-is.
 *
 * For output, this function will produce the non-leaf nodes of the
 * Huffman tree.  These nodes will be stored in the first (sym_count - 1)
 * entries of the array.  Entry A[sym_count - 2] will represent the root
 * node.  Each other node will contain the zero-based index of its parent
 * node in 'A', left shifted by NUM_SYMBOL_BITS bits.  The low
 * NUM_SYMBOL_BITS bits of each entry in A will be kept as-is.  Again,
 * note that although these low bits will, in fact, contain a symbol
 * value, this symbol will have *no relationship* with the Huffman tree
 * node that happens to occupy the same slot.  This is because this
 * implementation only generates the non-leaf nodes of the tree.
 */
static void
build_tree(u32 A[], unsigned sym_count)
{
        /* Index, in 'A', of next lowest frequency symbol that has not
         * yet been processed.  */
        unsigned i = 0;

        /* Index, in 'A', of next lowest frequency parentless non-leaf
         * node; or, if equal to 'e', then no such node exists yet.  */
        unsigned b = 0;

        /* Index, in 'A', of next node to allocate as a non-leaf.  */
        unsigned e = 0;

        do {
                unsigned m, n;
                u32 freq_shifted;

                /* Choose the two next lowest frequency entries.  */

                if (i != sym_count &&
                    (b == e || (A[i] >> NUM_SYMBOL_BITS) <= (A[b] >> NUM_SYMBOL_BITS)))
                        m = i++;
                else
                        m = b++;

                if (i != sym_count &&
                    (b == e || (A[i] >> NUM_SYMBOL_BITS) <= (A[b] >> NUM_SYMBOL_BITS)))
                        n = i++;
                else
                        n = b++;

                /* Allocate a non-leaf node and link the entries to it.
                 *
                 * If we link an entry that we're visiting for the first
                 * time (via index 'i'), then we're actually linking a
                 * leaf node and it will have no effect, since the leaf
                 * will be overwritten with a non-leaf when index 'e'
                 * catches up to it.  But it's not any slower to
                 * unconditionally set the parent index.
                 *
                 * We also compute the frequency of the non-leaf node as
                 * the sum of its two children's frequencies.  */

                freq_shifted = (A[m] & ~SYMBOL_MASK) + (A[n] & ~SYMBOL_MASK);

                A[m] = (A[m] & SYMBOL_MASK) | (e << NUM_SYMBOL_BITS);
                A[n] = (A[n] & SYMBOL_MASK) | (e << NUM_SYMBOL_BITS);
                A[e] = (A[e] & SYMBOL_MASK) | freq_shifted;
                e++;
        } while (sym_count - e > 1);
        /* When just one entry remains, it is a "leaf" that was linked
         * to some other node.  We ignore it, since the rest of the
         * array contains the non-leaves which we need.  (Note that
         * we're assuming the cases with 0 or 1 symbols were handled
         * separately.)  */
}
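/* Worked example (illustrative): for the frequencies 1, 1, 2, 3 stored
 * shifted (sym_count = 4), the first iteration combines the two frequency-1
 * leaves into non-leaf A[0] (frequency 2); the second combines the
 * frequency-2 leaf, which ties with and takes precedence over A[0], with
 * A[0] itself into A[1] (frequency 4); the third combines the frequency-3
 * leaf and A[1] into the root A[2] (frequency 7).  A[0] and A[1] end up
 * holding parent indices 1 and 2 respectively.  */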
/*
 * Given the stripped-down Huffman tree constructed by build_tree(),
 * determine the number of codewords that should be assigned each
 * possible length, taking into account the length-limited constraint.
 *
 * @A
 *      The array produced by build_tree(), containing parent index
 *      information for the non-leaf nodes of the Huffman tree.  Each
 *      entry in this array is a node; a node's parent always has a
 *      greater index than that node itself.  This function will
 *      overwrite the parent index information in this array, so
 *      essentially it will destroy the tree.  However, the data in the
 *      low NUM_SYMBOL_BITS of each entry will be preserved.
 *
 * @root_idx
 *      The 0-based index of the root node in 'A', and consequently one
 *      less than the number of tree node entries in 'A'.  (Or, really 2
 *      less than the actual length of 'A'.)
 *
 * @len_counts
 *      An array of length ('max_codeword_len' + 1) in which the number
 *      of codewords having each length <= max_codeword_len will be
 *      provided.
 *
 * @max_codeword_len
 *      The maximum permissible codeword length.
 */
static void
compute_length_counts(u32 A[restrict], unsigned root_idx,
                      unsigned len_counts[restrict], unsigned max_codeword_len)
{
        /* The key observations are:
         *
         * (1) We can traverse the non-leaf nodes of the tree, always
         * visiting a parent before its children, by simply iterating
         * through the array in reverse order.  Consequently, we can
         * compute the depth of each node in one pass, overwriting the
         * parent indices with depths.
         *
         * (2) We can initially assume that in the real Huffman tree,
         * both children of the root are leaves.  This corresponds to two
         * codewords of length 1.  Then, whenever we visit a (non-leaf)
         * node during the traversal, we modify this assumption to
         * account for the current node *not* being a leaf, but rather
         * its two children being leaves.  This causes the loss of one
         * codeword for the current depth and the addition of two
         * codewords for the current depth plus one.
         *
         * (3) We can handle the length-limited constraint fairly easily
         * by simply using the largest length available when a depth
         * exceeds max_codeword_len.  */

        for (unsigned len = 0; len <= max_codeword_len; len++)
                len_counts[len] = 0;
        len_counts[1] = 2;

        /* Set the root node's depth to 0.  */
        A[root_idx] &= SYMBOL_MASK;
        for (int node = root_idx - 1; node >= 0; node--) {

                /* Calculate the depth of this node.  */

                unsigned parent = A[node] >> NUM_SYMBOL_BITS;
                unsigned parent_depth = A[parent] >> NUM_SYMBOL_BITS;
                unsigned depth = parent_depth + 1;
                unsigned len = depth;

                /* Set the depth of this node so that it is available
                 * when its children (if any) are processed.  */

                A[node] = (A[node] & SYMBOL_MASK) | (depth << NUM_SYMBOL_BITS);

                /* If needed, decrease the length to meet the
                 * length-limited constraint.  This is not the optimal
                 * method for generating length-limited Huffman codes!
                 * But it should be good enough.  */
                if (len >= max_codeword_len) {
                        len = max_codeword_len;
                        do {
                                len--;
                        } while (len_counts[len] == 0);
                }

                /* Account for the fact that we have a non-leaf node at
                 * the current depth.  */
                len_counts[len]--;
                len_counts[len + 1] += 2;
        }
}
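/* Worked example (illustrative): continuing the frequencies-{1, 1, 2, 3}
 * example from build_tree(), root_idx = 2.  After initialization,
 * len_counts[1] = 2.  Node 1 has depth 1, so len_counts[1] drops to 1 and
 * len_counts[2] becomes 2; node 0 has depth 2, so len_counts[2] drops to 1
 * and len_counts[3] becomes 2.  The final counts, one codeword of length 1,
 * one of length 2, and two of length 3, match the optimal Huffman code for
 * these frequencies.  */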
/*
 * Generate the codewords for a canonical Huffman code.
 *
 * @A
 *      The output array for codewords.  In addition, initially this
 *      array must contain the symbols, sorted primarily by frequency and
 *      secondarily by symbol value, in the low NUM_SYMBOL_BITS bits of
 *      each entry.
 *
 * @lens
 *      Output array for codeword lengths.
 *
 * @len_counts
 *      An array that provides the number of codewords that will have
 *      each possible length <= max_codeword_len.
 *
 * @max_codeword_len
 *      Maximum length, in bits, of each codeword.
 *
 * @num_syms
 *      Number of symbols in the alphabet, including symbols with zero
 *      frequency.  This is the length of the 'A' and 'lens' arrays.
 */
static void
gen_codewords(u32 A[restrict], u8 lens[restrict],
              const unsigned len_counts[restrict],
              unsigned max_codeword_len, unsigned num_syms)
{
        u32 next_codewords[max_codeword_len + 1];
        /* Given the number of codewords that will have each length,
         * assign codeword lengths to symbols.  We do this by assigning
         * the lengths in decreasing order to the symbols sorted
         * primarily by increasing frequency and secondarily by
         * increasing symbol value.  */
        for (unsigned i = 0, len = max_codeword_len; len >= 1; len--) {
                unsigned count = len_counts[len];
                while (count--)
                        lens[A[i++] & SYMBOL_MASK] = len;
        }
        /* Generate the codewords themselves.  We initialize the
         * 'next_codewords' array to provide the lexicographically first
         * codeword of each length, then assign codewords in symbol
         * order.  This produces a canonical code.  */
        next_codewords[0] = 0;
        next_codewords[1] = 0;
        for (unsigned len = 2; len <= max_codeword_len; len++)
                next_codewords[len] =
                        (next_codewords[len - 1] + len_counts[len - 1]) << 1;

        for (unsigned sym = 0; sym < num_syms; sym++)
                A[sym] = next_codewords[lens[sym]]++;
}
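/* Worked example (illustrative): with len_counts giving one codeword of
 * length 1, one of length 2, and two of length 3, the loop above computes
 * next_codewords[1] = 0, next_codewords[2] = (0 + 1) << 1 = 0b10, and
 * next_codewords[3] = (0b10 + 1) << 1 = 0b110.  The four symbols then
 * receive, in symbol order within each length, the codewords 0 (length 1),
 * 10 (length 2), and 110 and 111 (length 3), which form a canonical prefix
 * code.  */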
/*
 * ---------------------------------------------------------------------
 *                      make_canonical_huffman_code()
 * ---------------------------------------------------------------------
 *
 * Given an alphabet and the frequency of each symbol in it, construct a
 * length-limited canonical Huffman code.
 *
 * @num_syms
 *      The number of symbols in the alphabet.  The symbols are the
 *      integers in the range [0, num_syms - 1].  This parameter must be
 *      at least 2 and can't be greater than (1 << NUM_SYMBOL_BITS).
 *
 * @max_codeword_len
 *      The maximum permissible codeword length.
 *
 * @freqs
 *      An array of @num_syms entries, each of which specifies the
 *      frequency of the corresponding symbol.  It is valid for some,
 *      none, or all of the frequencies to be 0.
 *
 * @lens
 *      An array of @num_syms entries in which this function will return
 *      the length, in bits, of the codeword assigned to each symbol.
 *      Symbols with 0 frequency will not have codewords per se, but
 *      their entries in this array will be set to 0.  No lengths greater
 *      than @max_codeword_len will be assigned.
 *
 * @codewords
 *      An array of @num_syms entries in which this function will return
 *      the codeword for each symbol, right-justified and padded on the
 *      left with zeroes.  Codewords for symbols with 0 frequency will be
 *      undefined.
 *
 * ---------------------------------------------------------------------
 *
 * This function builds a length-limited canonical Huffman code.
 *
 * A length-limited Huffman code contains no codewords longer than some
 * specified length, and has exactly (with some algorithms) or
 * approximately (with the algorithm used here) the minimum weighted path
 * length from the root, given this constraint.
 *
 * A canonical Huffman code satisfies the properties that a longer
 * codeword never lexicographically precedes a shorter codeword, and the
 * lexicographic ordering of codewords of the same length is the same as
 * the lexicographic ordering of the corresponding symbols.  A canonical
 * Huffman code, or more generally a canonical prefix code, can be
 * reconstructed from only a list containing the codeword length of each
 * symbol.
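 *
 * For example (illustrative): the codeword lengths {1, 2, 3, 3} for
 * symbols 0, 1, 2, 3 fully determine the canonical code 0, 10, 110, 111;
 * shorter codewords lexicographically precede longer ones, and codewords
 * of equal length appear in symbol order.
 *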
 * The classic algorithm to generate a Huffman code creates a node for
 * each symbol, then inserts these nodes into a min-heap keyed by symbol
 * frequency.  Then, repeatedly, the two lowest-frequency nodes are
 * removed from the min-heap and added as the children of a new node
 * having frequency equal to the sum of its two children, which is then
 * inserted into the min-heap.  When only a single node remains in the
 * min-heap, it is the root of the Huffman tree.  The codeword for each
 * symbol is determined by the path needed to reach the corresponding
 * node from the root.  Descending to the left child appends a 0 bit,
 * whereas descending to the right child appends a 1 bit.
 *
 * The classic algorithm is relatively easy to understand, but it is
 * subject to a number of inefficiencies.  In practice, it is fastest to
 * first sort the symbols by frequency.  (This itself can be subject to
 * an optimization based on the fact that most frequencies tend to be
 * low.)  At the same time, we sort secondarily by symbol value, which
 * aids the process of generating a canonical code.  Then, during tree
 * construction, no heap is necessary because both the leaf nodes and the
 * unparented non-leaf nodes can be easily maintained in sorted order.
 * Consequently, there can never be more than two possibilities for the
 * next-lowest-frequency node.
 *
 * In addition, because we're generating a canonical code, we actually
 * don't need the leaf nodes of the tree at all, only the non-leaf nodes.
 * This is because for canonical code generation we don't need to know
 * where the symbols are in the tree.  Rather, we only need to know how
 * many leaf nodes have each depth (codeword length).  And this
 * information can, in fact, be quickly generated from the tree of
 * non-leaves only.
 *
 * Furthermore, we can build this stripped-down Huffman tree directly in
 * the array in which the codewords are to be generated, provided that
 * these array slots are large enough to hold a symbol and frequency
 * value combined.
 *
 * Still furthermore, we don't even need to maintain explicit child
 * pointers.  We only need the parent pointers, and even those can be
 * overwritten in-place with depth information as part of the process of
 * extracting codeword lengths from the tree.  So in summary, we do NOT
 * need a big structure like:
 *
 *      struct huffman_tree_node {
 *              unsigned int symbol;
 *              unsigned int frequency;
 *              unsigned int depth;
 *              struct huffman_tree_node *left_child;
 *              struct huffman_tree_node *right_child;
 *      };
 *
 * ... which often gets used in "naive" implementations of Huffman code
 * generation.
 *
 * Most of these optimizations are based on the implementation in 7-Zip
 * (source file: C/HuffEnc.c), which has been placed in the public domain
 * by Igor Pavlov.  But I've rewritten the code with extensive comments,
 * as it took me a while to figure out what it was doing...!
 *
 * ---------------------------------------------------------------------
 *
 * NOTE: in general, the same frequencies can be used to generate
 * different length-limited canonical Huffman codes.  One choice we have
 * is during tree construction, when we must decide whether to prefer a
 * leaf or non-leaf when there is a tie in frequency.  Another choice we
 * have is how to deal with codewords that would exceed @max_codeword_len
 * bits in length.  Both of these choices affect the resulting codeword
 * lengths, which otherwise can be mapped uniquely onto the resulting
 * canonical Huffman code.
 *
 * Normally, there is no problem with choosing one valid code over
 * another, provided that they produce similar compression ratios.
 * However, the LZMS compression format uses adaptive Huffman coding.  It
 * requires that both the decompressor and compressor build a canonical
 * code equivalent to that which can be generated by using the classic
 * Huffman tree construction algorithm and always processing leaves
 * before non-leaves when there is a frequency tie.  Therefore, we make
 * sure to do this.  This method also has the advantage of sometimes
 * shortening the longest codeword that is generated.
 *
 * There also is the issue of how codewords longer than @max_codeword_len
 * are dealt with.  Fortunately, for LZMS this is irrelevant because
 * for the LZMS alphabets no codeword can ever exceed
 * LZMS_MAX_CODEWORD_LEN (= 15).  Since the LZMS algorithm regularly
 * halves all frequencies, the frequencies cannot become high enough for
 * a length 16 codeword to be generated.  Specifically, I think that if
 * ties are broken in favor of leaves (as we do), the lowest total
 * frequency that would give a length-16 codeword would be the sum of the
 * frequencies 1 1 1 3 4 7 11 18 29 47 76 123 199 322 521 843 1364, which
 * is 3570.  And in LZMS we can't get a frequency that high based on the
 * alphabet sizes, rebuild frequencies, and scaling factors.  This
 * worst-case scenario arises from a degenerate tree in which each
 * successive leaf is combined with the single non-leaf built from all
 * lower-frequency entries.
 *
 * Excluding the first leaves (those with value 1), each leaf value must
 * be greater than the non-leaf up 1 and down 2 from it; otherwise that
 * leaf would have taken precedence over that non-leaf and been combined
 * with the leaf below, thereby decreasing the height of the tree.
 *
 * Interesting fact: if we were to instead prioritize non-leaves over
 * leaves, then the worst case frequencies would be the Fibonacci
 * sequence, plus an extra frequency of 1.  In this hypothetical
 * scenario, it would be slightly easier for longer codewords to be
 * generated.
 */
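/* Illustrative arithmetic for the note above (my derivation, not from the
 * original comments): the Fibonacci-based worst case would be an extra 1
 * plus the sixteen frequencies 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610
 * 987, whose total is 2584.  That is indeed lower than 3570, consistent
 * with longer codewords being slightly easier to generate in that
 * hypothetical scenario.  */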
void
make_canonical_huffman_code(unsigned num_syms, unsigned max_codeword_len,
                            const u32 freqs[restrict],
                            u8 lens[restrict], u32 codewords[restrict])
{
        u32 *A = codewords;
        unsigned num_used_syms;

        /* We begin by sorting the symbols primarily by frequency and
         * secondarily by symbol value.  As an optimization, the array
         * used for this purpose ('A') shares storage with the space in
         * which we will eventually return the codewords.  */
        num_used_syms = sort_symbols(num_syms, freqs, lens, A);

        /* 'num_used_syms' is the number of symbols with nonzero
         * frequency.  This may be less than @num_syms.  'num_used_syms'
         * is also the number of entries in 'A' that are valid.  Each
         * entry consists of a distinct symbol and a nonzero frequency
         * packed into a 32-bit integer.  */

        /* Handle special cases where only 0 or 1 symbols were used (had
         * nonzero frequency).  */

        if (unlikely(num_used_syms == 0)) {
                /* Code is empty.  sort_symbols() already set all lengths
                 * to 0, so there is nothing more to do.  */
                return;
        }
        if (unlikely(num_used_syms == 1)) {
                /* Only one symbol was used, so we only need one
                 * codeword.  But two codewords are needed to form the
                 * smallest complete Huffman code, which uses codewords 0
                 * and 1.  Therefore, we choose another symbol to which
                 * to assign a codeword.  We use 0 (if the used symbol is
                 * not 0) or 1 (if the used symbol is 0).  In either
                 * case, the lesser-valued symbol must be assigned
                 * codeword 0 so that the resulting code is canonical.  */

                unsigned sym = A[0] & SYMBOL_MASK;
                unsigned nonzero_idx = sym ? sym : 1;

                codewords[0] = 0;
                lens[0] = 1;
                codewords[nonzero_idx] = 1;
                lens[nonzero_idx] = 1;
                return;
        }
        /* Build a stripped-down version of the Huffman tree, sharing the
         * array 'A' with the symbol values.  Then extract length counts
         * from the tree and use them to generate the final codewords.  */

        build_tree(A, num_used_syms);

        {
                unsigned len_counts[max_codeword_len + 1];

                compute_length_counts(A, num_used_syms - 2,
                                      len_counts, max_codeword_len);

                gen_codewords(A, lens, len_counts, max_codeword_len, num_syms);
        }
}
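/* Usage sketch (illustrative; the 256-symbol alphabet and 15-bit limit are
 * hypothetical values, not mandated by this file):
 *
 *      u32 freqs[256] = {0};
 *      u8 lens[256];
 *      u32 codewords[256];
 *
 *      // ... accumulate symbol frequencies into freqs[] ...
 *      make_canonical_huffman_code(256, 15, freqs, lens, codewords);
 *      // lens[sym] now holds each codeword's bit length, and
 *      // codewords[sym] the right-justified codeword bits.
 */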