4 * Functions used for compression.
8 * Copyright (C) 2012, 2013 Eric Biggers
10 * This file is part of wimlib, a library for working with WIM files.
12 * wimlib is free software; you can redistribute it and/or modify it under the
13 * terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 3 of the License, or (at your option)
17 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
18 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
19 * A PARTICULAR PURPOSE. See the GNU General Public License for more
22 * You should have received a copy of the GNU General Public License
23 * along with wimlib; if not, see http://www.gnu.org/licenses/.
31 #include "wimlib/assert.h"
32 #include "wimlib/compress.h"
33 #include "wimlib/util.h"
39 flush_bits(struct output_bitstream *ostream)
41 *(le16*)ostream->bit_output = cpu_to_le16(ostream->bitbuf);
42 ostream->bit_output = ostream->next_bit_output;
43 ostream->next_bit_output = ostream->output;
45 ostream->num_bytes_remaining -= 2;
48 /* Writes @num_bits bits, given by the @num_bits least significant bits of
49 * @bits, to the output @ostream. */
51 bitstream_put_bits(struct output_bitstream *ostream, output_bitbuf_t bits,
56 wimlib_assert(num_bits <= 16);
57 if (num_bits <= ostream->free_bits) {
58 ostream->bitbuf = (ostream->bitbuf << num_bits) | bits;
59 ostream->free_bits -= num_bits;
62 if (ostream->num_bytes_remaining + (ostream->output -
63 ostream->bit_output) < 2)
66 /* It is tricky to output the bits correctly. The correct way
67 * is to output little-endian 2-byte words, such that the bits
68 * in the SECOND byte logically precede those in the FIRST byte.
69 * While the byte order is little-endian, the bit order is
70 * big-endian; the first bit in a byte is the high-order one.
71 * Any multi-bit numbers are in bit-big-endian form, so the
72 * low-order bit of a multi-bit number is the LAST bit to be
74 rem_bits = num_bits - ostream->free_bits;
75 ostream->bitbuf <<= ostream->free_bits;
76 ostream->bitbuf |= bits >> rem_bits;
78 ostream->free_bits = 16 - rem_bits;
79 ostream->bitbuf = bits;
85 /* Flushes any remaining bits in the output buffer to the output byte stream. */
87 flush_output_bitstream(struct output_bitstream *ostream)
89 if (ostream->num_bytes_remaining + (ostream->output -
90 ostream->bit_output) < 2)
92 if (ostream->free_bits != 16) {
93 ostream->bitbuf <<= ostream->free_bits;
99 /* Initializes an output bit buffer to write its output to the memory location
100 * pointer to by @data. */
102 init_output_bitstream(struct output_bitstream *ostream,
103 void *data, unsigned num_bytes)
105 wimlib_assert(num_bytes >= 4);
108 ostream->free_bits = 16;
109 ostream->bit_output = data;
110 ostream->next_bit_output = data + 2;
111 ostream->output = data + 4;
112 ostream->num_bytes_remaining = num_bytes - 4;
/* An intermediate (non-leaf) node in the Huffman tree.  It embeds the common
 * HuffmanNode base (so a HuffmanIntermediateNode* may be cast to HuffmanNode*
 * and vice versa; the base's sym field is set to INVALID_SYMBOL to mark an
 * intermediate node) and adds pointers to its two children, which may be
 * either leaves or other intermediate nodes. */
typedef struct HuffmanIntermediateNode {
	HuffmanNode node_base;		/* must be first: enables base-pointer casts */
	HuffmanNode *left_child;	/* child reached by a 0 bit */
	HuffmanNode *right_child;	/* child reached by a 1 bit */
} HuffmanIntermediateNode;
131 /* Comparator function for HuffmanNodes. Sorts primarily by symbol
132 * frequency and secondarily by symbol value. */
134 cmp_nodes_by_freq(const void *_leaf1, const void *_leaf2)
136 const HuffmanNode *leaf1 = _leaf1;
137 const HuffmanNode *leaf2 = _leaf2;
139 int freq_diff = (int)leaf1->freq - (int)leaf2->freq;
142 return (int)leaf1->sym - (int)leaf2->sym;
147 /* Comparator function for HuffmanNodes. Sorts primarily by code length and
148 * secondarily by symbol value. */
150 cmp_nodes_by_code_len(const void *_leaf1, const void *_leaf2)
152 const HuffmanNode *leaf1 = _leaf1;
153 const HuffmanNode *leaf2 = _leaf2;
155 int code_len_diff = (int)leaf1->path_len - (int)leaf2->path_len;
157 if (code_len_diff == 0)
158 return (int)leaf1->sym - (int)leaf2->sym;
160 return code_len_diff;
163 #define INVALID_SYMBOL 0xffff
165 /* Recursive function to calculate the depth of the leaves in a Huffman tree.
168 huffman_tree_compute_path_lengths(HuffmanNode *base_node, u16 cur_len)
170 if (base_node->sym == INVALID_SYMBOL) {
171 /* Intermediate node. */
172 HuffmanIntermediateNode *node = (HuffmanIntermediateNode*)base_node;
173 huffman_tree_compute_path_lengths(node->left_child, cur_len + 1);
174 huffman_tree_compute_path_lengths(node->right_child, cur_len + 1);
177 base_node->path_len = cur_len;
181 /* make_canonical_huffman_code: - Creates a canonical Huffman code from an array
182 * of symbol frequencies.
184 * The algorithm used is similar to the well-known algorithm that builds a
185 * Huffman tree using a minheap. In that algorithm, the leaf nodes are
186 * initialized and inserted into the minheap with the frequency as the key.
187 * Repeatedly, the top two nodes (nodes with the lowest frequency) are taken out
188 * of the heap and made the children of a new node that has a frequency equal to
189 * the sum of the two frequencies of its children. This new node is inserted
190 * into the heap. When all the nodes have been removed from the heap, what
191 * remains is the Huffman tree. The Huffman code for a symbol is given by the
192 * path to it in the tree, where each left pointer is mapped to a 0 bit and each
193 * right pointer is mapped to a 1 bit.
195 * The algorithm used here uses an optimization that removes the need to
196 * actually use a heap. The leaf nodes are first sorted by frequency, as
197 * opposed to being made into a heap. Note that this sorting step takes O(n log
198 * n) time vs. O(n) time for heapifying the array, where n is the number of
199 * symbols. However, the heapless method is probably faster overall, due to the
200 * time saved later. In the heapless method, whenever an intermediate node is
201 * created, it is not inserted into the sorted array. Instead, the intermediate
202 * nodes are kept in a separate array, which is easily kept sorted because every
203 * time an intermediate node is initialized, it will have a frequency at least
204 * as high as that of the previous intermediate node that was initialized. So
205 * whenever we want the 2 nodes, leaf or intermediate, that have the lowest
206 * frequency, we check the low-frequency ends of both arrays, which is an O(1)
209 * The function builds a canonical Huffman code, not just any Huffman code. A
210 * Huffman code is canonical if the codeword for each symbol numerically
211 * precedes the codeword for all other symbols of the same length that are
212 * numbered higher than the symbol, and additionally, all shorter codewords,
213 * 0-extended, numerically precede longer codewords. A canonical Huffman code
214 * is useful because it can be reconstructed by only knowing the path lengths in
215 * the tree. See the make_huffman_decode_table() function to see how to
216 * reconstruct a canonical Huffman code from only the lengths of the codes.
218 * @num_syms: The number of symbols in the alphabet.
220 * @max_codeword_len: The maximum allowed length of a codeword in the code.
221 * Note that if the code being created runs up against
222 * this restriction, the code ultimately created will be
223 * suboptimal, although there are some advantages for
224 * limiting the length of the codewords.
226 * @freq_tab: An array of length @num_syms that contains the frequencies
227 * of each symbol in the uncompressed data.
229 * @lens: An array of length @num_syms into which the lengths of the
230 * codewords for each symbol will be written.
232 * @codewords: An array of @num_syms short integers into which the
233 * codewords for each symbol will be written. The first
234 * lens[i] bits of codewords[i] will contain the codeword
238 make_canonical_huffman_code(unsigned num_syms,
239 unsigned max_codeword_len,
240 const freq_t freq_tab[restrict],
242 u16 codewords[restrict])
244 /* We require at least 2 possible symbols in the alphabet to produce a
245 * valid Huffman decoding table. It is allowed that fewer than 2 symbols
246 * are actually used, though. */
247 wimlib_assert(num_syms >= 2);
249 /* Initialize the lengths and codewords to 0 */
250 memset(lens, 0, num_syms * sizeof(lens[0]));
251 memset(codewords, 0, num_syms * sizeof(codewords[0]));
253 /* Calculate how many symbols have non-zero frequency. These are the
254 * symbols that actually appeared in the input. */
255 unsigned num_used_symbols = 0;
256 for (unsigned i = 0; i < num_syms; i++)
257 if (freq_tab[i] != 0)
261 /* It is impossible to make a code for num_used_symbols symbols if there
262 * aren't enough code bits to uniquely represent all of them. */
263 wimlib_assert((1 << max_codeword_len) > num_used_symbols);
265 /* Initialize the array of leaf nodes with the symbols and their
267 HuffmanNode leaves[num_used_symbols];
268 unsigned leaf_idx = 0;
269 for (unsigned i = 0; i < num_syms; i++) {
270 if (freq_tab[i] != 0) {
271 leaves[leaf_idx].freq = freq_tab[i];
272 leaves[leaf_idx].sym = i;
273 leaves[leaf_idx].height = 0;
278 /* Deal with the special cases where num_used_symbols < 2. */
279 if (num_used_symbols < 2) {
280 if (num_used_symbols == 0) {
281 /* If num_used_symbols is 0, there are no symbols in the
282 * input, so it must be empty. This should be an error,
283 * but the LZX format expects this case to succeed. All
284 * the codeword lengths are simply marked as 0 (which
285 * was already done.) */
287 /* If only one symbol is present, the LZX format
288 * requires that the Huffman code include two codewords.
289 * One is not used. Note that this doesn't make the
290 * encoded data take up more room anyway, since binary
291 * data itself has 2 symbols. */
293 unsigned sym = leaves[0].sym;
298 /* dummy symbol is 1, real symbol is 0 */
302 /* dummy symbol is 0, real symbol is sym */
310 /* Otherwise, there are at least 2 symbols in the input, so we need to
311 * find a real Huffman code. */
314 /* Declare the array of intermediate nodes. An intermediate node is not
315 * associated with a symbol. Instead, it represents some binary code
316 * prefix that is shared between at least 2 codewords. There can be at
317 * most num_used_symbols - 1 intermediate nodes when creating a Huffman
318 * code. This is because if there were at least num_used_symbols nodes,
319 * the code would be suboptimal because there would be at least one
320 * unnecessary intermediate node.
322 * The worst case (greatest number of intermediate nodes) would be if
323 * all the intermediate nodes were chained together. This results in
324 * num_used_symbols - 1 intermediate nodes. If num_used_symbols is at
325 * least 17, this configuration would not be allowed because the LZX
326 * format constrains codes to 16 bits or less each. However, it is
327 * still possible for there to be more than 16 intermediate nodes, as
328 * long as no leaf has a depth of more than 16. */
329 HuffmanIntermediateNode inodes[num_used_symbols - 1];
332 /* Pointer to the leaf node of lowest frequency that hasn't already been
333 * added as the child of some intermediate note. */
334 HuffmanNode *cur_leaf;
336 /* Pointer past the end of the array of leaves. */
337 HuffmanNode *end_leaf = &leaves[num_used_symbols];
339 /* Pointer to the intermediate node of lowest frequency. */
340 HuffmanIntermediateNode *cur_inode;
342 /* Pointer to the next unallocated intermediate node. */
343 HuffmanIntermediateNode *next_inode;
345 /* Only jump back to here if the maximum length of the codewords allowed
346 * by the LZX format (16 bits) is exceeded. */
347 try_building_tree_again:
349 /* Sort the leaves from those that correspond to the least frequent
350 * symbol, to those that correspond to the most frequent symbol. If two
351 * leaves have the same frequency, they are sorted by symbol. */
352 qsort(leaves, num_used_symbols, sizeof(leaves[0]), cmp_nodes_by_freq);
354 cur_leaf = &leaves[0];
355 cur_inode = &inodes[0];
356 next_inode = &inodes[0];
358 /* The following loop takes the two lowest frequency nodes of those
359 * remaining and makes them the children of the next available
360 * intermediate node. It continues until all the leaf nodes and
361 * intermediate nodes have been used up, or the maximum allowed length
362 * for the codewords is exceeded. For the latter case, we must adjust
363 * the frequencies to be more equal and then execute this loop again. */
366 /* Lowest frequency node. */
369 /* Second lowest frequency node. */
372 /* Get the lowest and second lowest frequency nodes from the
373 * remaining leaves or from the intermediate nodes. */
375 if (cur_leaf != end_leaf && (cur_inode == next_inode ||
376 cur_leaf->freq <= cur_inode->node_base.freq)) {
378 } else if (cur_inode != next_inode) {
379 f1 = (HuffmanNode*)cur_inode++;
382 if (cur_leaf != end_leaf && (cur_inode == next_inode ||
383 cur_leaf->freq <= cur_inode->node_base.freq)) {
385 } else if (cur_inode != next_inode) {
386 f2 = (HuffmanNode*)cur_inode++;
388 /* All nodes used up! */
392 /* next_inode becomes the parent of f1 and f2. */
394 next_inode->node_base.freq = f1->freq + f2->freq;
395 next_inode->node_base.sym = INVALID_SYMBOL;
396 next_inode->left_child = f1;
397 next_inode->right_child = f2;
399 /* We need to keep track of the height so that we can detect if
400 * the length of a codeword has execeed max_codeword_len. The
401 * parent node has a height one higher than the maximum height
402 * of its children. */
403 next_inode->node_base.height = max(f1->height, f2->height) + 1;
405 /* Check to see if the code length of the leaf farthest away
406 * from next_inode has exceeded the maximum code length. */
407 if (next_inode->node_base.height > max_codeword_len) {
408 /* The code lengths can be made more uniform by making
409 * the frequencies more uniform. Divide all the
410 * frequencies by 2, leaving 1 as the minimum frequency.
411 * If this keeps happening, the symbol frequencies will
412 * approach equality, which makes their Huffman
413 * codewords approach the length
414 * log_2(num_used_symbols).
416 for (unsigned i = 0; i < num_used_symbols; i++)
417 if (leaves[i].freq > 1)
418 leaves[i].freq >>= 1;
419 goto try_building_tree_again;
424 /* The Huffman tree is now complete, and its height is no more than
425 * max_codeword_len. */
427 HuffmanIntermediateNode *root = next_inode - 1;
428 wimlib_assert(root->node_base.height <= max_codeword_len);
430 /* Compute the path lengths for the leaf nodes. */
431 huffman_tree_compute_path_lengths(&root->node_base, 0);
433 /* Sort the leaf nodes primarily by code length and secondarily by
435 qsort(leaves, num_used_symbols, sizeof(leaves[0]), cmp_nodes_by_code_len);
437 u16 cur_codeword = 0;
438 unsigned cur_codeword_len = 0;
439 for (unsigned i = 0; i < num_used_symbols; i++) {
441 /* Each time a codeword becomes one longer, the current codeword
442 * is left shifted by one place. This is part of the procedure
443 * for enumerating the canonical Huffman code. Additionally,
444 * whenever a codeword is used, 1 is added to the current
447 unsigned len_diff = leaves[i].path_len - cur_codeword_len;
448 cur_codeword <<= len_diff;
449 cur_codeword_len += len_diff;
451 u16 sym = leaves[i].sym;
452 codewords[sym] = cur_codeword;
453 lens[sym] = cur_codeword_len;