X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=include%2Fwimlib%2Fdecompress_common.h;h=d396120763d917582ff29a30b9cd1ebd2e731a4c;hp=420f7f41e3adf8a207248241a41aee0db3ee3879;hb=908381d2809a48acd9490ec080e51087ae1529fd;hpb=141ce5b36cf27f8b1a78f17d070f6a627105aabd

diff --git a/include/wimlib/decompress_common.h b/include/wimlib/decompress_common.h
index 420f7f41..d3961207 100644
--- a/include/wimlib/decompress_common.h
+++ b/include/wimlib/decompress_common.h
@@ -3,17 +3,36 @@
  *
  * Header for decompression code shared by multiple compression formats.
  *
- * The author dedicates this file to the public domain.
- * You can do whatever you want with this file.
+ * The following copying information applies to this specific source code file:
+ *
+ * Written in 2012-2016 by Eric Biggers
+ *
+ * To the extent possible under law, the author(s) have dedicated all copyright
+ * and related and neighboring rights to this software to the public domain
+ * worldwide via the Creative Commons Zero 1.0 Universal Public Domain
+ * Dedication (the "CC0").
+ *
+ * This software is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the CC0 for more details.
+ *
+ * You should have received a copy of the CC0 along with this software; if not
+ * see <http://creativecommons.org/publicdomain/zero/1.0/>.
  */
 
 #ifndef _WIMLIB_DECOMPRESS_COMMON_H
 #define _WIMLIB_DECOMPRESS_COMMON_H
 
+#include <string.h>
+
 #include "wimlib/compiler.h"
 #include "wimlib/types.h"
 #include "wimlib/unaligned.h"
 
+/******************************************************************************/
+/* Input bitstream for XPRESS and LZX */
+/*----------------------------------------------------------------------------*/
+
 /* Structure that encapsulates a block of in-memory data being interpreted as a
  * stream of bits, optionally with interwoven literal bytes. Bits are assumed
  * to be stored in little endian 16-bit coding units, with the bits ordered high
@@ -65,7 +84,7 @@ bitstream_ensure_bits(struct input_bitstream *is, const unsigned num_bits)
 		if (unlikely(is->end - is->next < 2))
 			goto overflow;
 
-		is->bitbuf |= (u32)get_unaligned_u16_le(is->next) << (16 - is->bitsleft);
+		is->bitbuf |= (u32)get_unaligned_le16(is->next) << (16 - is->bitsleft);
 		is->next += 2;
 		is->bitsleft += 16;
 
@@ -73,7 +92,7 @@ bitstream_ensure_bits(struct input_bitstream *is, const unsigned num_bits)
 			if (unlikely(is->end - is->next < 2))
 				goto overflow;
 
-			is->bitbuf |= (u32)get_unaligned_u16_le(is->next);
+			is->bitbuf |= (u32)get_unaligned_le16(is->next);
 			is->next += 2;
 			is->bitsleft = 32;
 		}
@@ -90,9 +109,7 @@
 static inline u32
 bitstream_peek_bits(const struct input_bitstream *is, const unsigned num_bits)
 {
-	if (unlikely(num_bits == 0))
-		return 0;
-	return is->bitbuf >> (32 - num_bits);
+	return (is->bitbuf >> 1) >> (sizeof(is->bitbuf) * 8 - num_bits - 1);
 }
 
 /* Remove @num_bits from the bitstream. There must be at least @num_bits
@@ -141,7 +158,7 @@ bitstream_read_u16(struct input_bitstream *is)
 	if (unlikely(is->end - is->next < 2))
 		return 0;
 
-	v = get_unaligned_u16_le(is->next);
+	v = get_unaligned_le16(is->next);
 	is->next += 2;
 	return v;
 }
@@ -154,23 +171,21 @@ bitstream_read_u32(struct input_bitstream *is)
 	if (unlikely(is->end - is->next < 4))
 		return 0;
 
-	v = get_unaligned_u32_le(is->next);
+	v = get_unaligned_le32(is->next);
 	is->next += 4;
 	return v;
 }
 
-/* Read an array of literal bytes embedded in the bitstream.
Return a pointer - * to the resulting array, or NULL if the read overflows the input buffer. */ -static inline const u8 * -bitstream_read_bytes(struct input_bitstream *is, size_t count) +/* Read into @dst_buffer an array of literal bytes embedded in the bitstream. + * Return 0 if there were enough bytes remaining in the input, otherwise -1. */ +static inline int +bitstream_read_bytes(struct input_bitstream *is, void *dst_buffer, size_t count) { - const u8 *p; - if (unlikely(is->end - is->next < count)) - return NULL; - p = is->next; + return -1; + memcpy(dst_buffer, is->next, count); is->next += count; - return p; + return 0; } /* Align the input bitstream on a coding-unit boundary. */ @@ -181,76 +196,217 @@ bitstream_align(struct input_bitstream *is) is->bitbuf = 0; } -/* Needed alignment of decode_table parameter to make_huffman_decode_table(). - * - * Reason: We may fill the entries with SSE instructions without worrying - * about dealing with the unaligned case. */ +/******************************************************************************/ +/* Huffman decoding */ +/*----------------------------------------------------------------------------*/ + +/* + * Required alignment for the Huffman decode tables. We require this alignment + * so that we can fill the entries with vector or word instructions and not have + * to deal with misaligned buffers. + */ #define DECODE_TABLE_ALIGNMENT 16 -/* Maximum supported symbol count for make_huffman_decode_table(). +/* + * Each decode table entry is 16 bits divided into two fields: 'symbol' (high 12 + * bits) and 'length' (low 4 bits). The precise meaning of these fields depends + * on the type of entry: * - * Reason: In direct mapping entries, we store the symbol in 11 bits. */ -#define DECODE_TABLE_MAX_SYMBOLS 2048 - -/* Maximum supported table bits for make_huffman_decode_table(). + * Root table entries which are *not* subtable pointers: + * symbol: symbol to decode + * length: codeword length in bits + * + * Root table entries which are subtable pointers: + * symbol: index of start of subtable + * length: number of bits with which the subtable is indexed * - * Reason: In internal binary tree nodes, offsets are encoded in 14 bits. - * But the real limit is 13, because we allocate entries past the end of - * the direct lookup part of the table for binary tree nodes. (Note: if - * needed this limit could be removed by encoding the offsets relative to - * &decode_table[1 << table_bits].) */ -#define DECODE_TABLE_MAX_TABLE_BITS 13 - -/* Maximum supported codeword length for make_huffman_decode_table(). + * Subtable entries: + * symbol: symbol to decode + * length: codeword length in bits, minus the number of bits with which the + * root table is indexed + */ +#define DECODE_TABLE_SYMBOL_SHIFT 4 +#define DECODE_TABLE_MAX_SYMBOL ((1 << (16 - DECODE_TABLE_SYMBOL_SHIFT)) - 1) +#define DECODE_TABLE_MAX_LENGTH ((1 << DECODE_TABLE_SYMBOL_SHIFT) - 1) +#define DECODE_TABLE_LENGTH_MASK DECODE_TABLE_MAX_LENGTH +#define MAKE_DECODE_TABLE_ENTRY(symbol, length) \ + (((symbol) << DECODE_TABLE_SYMBOL_SHIFT) | (length)) + +/* + * Read and return the next Huffman-encoded symbol from the given bitstream + * using the given decode table. * - * Reason: In direct mapping entries, we encode the codeword length in 5 - * bits, and the top 2 bits can't both be set because that has special - * meaning. */ -#define DECODE_TABLE_MAX_CODEWORD_LEN 23 - -/* Reads and returns the next Huffman-encoded symbol from a bitstream. 
If the - * input data is exhausted, the Huffman symbol is decoded as if the missing bits - * are all zeroes. + * If the input data is exhausted, then the Huffman symbol will be decoded as if + * the missing bits were all zeroes. * * XXX: This is mostly duplicated in lzms_decode_huffman_symbol() in - * lzms_decompress.c. */ + * lzms_decompress.c; keep them in sync! + */ static inline unsigned -read_huffsym(struct input_bitstream *istream, const u16 decode_table[], +read_huffsym(struct input_bitstream *is, const u16 decode_table[], unsigned table_bits, unsigned max_codeword_len) { unsigned entry; - unsigned key_bits; - - bitstream_ensure_bits(istream, max_codeword_len); - - /* Index the decode table by the next table_bits bits of the input. */ - key_bits = bitstream_peek_bits(istream, table_bits); - entry = decode_table[key_bits]; - if (likely(entry < 0xC000)) { - /* Fast case: The decode table directly provided the - * symbol and codeword length. The low 11 bits are the - * symbol, and the high 5 bits are the codeword length. */ - bitstream_remove_bits(istream, entry >> 11); - return entry & 0x7FF; - } else { - /* Slow case: The codeword for the symbol is longer than - * table_bits, so the symbol does not have an entry - * directly in the first (1 << table_bits) entries of the - * decode table. Traverse the appropriate binary tree - * bit-by-bit to decode the symbol. */ - bitstream_remove_bits(istream, table_bits); - do { - key_bits = (entry & 0x3FFF) + bitstream_pop_bits(istream, 1); - } while ((entry = decode_table[key_bits]) >= 0xC000); - return entry; + unsigned symbol; + unsigned length; + + /* Preload the bitbuffer with 'max_codeword_len' bits so that we're + * guaranteed to be able to fully decode a codeword. */ + bitstream_ensure_bits(is, max_codeword_len); + + /* Index the root table by the next 'table_bits' bits of input. */ + entry = decode_table[bitstream_peek_bits(is, table_bits)]; + + /* Extract the "symbol" and "length" from the entry. */ + symbol = entry >> DECODE_TABLE_SYMBOL_SHIFT; + length = entry & DECODE_TABLE_LENGTH_MASK; + + /* If the root table is indexed by the full 'max_codeword_len' bits, + * then there cannot be any subtables, and this will be known at compile + * time. Otherwise, we must check whether the decoded symbol is really + * a subtable pointer. If so, we must discard the bits with which the + * root table was indexed, then index the subtable by the next 'length' + * bits of input to get the real entry. */ + if (max_codeword_len > table_bits && + entry >= (1U << (table_bits + DECODE_TABLE_SYMBOL_SHIFT))) + { + /* Subtable required */ + bitstream_remove_bits(is, table_bits); + entry = decode_table[symbol + bitstream_peek_bits(is, length)]; + symbol = entry >> DECODE_TABLE_SYMBOL_SHIFT; + length = entry & DECODE_TABLE_LENGTH_MASK; } + + /* Discard the bits (or the remaining bits, if a subtable was required) + * of the codeword. */ + bitstream_remove_bits(is, length); + + /* Return the decoded symbol. */ + return symbol; } +/* + * The DECODE_TABLE_ENOUGH() macro evaluates to the maximum number of decode + * table entries, including all subtable entries, that may be required for + * decoding a given Huffman code. 
This depends on three parameters: + * + * num_syms: the maximum number of symbols in the code + * table_bits: the number of bits with which the root table will be indexed + * max_codeword_len: the maximum allowed codeword length in the code + * + * Given these parameters, the utility program 'enough' from zlib, when passed + * the three arguments 'num_syms', 'table_bits', and 'max_codeword_len', will + * compute the maximum number of entries required. This has already been done + * for the combinations we need and incorporated into the macro below so that + * the mapping can be done at compilation time. If an unknown combination is + * used, then a compilation error will result. To fix this, use 'enough' to + * find the missing value and add it below. If that still doesn't fix the + * compilation error, then most likely a constraint would be violated by the + * requested parameters, so they cannot be used, at least without other changes + * to the decode table --- see DECODE_TABLE_SIZE(). + */ +#define DECODE_TABLE_ENOUGH(num_syms, table_bits, max_codeword_len) ( \ + ((num_syms) == 8 && (table_bits) == 7 && (max_codeword_len) == 15) ? 128 : \ + ((num_syms) == 8 && (table_bits) == 5 && (max_codeword_len) == 7) ? 36 : \ + ((num_syms) == 8 && (table_bits) == 6 && (max_codeword_len) == 7) ? 66 : \ + ((num_syms) == 8 && (table_bits) == 7 && (max_codeword_len) == 7) ? 128 : \ + ((num_syms) == 20 && (table_bits) == 5 && (max_codeword_len) == 15) ? 1062 : \ + ((num_syms) == 20 && (table_bits) == 6 && (max_codeword_len) == 15) ? 582 : \ + ((num_syms) == 20 && (table_bits) == 7 && (max_codeword_len) == 15) ? 390 : \ + ((num_syms) == 54 && (table_bits) == 9 && (max_codeword_len) == 15) ? 618 : \ + ((num_syms) == 54 && (table_bits) == 10 && (max_codeword_len) == 15) ? 1098 : \ + ((num_syms) == 249 && (table_bits) == 9 && (max_codeword_len) == 16) ? 878 : \ + ((num_syms) == 249 && (table_bits) == 10 && (max_codeword_len) == 16) ? 1326 : \ + ((num_syms) == 249 && (table_bits) == 11 && (max_codeword_len) == 16) ? 2318 : \ + ((num_syms) == 256 && (table_bits) == 9 && (max_codeword_len) == 15) ? 822 : \ + ((num_syms) == 256 && (table_bits) == 10 && (max_codeword_len) == 15) ? 1302 : \ + ((num_syms) == 256 && (table_bits) == 11 && (max_codeword_len) == 15) ? 2310 : \ + ((num_syms) == 512 && (table_bits) == 10 && (max_codeword_len) == 15) ? 1558 : \ + ((num_syms) == 512 && (table_bits) == 11 && (max_codeword_len) == 15) ? 2566 : \ + ((num_syms) == 512 && (table_bits) == 12 && (max_codeword_len) == 15) ? 4606 : \ + ((num_syms) == 656 && (table_bits) == 10 && (max_codeword_len) == 16) ? 1734 : \ + ((num_syms) == 656 && (table_bits) == 11 && (max_codeword_len) == 16) ? 2726 : \ + ((num_syms) == 656 && (table_bits) == 12 && (max_codeword_len) == 16) ? 4758 : \ + ((num_syms) == 799 && (table_bits) == 9 && (max_codeword_len) == 15) ? 1366 : \ + ((num_syms) == 799 && (table_bits) == 10 && (max_codeword_len) == 15) ? 1846 : \ + ((num_syms) == 799 && (table_bits) == 11 && (max_codeword_len) == 15) ? 2854 : \ + -1) + +/* Wrapper around DECODE_TABLE_ENOUGH() that does additional compile-time + * validation. */ +#define DECODE_TABLE_SIZE(num_syms, table_bits, max_codeword_len) ( \ + \ + /* All values must be positive. */ \ + STATIC_ASSERT_ZERO((num_syms) > 0) + \ + STATIC_ASSERT_ZERO((table_bits) > 0) + \ + STATIC_ASSERT_ZERO((max_codeword_len) > 0) + \ + \ + /* There cannot be more symbols than possible codewords. 
*/ \ + STATIC_ASSERT_ZERO((num_syms) <= 1U << (max_codeword_len)) + \ + \ + /* There is no reason for the root table to be indexed with + * more bits than the maximum codeword length. */ \ + STATIC_ASSERT_ZERO((table_bits) <= (max_codeword_len)) + \ + \ + /* The maximum symbol value must fit in the 'symbol' field. */ \ + STATIC_ASSERT_ZERO((num_syms) - 1 <= DECODE_TABLE_MAX_SYMBOL) + \ + \ + /* The maximum codeword length in the root table must fit in + * the 'length' field. */ \ + STATIC_ASSERT_ZERO((table_bits) <= DECODE_TABLE_MAX_LENGTH) + \ + \ + /* The maximum codeword length in a subtable must fit in the + * 'length' field. */ \ + STATIC_ASSERT_ZERO((max_codeword_len) - (table_bits) <= \ + DECODE_TABLE_MAX_LENGTH) + \ + \ + /* The minimum subtable index must be greater than the maximum + * symbol value. If this were not the case, then there would + * be no way to tell whether a given root table entry is a + * "subtable pointer" or not. (An alternate solution would be + * to reserve a flag bit specifically for this purpose.) */ \ + STATIC_ASSERT_ZERO((1U << table_bits) > (num_syms) - 1) + \ + \ + /* The needed 'enough' value must have been defined. */ \ + STATIC_ASSERT_ZERO(DECODE_TABLE_ENOUGH( \ + (num_syms), (table_bits), \ + (max_codeword_len)) > 0) + \ + \ + /* The maximum subtable index must fit in the 'symbol' field. */\ + STATIC_ASSERT_ZERO(DECODE_TABLE_ENOUGH( \ + (num_syms), (table_bits), \ + (max_codeword_len)) - 1 <= \ + DECODE_TABLE_MAX_SYMBOL) + \ + \ + /* Finally, make the macro evaluate to the needed maximum + * number of decode table entries. */ \ + DECODE_TABLE_ENOUGH((num_syms), (table_bits), \ + (max_codeword_len)) \ +) + +/* + * Declare the decode table for a Huffman code, given several compile-time + * constants that describe the code. See DECODE_TABLE_ENOUGH() for details. + * + * Decode tables must be aligned to a DECODE_TABLE_ALIGNMENT-byte boundary. + * This implies that if a decode table is nested inside a dynamically allocated + * structure, then the outer structure must be allocated on a + * DECODE_TABLE_ALIGNMENT-byte aligned boundary as well. + */ +#define DECODE_TABLE(name, num_syms, table_bits, max_codeword_len) \ + u16 name[DECODE_TABLE_SIZE((num_syms), (table_bits), \ + (max_codeword_len))] \ + _aligned_attribute(DECODE_TABLE_ALIGNMENT) + extern int make_huffman_decode_table(u16 decode_table[], unsigned num_syms, - unsigned num_bits, const u8 lens[], + unsigned table_bits, const u8 lens[], unsigned max_codeword_len); +/******************************************************************************/ +/* LZ match copying */ +/*----------------------------------------------------------------------------*/ + static inline void copy_word_unaligned(const void *src, void *dst) { @@ -258,19 +414,22 @@ copy_word_unaligned(const void *src, void *dst) } static inline machine_word_t -repeat_byte(u8 b) +repeat_u16(u16 b) { - machine_word_t v; + machine_word_t v = b; - BUILD_BUG_ON(WORDSIZE != 4 && WORDSIZE != 8); - - v = b; - v |= v << 8; + STATIC_ASSERT(WORDBITS == 32 || WORDBITS == 64); v |= v << 16; - v |= v << ((WORDSIZE == 8) ? 32 : 0); + v |= v << ((WORDBITS == 64) ? 32 : 0); return v; } +static inline machine_word_t +repeat_byte(u8 b) +{ + return repeat_u16(((u16)b << 8) | b); +} + /* * Copy an LZ77 match at (dst - offset) to dst. * @@ -298,13 +457,11 @@ lz_copy(u8 *dst, u32 length, u32 offset, const u8 *winend, u32 min_length) * example, if a word is 8 bytes and the match is of length 5, then * we'll simply copy 8 bytes. 
This is okay as long as we don't write
	 * beyond the end of the output buffer, hence the check for (winend -
-	 * end >= WORDSIZE - 1).
+	 * end >= WORDBYTES - 1).
 	 */
-	if (UNALIGNED_ACCESS_IS_VERY_FAST &&
-	    likely(winend - end >= WORDSIZE - 1))
-	{
+	if (UNALIGNED_ACCESS_IS_FAST && likely(winend - end >= WORDBYTES - 1)) {
 
-		if (offset >= WORDSIZE) {
+		if (offset >= WORDBYTES) {
 
 			/* The source and destination words don't overlap. */
 
 			/* To improve branch prediction, one iteration of this
@@ -314,14 +471,14 @@ lz_copy(u8 *dst, u32 length, u32 offset, const u8 *winend, u32 min_length)
 			 * and we'll need to continue copying. */
 
 			copy_word_unaligned(src, dst);
-			src += WORDSIZE;
-			dst += WORDSIZE;
+			src += WORDBYTES;
+			dst += WORDBYTES;
 
 			if (dst < end) {
 				do {
 					copy_word_unaligned(src, dst);
-					src += WORDSIZE;
-					dst += WORDSIZE;
+					src += WORDBYTES;
+					dst += WORDBYTES;
 				} while (dst < end);
 			}
 			return;
@@ -334,19 +491,19 @@ lz_copy(u8 *dst, u32 length, u32 offset, const u8 *winend, u32 min_length)
 			machine_word_t v = repeat_byte(*(dst - 1));
 			do {
 				store_word_unaligned(v, dst);
-				src += WORDSIZE;
-				dst += WORDSIZE;
+				src += WORDBYTES;
+				dst += WORDBYTES;
 			} while (dst < end);
 			return;
 		}
 
 		/*
 		 * We don't bother with special cases for other 'offset <
-		 * WORDSIZE', which are usually rarer than 'offset == 1'. Extra
-		 * checks will just slow things down. Actually, it's possible
-		 * to handle all the 'offset < WORDSIZE' cases using the same
-		 * code, but it still becomes more complicated doesn't seem any
-		 * faster overall; it definitely slows down the more common
-		 * 'offset == 1' case.
+		 * WORDBYTES', which are usually rarer than 'offset == 1'.
+		 * Extra checks will just slow things down. Actually, it's
+		 * possible to handle all the 'offset < WORDBYTES' cases using
+		 * the same code, but it still becomes more complicated and
+		 * doesn't seem any faster overall; it definitely slows down
+		 * the more common 'offset == 1' case.
 		 */
 	}
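
Usage sketch (illustrative only, not part of the patch): the new decode-table API is used by first declaring a table with DECODE_TABLE(), then building it from the per-symbol codeword lengths with make_huffman_decode_table(), and finally calling read_huffsym() on an initialized bitstream. The symbol count, root table bits, and maximum codeword length used below (256, 11, 15) are one of the combinations already listed in DECODE_TABLE_ENOUGH(); init_input_bitstream() is the bitstream setup helper defined elsewhere in this header and not shown in the hunks above, and the function and variable names are hypothetical.

/* Hypothetical caller; 'lens[sym]' holds the codeword length of each symbol,
 * with 0 meaning the symbol is unused. */
static DECODE_TABLE(example_decode_table, 256, 11, 15);

static int
example_decode_first_symbol(const void *in, u32 in_nbytes,
			    const u8 lens[256], unsigned *sym_ret)
{
	struct input_bitstream is;

	/* Build the lookup table; a nonzero return value means the codeword
	 * lengths do not describe a valid Huffman code. */
	if (make_huffman_decode_table(example_decode_table, 256, 11, lens, 15))
		return -1;

	init_input_bitstream(&is, in, in_nbytes);

	/* Decode one symbol; this consumes at most 15 bits of input. */
	*sym_ret = read_huffsym(&is, example_decode_table, 11, 15);
	return 0;
}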
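
And a similar sketch for the renamed WORDBYTES-based match copy: once a decompressor has decoded a match as a (length, offset) pair, it copies the match with lz_copy() and advances its output pointer. The wrapper function, the variable names, and the minimum match length of 3 are assumptions for illustration; only lz_copy() itself comes from this header.

/* Illustrative output step for one decoded match.  'out_next' is the current
 * output position and 'out_end' is the end of the output buffer; lz_copy()
 * may write up to WORDBYTES - 1 bytes of padding past the end of the match,
 * but never past 'out_end'. */
static u8 *
example_copy_match(u8 *out_next, u8 *out_end, u32 length, u32 offset)
{
	lz_copy(out_next, length, offset, out_end,
		3 /* assumed minimum match length */);
	return out_next + length;
}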