/*
 * decompress_common.h
 *
 * Header for decompression code shared by multiple compression formats.
 *
 * The author dedicates this file to the public domain.
 * You can do whatever you want with this file.
 */
#ifndef _WIMLIB_DECOMPRESS_COMMON_H
#define _WIMLIB_DECOMPRESS_COMMON_H
-#include "wimlib/assert.h"
+#include <string.h>
+
#include "wimlib/compiler.h"
-#include "wimlib/error.h"
-#include "wimlib/endianness.h"
#include "wimlib/types.h"
+#include "wimlib/unaligned.h"
/* Structure that encapsulates a block of in-memory data being interpreted as a
 * stream of bits, optionally with interwoven literal bytes.  Bits are assumed
 * to be stored in little endian 16-bit coding units, with the bits ordered
 * high to low within each unit.  */
struct input_bitstream {

	/* Bits that have been read from the input buffer.  The bits are
	 * left-justified: the next bit to be consumed is always bit 31. */
	u32 bitbuf;

	/* Number of bits currently held in @bitbuf. */
	u32 bitsleft;

	/* Pointer to the next byte to be retrieved from the input buffer. */
	const u8 *next;

	/* Pointer past the end of the input buffer; @next never advances
	 * beyond this. */
	const u8 *end;
};
-/* Initializes a bitstream to receive its input from @data. */
+/* Initialize a bitstream to read from the specified input buffer. */
static inline void
-init_input_bitstream(struct input_bitstream *istream,
- const void *data, input_idx_t num_data_bytes)
+init_input_bitstream(struct input_bitstream *is, const void *buffer, u32 size)
{
- istream->bitbuf = 0;
- istream->bitsleft = 0;
- istream->data = data;
- istream->data_bytes_left = num_data_bytes;
+ is->bitbuf = 0;
+ is->bitsleft = 0;
+ is->next = buffer;
+ is->end = is->next + size;
}
/* Note: for performance reasons, the following methods don't return error codes
 * to the caller if the input buffer is overrun.  Instead, they just assume that
 * all overrun data is zeroes.  This has no effect on well-formed compressed
 * data.  The only disadvantage is that bad compressed data may go undetected,
 * but even this is irrelevant if higher level code checksums the uncompressed
 * data anyway.  */

/* Ensure the bit buffer variable for the bitstream contains at least @num_bits
 * bits.  Following this, bitstream_peek_bits() and/or bitstream_remove_bits()
 * may be called on the bitstream to peek or remove up to @num_bits bits. */
static inline void
bitstream_ensure_bits(struct input_bitstream *is, const unsigned num_bits)
{
	/* This currently works for at most 17 bits: a single 16-bit refill
	 * satisfies any request up to 16 bits, and the extra branch below
	 * handles the num_bits == 17 case. */

	if (is->bitsleft >= num_bits)
		return;

	if (unlikely(is->end - is->next < 2))
		goto overflow;

	/* Shift the next little endian 16-bit coding unit into @bitbuf,
	 * just below the @bitsleft bits already held there. */
	is->bitbuf |= (u32)get_unaligned_u16_le(is->next) << (16 - is->bitsleft);
	is->next += 2;
	is->bitsleft += 16;

	/* If 17 bits were requested but the buffer was empty before the
	 * refill (so only 16 bits are held now), read a second coding unit. */
	if (unlikely(num_bits == 17 && is->bitsleft == 16)) {
		if (unlikely(is->end - is->next < 2))
			goto overflow;

		is->bitbuf |= (u32)get_unaligned_u16_le(is->next);
		is->next += 2;
		is->bitsleft = 32;
	}

	return;

overflow:
	/* Input exhausted: claim a full buffer so the missing bits read as
	 * zeroes, per the note above. */
	is->bitsleft = 32;
}
-/* Returns the next @num_bits bits from the bitstream, without removing them.
+/* Return the next @num_bits bits from the bitstream, without removing them.
* There must be at least @num_bits remaining in the buffer variable, from a
* previous call to bitstream_ensure_bits(). */
static inline u32
-bitstream_peek_bits(const struct input_bitstream *istream, unsigned num_bits)
+bitstream_peek_bits(const struct input_bitstream *is, const unsigned num_bits)
{
- if (unlikely(num_bits == 0))
- return 0;
- return istream->bitbuf >> (sizeof(istream->bitbuf) * 8 - num_bits);
+ return (is->bitbuf >> 1) >> (sizeof(is->bitbuf) * 8 - num_bits - 1);
}
-/* Removes @num_bits from the bitstream. There must be at least @num_bits
+/* Remove @num_bits from the bitstream. There must be at least @num_bits
* remaining in the buffer variable, from a previous call to
* bitstream_ensure_bits(). */
static inline void
-bitstream_remove_bits(struct input_bitstream *istream, unsigned num_bits)
+bitstream_remove_bits(struct input_bitstream *is, unsigned num_bits)
{
- istream->bitbuf <<= num_bits;
- istream->bitsleft -= num_bits;
+ is->bitbuf <<= num_bits;
+ is->bitsleft -= num_bits;
}
-/* Removes and returns @num_bits bits from the bitstream. There must be at
- * least @num_bits remaining in the buffer variable, from a previous call to
+/* Remove and return @num_bits bits from the bitstream. There must be at least
+ * @num_bits remaining in the buffer variable, from a previous call to
* bitstream_ensure_bits(). */
static inline u32
-bitstream_pop_bits(struct input_bitstream *istream, unsigned num_bits)
+bitstream_pop_bits(struct input_bitstream *is, unsigned num_bits)
{
- u32 n = bitstream_peek_bits(istream, num_bits);
- bitstream_remove_bits(istream, num_bits);
- return n;
+ u32 bits = bitstream_peek_bits(is, num_bits);
+ bitstream_remove_bits(is, num_bits);
+ return bits;
}
-/* Reads and returns the next @num_bits bits from the bitstream.
- * If the input data is exhausted, the bits are assumed to be 0. */
+/* Read and return the next @num_bits bits from the bitstream. */
static inline u32
-bitstream_read_bits(struct input_bitstream *istream, unsigned num_bits)
+bitstream_read_bits(struct input_bitstream *is, unsigned num_bits)
{
- bitstream_ensure_bits(istream, num_bits);
- return bitstream_pop_bits(istream, num_bits);
+ bitstream_ensure_bits(is, num_bits);
+ return bitstream_pop_bits(is, num_bits);
}
-/* Reads and returns the next literal byte embedded in the bitstream.
- * If the input data is exhausted, the byte is assumed to be 0. */
+/* Read and return the next literal byte embedded in the bitstream. */
static inline u8
-bitstream_read_byte(struct input_bitstream *istream)
+bitstream_read_byte(struct input_bitstream *is)
{
- if (unlikely(istream->data_bytes_left == 0))
+ if (unlikely(is->end == is->next))
return 0;
- istream->data_bytes_left--;
- return *istream->data++;
+ return *is->next++;
}
-/* Reads and returns the next Huffman-encoded symbol from a bitstream. If the
- * input data is exhausted, the Huffman symbol is decoded as if the missing bits
- * are all zeroes. */
+/* Read and return the next 16-bit integer embedded in the bitstream. */
static inline u16
-read_huffsym(struct input_bitstream * restrict istream,
- const u16 decode_table[restrict],
- const u8 lens[restrict],
- unsigned num_syms,
- unsigned table_bits,
- unsigned max_codeword_len)
+bitstream_read_u16(struct input_bitstream *is)
{
+ u16 v;
- bitstream_ensure_bits(istream, max_codeword_len);
+ if (unlikely(is->end - is->next < 2))
+ return 0;
+ v = get_unaligned_u16_le(is->next);
+ is->next += 2;
+ return v;
+}
+
+/* Read and return the next 32-bit integer embedded in the bitstream. */
+static inline u32
+bitstream_read_u32(struct input_bitstream *is)
+{
+ u32 v;
+
+ if (unlikely(is->end - is->next < 4))
+ return 0;
+ v = get_unaligned_u32_le(is->next);
+ is->next += 4;
+ return v;
+}
+
+/* Read into @dst_buffer an array of literal bytes embedded in the bitstream.
+ * Return either a pointer to the byte past the last written, or NULL if the
+ * read overflows the input buffer. */
+static inline void *
+bitstream_read_bytes(struct input_bitstream *is, void *dst_buffer, size_t count)
+{
+ if (unlikely(is->end - is->next < count))
+ return NULL;
+ memcpy(dst_buffer, is->next, count);
+ is->next += count;
+ return (u8 *)dst_buffer + count;
+}
+
+/* Align the input bitstream on a coding-unit boundary. */
+static inline void
+bitstream_align(struct input_bitstream *is)
+{
+ is->bitsleft = 0;
+ is->bitbuf = 0;
+}
+
/* Needed alignment of the decode_table parameter to
 * make_huffman_decode_table().
 *
 * Reason: the entries may be filled with SSE instructions without worrying
 * about dealing with the unaligned case.  */
#define DECODE_TABLE_ALIGNMENT 16

/* Maximum supported symbol count for make_huffman_decode_table().
 *
 * Reason: in direct mapping entries, the symbol is stored in 11 bits
 * (2^11 = 2048).  */
#define DECODE_TABLE_MAX_SYMBOLS 2048

/* Maximum supported table bits for make_huffman_decode_table().
 *
 * Reason: in internal binary tree nodes, offsets are encoded in 14 bits.
 * But the real limit is 13, because entries past the end of the direct
 * lookup part of the table are allocated for binary tree nodes.  (Note: if
 * needed this limit could be removed by encoding the offsets relative to
 * &decode_table[1 << table_bits].)  */
#define DECODE_TABLE_MAX_TABLE_BITS 13

/* Maximum supported codeword length for make_huffman_decode_table().
 *
 * Reason: in direct mapping entries, the codeword length is encoded in 5
 * bits, and the top 2 bits can't both be set because that has special
 * meaning (an entry >= 0xC000 is a binary tree node; see read_huffsym()).  */
#define DECODE_TABLE_MAX_CODEWORD_LEN 23
+
/* Read and return the next Huffman-encoded symbol from the bitstream, using
 * the decode table built by make_huffman_decode_table() with the given
 * @table_bits and @max_codeword_len.  If the input data is exhausted, the
 * symbol is decoded as if the missing bits were all zeroes.
 *
 * XXX: This is mostly duplicated in lzms_decode_huffman_symbol() in
 * lzms_decompress.c.  */
static inline unsigned
read_huffsym(struct input_bitstream *istream, const u16 decode_table[],
	     unsigned table_bits, unsigned max_codeword_len)
{
	unsigned entry;
	unsigned key_bits;

	/* Make all bits of the longest possible codeword available. */
	bitstream_ensure_bits(istream, max_codeword_len);

	/* Index the decode table by the next table_bits bits of the input. */
	key_bits = bitstream_peek_bits(istream, table_bits);
	entry = decode_table[key_bits];
	if (likely(entry < 0xC000)) {
		/* Fast case: The decode table directly provided the
		 * symbol and codeword length.  The low 11 bits are the
		 * symbol, and the high 5 bits are the codeword length. */
		bitstream_remove_bits(istream, entry >> 11);
		return entry & 0x7FF;
	} else {
		/* Slow case: The codeword for the symbol is longer than
		 * table_bits, so the symbol does not have an entry
		 * directly in the first (1 << table_bits) entries of the
		 * decode table.  Traverse the appropriate binary tree
		 * bit-by-bit to decode the symbol: the low 14 bits of each
		 * node entry give the offset of its children, and each
		 * input bit selects which child to follow next. */
		bitstream_remove_bits(istream, table_bits);
		do {
			key_bits = (entry & 0x3FFF) + bitstream_pop_bits(istream, 1);
		} while ((entry = decode_table[key_bits]) >= 0xC000);
		return entry;
	}
}
/* Build, in @decode_table (which must be DECODE_TABLE_ALIGNMENT-aligned), the
 * lookup table used by read_huffsym() for a Huffman code with @num_syms
 * symbols, direct-lookup size of @num_bits bits, codeword lengths @lens[], and
 * maximum codeword length @max_codeword_len.  Defined in the corresponding .c
 * file.  NOTE(review): presumably returns 0 on success and nonzero on an
 * invalid code — confirm against the implementation. */
extern int
make_huffman_decode_table(u16 decode_table[], unsigned num_syms,
			  unsigned num_bits, const u8 lens[],
			  unsigned max_codeword_len);
+static inline void
+copy_word_unaligned(const void *src, void *dst)
+{
+ store_word_unaligned(load_word_unaligned(src), dst);
+}
+
+static inline machine_word_t
+repeat_byte(u8 b)
+{
+ machine_word_t v;
+
+ STATIC_ASSERT(WORDSIZE == 4 || WORDSIZE == 8);
+
+ v = b;
+ v |= v << 8;
+ v |= v << 16;
+ v |= v << ((WORDSIZE == 8) ? 32 : 0);
+ return v;
+}
+
/*
 * Copy an LZ77 match at (dst - offset) to dst.
 *
 * The length and offset must be already validated --- that is, (dst - offset)
 * can't underrun the output buffer, and (dst + length) can't overrun the
 * output buffer.  Also, the length cannot be 0.
 *
 * @winend points to the byte past the end of the output buffer.
 * This function won't write any data beyond this position.
 *
 * @min_length is the minimum match length the compression format allows; it is
 * used below to unconditionally copy the first few bytes of the bytewise
 * fallback path without testing @length.
 */
static inline void
lz_copy(u8 *dst, u32 length, u32 offset, const u8 *winend, u32 min_length)
{
	const u8 *src = dst - offset;
	const u8 * const end = dst + length;

	/*
	 * Try to copy one machine word at a time.  On i386 and x86_64 this is
	 * faster than copying one byte at a time, unless the data is
	 * near-random and all the matches have very short lengths.  Note that
	 * since this requires unaligned memory accesses, it won't necessarily
	 * be faster on every architecture.
	 *
	 * Also note that we might copy more than the length of the match.  For
	 * example, if a word is 8 bytes and the match is of length 5, then
	 * we'll simply copy 8 bytes.  This is okay as long as we don't write
	 * beyond the end of the output buffer, hence the check for (winend -
	 * end >= WORDSIZE - 1).
	 */
	if (UNALIGNED_ACCESS_IS_VERY_FAST &&
	    likely(winend - end >= WORDSIZE - 1))
	{

		if (offset >= WORDSIZE) {
			/* The source and destination words don't overlap. */

			/* To improve branch prediction, one iteration of this
			 * loop is unrolled.  Most matches are short and will
			 * fail the first check.  But if that check passes, then
			 * it becomes increasingly likely that the match is long
			 * and we'll need to continue copying. */

			copy_word_unaligned(src, dst);
			src += WORDSIZE;
			dst += WORDSIZE;

			if (dst < end) {
				do {
					copy_word_unaligned(src, dst);
					src += WORDSIZE;
					dst += WORDSIZE;
				} while (dst < end);
			}
			return;
		} else if (offset == 1) {

			/* Offset 1 matches are equivalent to run-length
			 * encoding of the previous byte.  This case is common
			 * if the data contains many repeated bytes. */

			machine_word_t v = repeat_byte(*(dst - 1));
			do {
				store_word_unaligned(v, dst);
				src += WORDSIZE;
				dst += WORDSIZE;
			} while (dst < end);
			return;
		}
		/*
		 * We don't bother with special cases for other 'offset <
		 * WORDSIZE', which are usually rarer than 'offset == 1'.
		 * Extra checks will just slow things down.  Actually, it's
		 * possible to handle all the 'offset < WORDSIZE' cases using
		 * the same code, but it still becomes more complicated and
		 * doesn't seem any faster overall; it definitely slows down
		 * the more common 'offset == 1' case.
		 */
	}

	/* Fall back to a bytewise copy.  The first (min_length - 1) bytes can
	 * be copied without checking @length, since every match is at least
	 * @min_length bytes long. */

	if (min_length >= 2) {
		*dst++ = *src++;
		length--;
	}
	if (min_length >= 3) {
		*dst++ = *src++;
		length--;
	}
	if (min_length >= 4) {
		*dst++ = *src++;
		length--;
	}
	do {
		*dst++ = *src++;
	} while (--length);
}
#endif /* _WIMLIB_DECOMPRESS_COMMON_H */