/*
 * decompress_common.h
 *
 * Header for decompression code shared by multiple compression formats.
 *
 * The author dedicates this file to the public domain.
 * You can do whatever you want with this file.
 */
#ifndef _WIMLIB_DECOMPRESS_COMMON_H
#define _WIMLIB_DECOMPRESS_COMMON_H

#include "wimlib/assert.h"
#include "wimlib/compiler.h"
#include "wimlib/endianness.h"
#include "wimlib/types.h"
-
/* Structure that encapsulates a block of in-memory data being interpreted as a
 * stream of bits, optionally with interwoven literal bytes.  Bits are assumed
 * to be stored in little endian 16-bit coding units, with the bits ordered high
 * to low.  */
struct input_bitstream {

	/* Bits that have been read from the input buffer.  The bits are
	 * left-justified; the next bit is always bit 31.  */
	u32 bitbuf;

	/* Number of bits currently held in @bitbuf.  */
	u32 bitsleft;

	/* Pointer to the next byte to be retrieved from the input buffer.  */
	const u8 *next;

	/* Pointer past the end of the input buffer.  */
	const u8 *end;
};
-/* Initializes a bitstream to receive its input from @data. */
+/* Initialize a bitstream to read from the specified input buffer. */
static inline void
-init_input_bitstream(struct input_bitstream *istream,
- const void *data, input_idx_t num_data_bytes)
+init_input_bitstream(struct input_bitstream *is, const void *buffer, u32 size)
{
- istream->bitbuf = 0;
- istream->bitsleft = 0;
- istream->data = data;
- istream->data_bytes_left = num_data_bytes;
+ is->bitbuf = 0;
+ is->bitsleft = 0;
+ is->next = buffer;
+ is->end = is->next + size;
}
/* Note: for performance reasons, the following methods don't return error codes
 * to the caller if the input buffer is overrun.  Instead, they just assume that
 * all overrun data is zeroes.  This has no effect on well-formed compressed
 * data.  The only disadvantage is that bad compressed data may go undetected,
 * but even this is irrelevant if higher level code checksums the uncompressed
 * data anyway.  */

/* Ensure the bit buffer variable for the bitstream contains at least @num_bits
 * bits.  Following this, bitstream_peek_bits() and/or bitstream_remove_bits()
 * may be called on the bitstream to peek or remove up to @num_bits bits.  */
static inline void
bitstream_ensure_bits(struct input_bitstream *is, const unsigned num_bits)
{
	/* This currently works for at most 17 bits. */
	wimlib_assert2(num_bits <= 17);

	if (is->bitsleft >= num_bits)
		return;

	/* Refill: OR the next little endian 16-bit coding unit into the
	 * buffer, just below the bits already held.  */
	if (unlikely(is->end - is->next < 2))
		goto overflow;

	is->bitbuf |= (u32)le16_to_cpu(*(const le16 *)is->next)
		      << (16 - is->bitsleft);
	is->next += 2;
	is->bitsleft += 16;

	/* 17-bit case: if the buffer started empty, a single 16-bit refill is
	 * one bit short, so pull in a second coding unit.  */
	if (unlikely(num_bits == 17 && is->bitsleft == 16)) {
		if (unlikely(is->end - is->next < 2))
			goto overflow;
		is->bitbuf |= (u32)le16_to_cpu(*(const le16 *)is->next);
		is->next += 2;
		is->bitsleft = 32;
	}

	return;

overflow:
	/* Input exhausted: claim a full buffer so the caller proceeds; the
	 * unset low bits of @bitbuf are zero, matching the documented
	 * "overrun data reads as zeroes" policy above.  */
	is->bitsleft = 32;
}
-/* Returns the next @num_bits bits in the buffer variable, which must contain at
- * least @num_bits bits, for the bitstream. */
+/* Return the next @num_bits bits from the bitstream, without removing them.
+ * There must be at least @num_bits remaining in the buffer variable, from a
+ * previous call to bitstream_ensure_bits(). */
static inline u32
-bitstream_peek_bits(const struct input_bitstream *istream, unsigned num_bits)
+bitstream_peek_bits(const struct input_bitstream *is, const unsigned num_bits)
{
- wimlib_assert2(istream->bitsleft >= num_bits);
-
if (unlikely(num_bits == 0))
return 0;
-
- return istream->bitbuf >> (sizeof(istream->bitbuf) * 8 - num_bits);
+ return is->bitbuf >> (32 - num_bits);
}
-/* Removes @num_bits bits from the buffer variable, which must contain at least
- * @num_bits bits, for the bitstream. */
+/* Remove @num_bits from the bitstream. There must be at least @num_bits
+ * remaining in the buffer variable, from a previous call to
+ * bitstream_ensure_bits(). */
static inline void
-bitstream_remove_bits(struct input_bitstream *istream, unsigned num_bits)
+bitstream_remove_bits(struct input_bitstream *is, unsigned num_bits)
{
- wimlib_assert2(istream->bitsleft >= num_bits);
-
- istream->bitbuf <<= num_bits;
- istream->bitsleft -= num_bits;
+ is->bitbuf <<= num_bits;
+ is->bitsleft -= num_bits;
}
-/* Gets and removes @num_bits bits from the buffer variable, which must contain
- * at least @num_bits bits, for the bitstream. */
+/* Remove and return @num_bits bits from the bitstream. There must be at least
+ * @num_bits remaining in the buffer variable, from a previous call to
+ * bitstream_ensure_bits(). */
static inline u32
-bitstream_pop_bits(struct input_bitstream *istream, unsigned num_bits)
+bitstream_pop_bits(struct input_bitstream *is, unsigned num_bits)
{
- u32 n = bitstream_peek_bits(istream, num_bits);
- bitstream_remove_bits(istream, num_bits);
- return n;
+ u32 bits = bitstream_peek_bits(is, num_bits);
+ bitstream_remove_bits(is, num_bits);
+ return bits;
}
-/* Reads @num_bits bits from the input bitstream. On success, returns 0 and
- * returns the requested bits in @n. If there are fewer than @num_bits
- * remaining in the bitstream, -1 is returned. */
-static inline int
-bitstream_read_bits(struct input_bitstream *istream, unsigned num_bits, u32 *n)
+/* Read and return the next @num_bits bits from the bitstream. */
+static inline u32
+bitstream_read_bits(struct input_bitstream *is, unsigned num_bits)
{
- if (unlikely(bitstream_ensure_bits(istream, num_bits)))
- return -1;
+ bitstream_ensure_bits(is, num_bits);
+ return bitstream_pop_bits(is, num_bits);
+}
- *n = bitstream_pop_bits(istream, num_bits);
- return 0;
+/* Read and return the next literal byte embedded in the bitstream. */
+static inline u8
+bitstream_read_byte(struct input_bitstream *is)
+{
+ if (unlikely(is->end == is->next))
+ return 0;
+ return *is->next++;
}
-/* Return the next literal byte embedded in the bitstream, or -1 if the input
- * was exhausted. */
-static inline int
-bitstream_read_byte(struct input_bitstream *istream)
+/* Read and return the next 16-bit integer embedded in the bitstream. */
+static inline u16
+bitstream_read_u16(struct input_bitstream *is)
{
- if (unlikely(istream->data_bytes_left < 1))
- return -1;
+ u16 v;
- istream->data_bytes_left--;
- return *istream->data++;
+ if (unlikely(is->end - is->next < 2))
+ return 0;
+ v = le16_to_cpu(*(const le16 *)is->next);
+ is->next += 2;
+ return v;
}
-/* Reads @num_bits bits from the buffer variable for a bistream without checking
- * to see if that many bits are in the buffer or not. */
+/* Read and return the next 32-bit integer embedded in the bitstream. */
static inline u32
-bitstream_read_bits_nocheck(struct input_bitstream *istream, unsigned num_bits)
+bitstream_read_u32(struct input_bitstream *is)
{
- u32 n = bitstream_peek_bits(istream, num_bits);
- bitstream_remove_bits(istream, num_bits);
- return n;
+ u32 v;
+
+ if (unlikely(is->end - is->next < 4))
+ return 0;
+ v = le32_to_cpu(*(const le32 *)is->next);
+ is->next += 4;
+ return v;
}
-extern int
-read_huffsym_near_end_of_input(struct input_bitstream *istream,
- const u16 decode_table[],
- const u8 lens[],
- unsigned num_syms,
- unsigned table_bits,
- unsigned *n);
-
-/* Read a Huffman-encoded symbol from a bitstream. */
-static inline int
-read_huffsym(struct input_bitstream * restrict istream,
- const u16 decode_table[restrict],
- const u8 lens[restrict],
- unsigned num_syms,
- unsigned table_bits,
- unsigned *restrict n,
- unsigned max_codeword_len)
+/* Read an array of literal bytes embedded in the bitstream. Return a pointer
+ * to the resulting array, or NULL if the read overflows the input buffer. */
+static inline const u8 *
+bitstream_read_bytes(struct input_bitstream *is, size_t count)
{
- /* If there are fewer bits remaining in the input than the maximum
- * codeword length, use the slow path that has extra checks. */
- if (unlikely(bitstream_ensure_bits(istream, max_codeword_len))) {
- return read_huffsym_near_end_of_input(istream, decode_table,
- lens, num_syms,
- table_bits, n);
- }
+ const u8 *p;
+
+ if (unlikely(is->end - is->next < count))
+ return NULL;
+ p = is->next;
+ is->next += count;
+ return p;
+}
+
+/* Align the input bitstream on a coding-unit boundary. */
+static inline void
+bitstream_align(struct input_bitstream *is)
+{
+ is->bitsleft = 0;
+ is->bitbuf = 0;
+}
/* Needed alignment of decode_table parameter to make_huffman_decode_table().
 *
 * Reason: We may fill the entries with SSE instructions without worrying
 * about dealing with the unaligned case.  */
#define DECODE_TABLE_ALIGNMENT 16

/* Maximum supported symbol count for make_huffman_decode_table().
 *
 * Reason: In direct mapping entries, we store the symbol in 11 bits
 * (2^11 = 2048).  */
#define DECODE_TABLE_MAX_SYMBOLS 2048

/* Maximum supported table bits for make_huffman_decode_table().
 *
 * Reason: In internal binary tree nodes, offsets are encoded in 14 bits.
 * But the real limit is 13, because we allocate entries past the end of
 * the direct lookup part of the table for binary tree nodes.  (Note: if
 * needed this limit could be removed by encoding the offsets relative to
 * &decode_table[1 << table_bits].)  */
#define DECODE_TABLE_MAX_TABLE_BITS 13

/* Maximum supported codeword length for make_huffman_decode_table().
 *
 * Reason: In direct mapping entries, we encode the codeword length in 5
 * bits, and the top 2 bits can't both be set because that has special
 * meaning.  */
#define DECODE_TABLE_MAX_CODEWORD_LEN 23
/* Reads and returns the next Huffman-encoded symbol from a bitstream.  If the
 * input data is exhausted, the Huffman symbol is decoded as if the missing bits
 * are all zeroes.
 *
 * NOTE(review): bitstream_ensure_bits() asserts num_bits <= 17, so callers
 * must pass max_codeword_len <= 17 despite DECODE_TABLE_MAX_CODEWORD_LEN
 * being larger -- TODO confirm against the callers.
 *
 * XXX: This is mostly duplicated in lzms_huffman_decode_symbol() in
 * lzms-decompress.c.  */
static inline u16
read_huffsym(struct input_bitstream *istream, const u16 decode_table[],
	     unsigned table_bits, unsigned max_codeword_len)
{
	unsigned entry;
	unsigned key_bits;

	bitstream_ensure_bits(istream, max_codeword_len);

	/* Index the decode table by the next table_bits bits of the input. */
	key_bits = bitstream_peek_bits(istream, table_bits);
	entry = decode_table[key_bits];
	if (likely(entry < 0xC000)) {
		/* Fast case: The decode table directly provided the
		 * symbol and codeword length.  The low 11 bits are the
		 * symbol, and the high 5 bits are the codeword length.  */
		bitstream_remove_bits(istream, entry >> 11);
		return entry & 0x7FF;
	} else {
		/* Slow case: The codeword for the symbol is longer than
		 * table_bits, so the symbol does not have an entry
		 * directly in the first (1 << table_bits) entries of the
		 * decode table.  Traverse the appropriate binary tree
		 * bit-by-bit to decode the symbol: the low 14 bits of each
		 * internal-node entry give the offset of its children.  */
		bitstream_remove_bits(istream, table_bits);
		do {
			key_bits = (entry & 0x3FFF) + bitstream_pop_bits(istream, 1);
		} while ((entry = decode_table[key_bits]) >= 0xC000);
		return entry;
	}
}
/* Build the decode table, used by read_huffsym(), for a canonical Huffman code
 * described by the codeword lengths @lens[0..num_syms-1], where @num_bits is
 * the table_bits of the direct-lookup portion.  @decode_table must be aligned
 * on a DECODE_TABLE_ALIGNMENT boundary, and the DECODE_TABLE_MAX_* limits
 * above apply.  Presumably returns 0 on success and nonzero on an invalid
 * code -- TODO confirm against the definition in the corresponding .c file.  */
extern int
make_huffman_decode_table(u16 decode_table[], unsigned num_syms,
			  unsigned num_bits, const u8 lens[],
			  unsigned max_codeword_len);
/*
 * Copy a LZ77 match at (dst - offset) to dst.
 *
 * The length and offset must be already validated --- that is, (dst - offset)
 * can't underrun the output buffer, and (dst + length) can't overrun the output
 * buffer.  Also, the length cannot be 0.
 *
 * @winend points to the byte past the end of the output buffer.
 * This function won't write any data beyond this position.
 */
static inline void
lz_copy(u8 *dst, u32 length, u32 offset, const u8 *winend)
{
	const u8 *src = dst - offset;
#if defined(__x86_64__) || defined(__i386__)
	/* Copy one 'unsigned long' at a time.  On i386 and x86_64 this is
	 * faster than copying one byte at a time, unless the data is
	 * near-random and all the matches have very short lengths.  Note that
	 * since this requires unaligned memory accesses, it won't necessarily
	 * be faster on every architecture.
	 *
	 * Also note that we might copy more than the length of the match.  For
	 * example, if an 'unsigned long' is 8 bytes and the match is of length
	 * 5, then we'll simply copy 8 bytes.  This is okay as long as we don't
	 * write beyond the end of the output buffer, hence the check for
	 * (winend - (dst + length) >= sizeof(unsigned long) - 1).  The
	 * 'offset >= sizeof(unsigned long)' check guarantees each word-sized
	 * load finishes reading its source bytes before they can be
	 * overwritten by the corresponding store.  */
	if (offset >= sizeof(unsigned long) &&
	    winend - (dst + length) >= sizeof(unsigned long) - 1)
	{
		/* Access memory through a packed struct.  This tricks the
		 * compiler into allowing unaligned memory accesses.
		 * (_packed_attribute presumably expands to the compiler's
		 * packed attribute -- defined in wimlib/compiler.h.)  */
		struct ulong_wrapper {
			unsigned long v;
		} _packed_attribute;

		const u8 * const end = dst + length;
		unsigned long v;

		/* First word copied unconditionally: length >= 1 per the
		 * contract above.  */
		v = ((struct ulong_wrapper *)src)->v;
		((struct ulong_wrapper *)dst)->v = v;
		dst += sizeof(unsigned long);
		src += sizeof(unsigned long);

		if (dst < end) {
			do {
				v = ((struct ulong_wrapper *)src)->v;
				((struct ulong_wrapper *)dst)->v = v;
				dst += sizeof(unsigned long);
				src += sizeof(unsigned long);
			} while (dst < end);
		}

		return;
	}
#endif
	/* Generic fallback: byte-by-byte copy, which also handles closely
	 * overlapping matches (offset < length) correctly by re-reading bytes
	 * written earlier in this same call.  */
	do {
		*dst++ = *src++;
	} while (--length);
}
+
+#endif /* _WIMLIB_DECOMPRESS_COMMON_H */