#ifndef _WIMLIB_DECOMPRESS_COMMON_H
#define _WIMLIB_DECOMPRESS_COMMON_H
-#include "wimlib/assert.h"
+#include <string.h>
+
#include "wimlib/compiler.h"
-#include "wimlib/endianness.h"
#include "wimlib/types.h"
+#include "wimlib/unaligned.h"
/* Structure that encapsulates a block of in-memory data being interpreted as a
* stream of bits, optionally with interwoven literal bytes. Bits are assumed
bitstream_ensure_bits(struct input_bitstream *is, const unsigned num_bits)
{
/* This currently works for at most 17 bits. */
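+	/* (A refill only happens when bitsleft < num_bits, so num_bits <= 17
+	 * keeps bitsleft <= 16 and the shift by (16 - bitsleft) below
+	 * nonnegative.) */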
- wimlib_assert2(num_bits <= 17);
if (is->bitsleft >= num_bits)
return;
if (unlikely(is->end - is->next < 2))
goto overflow;
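+	/* Refill: splice the next little-endian 16-bit coding unit in just
+	 * below the bitsleft valid bits at the top of bitbuf. */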
- is->bitbuf |= (u32)le16_to_cpu(*(const le16 *)is->next)
- << (16 - is->bitsleft);
+ is->bitbuf |= (u32)get_unaligned_u16_le(is->next) << (16 - is->bitsleft);
is->next += 2;
is->bitsleft += 16;
if (unlikely(is->end - is->next < 2))
goto overflow;
- is->bitbuf |= (u32)le16_to_cpu(*(const le16 *)is->next);
+ is->bitbuf |= (u32)get_unaligned_u16_le(is->next);
is->next += 2;
is->bitsleft = 32;
}
static inline u32
bitstream_peek_bits(const struct input_bitstream *is, const unsigned num_bits)
{
- if (unlikely(num_bits == 0))
- return 0;
- return is->bitbuf >> (32 - num_bits);
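+	/* The double shift returns 0 when num_bits == 0 without ever shifting
+	 * a 32-bit value by 32 bits, which is undefined behavior in C. */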
+ return (is->bitbuf >> 1) >> (sizeof(is->bitbuf) * 8 - num_bits - 1);
}
/* Remove @num_bits from the bitstream. There must be at least @num_bits
static inline u8
bitstream_read_byte(struct input_bitstream *is)
{
- if (unlikely(is->end - is->next < 1))
+ if (unlikely(is->end == is->next))
return 0;
return *is->next++;
}
+/* Read and return the next 16-bit integer embedded in the bitstream. */
+static inline u16
+bitstream_read_u16(struct input_bitstream *is)
+{
+ u16 v;
+
+ if (unlikely(is->end - is->next < 2))
+ return 0;
+ v = get_unaligned_u16_le(is->next);
+ is->next += 2;
+ return v;
+}
+
/* Read and return the next 32-bit integer embedded in the bitstream. */
static inline u32
bitstream_read_u32(struct input_bitstream *is)
if (unlikely(is->end - is->next < 4))
return 0;
- v = le32_to_cpu(*(const le32 *)is->next);
+ v = get_unaligned_u32_le(is->next);
is->next += 4;
return v;
}
-/* Read an array of literal bytes embedded in the bitstream. Return a pointer
- * to the resulting array, or NULL if the read overflows the input buffer. */
-static inline const u8 *
-bitstream_read_bytes(struct input_bitstream *is, size_t count)
+/* Read into @dst_buffer an array of literal bytes embedded in the bitstream.
+ * Return a pointer just past the last byte written, or NULL if the read would
+ * overflow the input buffer. */
+static inline void *
+bitstream_read_bytes(struct input_bitstream *is, void *dst_buffer, size_t count)
{
- const u8 *p;
-
if (unlikely(is->end - is->next < count))
return NULL;
- p = is->next;
+ memcpy(dst_buffer, is->next, count);
is->next += count;
- return p;
+ return (u8 *)dst_buffer + count;
}
/* Align the input bitstream on a coding-unit boundary. */
* input data is exhausted, the Huffman symbol is decoded as if the missing bits
* are all zeroes.
*
- * XXX: This is mostly duplicated in lzms_huffman_decode_symbol() in
- * lzms-decompress.c. */
-static inline u16
+ * XXX: This is mostly duplicated in lzms_decode_huffman_symbol() in
+ * lzms_decompress.c. */
+static inline unsigned
read_huffsym(struct input_bitstream *istream, const u16 decode_table[],
unsigned table_bits, unsigned max_codeword_len)
{
- u16 entry;
- u16 key_bits;
+ unsigned entry;
+ unsigned key_bits;
bitstream_ensure_bits(istream, max_codeword_len);
unsigned num_bits, const u8 lens[],
unsigned max_codeword_len);
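+
+/* Copy a single machine word from @src to @dst; neither pointer needs to be
+ * aligned. */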
+static inline void
+copy_word_unaligned(const void *src, void *dst)
+{
+ store_word_unaligned(load_word_unaligned(src), dst);
+}
+
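+/* Return a machine word with every byte equal to @b; e.g. repeat_byte(0xAB)
+ * is 0xABABABAB when the word size is 4 bytes. The BUILD_BUG_ON rejects any
+ * word size other than 4 or 8 bytes at compile time. */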
+static inline machine_word_t
+repeat_byte(u8 b)
+{
+ machine_word_t v;
+
+ BUILD_BUG_ON(WORDSIZE != 4 && WORDSIZE != 8);
+
+ v = b;
+ v |= v << 8;
+ v |= v << 16;
+ v |= v << ((WORDSIZE == 8) ? 32 : 0);
+ return v;
+}
/*
- * Copy a LZ77 match at (dst - offset) to dst.
+ * Copy an LZ77 match at (dst - offset) to dst.
*
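+ * Matches may overlap the bytes they produce: for example, with offset == 2
+ * and length == 5, copying from "ab" appends "ababa", since a byte read by
+ * the copy may itself have just been written by it.
+ *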
* The length and offset must be already validated --- that is, (dst - offset)
* can't underrun the output buffer, and (dst + length) can't overrun the output
* This function won't write any data beyond this position.
*/
static inline void
-lz_copy(u8 *dst, u32 length, u32 offset, const u8 *winend)
+lz_copy(u8 *dst, u32 length, u32 offset, const u8 *winend, u32 min_length)
{
const u8 *src = dst - offset;
-#if defined(__x86_64__) || defined(__i386__)
- /* Copy one 'unsigned long' at a time. On i386 and x86_64 this is
+ const u8 * const end = dst + length;
+
+ /*
+ * Try to copy one machine word at a time. On i386 and x86_64 this is
* faster than copying one byte at a time, unless the data is
* near-random and all the matches have very short lengths. Note that
* since this requires unaligned memory accesses, it won't necessarily
* be faster on every architecture.
*
* Also note that we might copy more than the length of the match. For
- * example, if an 'unsigned long' is 8 bytes and the match is of length
- * 5, then we'll simply copy 8 bytes. This is okay as long as we don't
- * write beyond the end of the output buffer, hence the check for
- * (winend - (dst + length) >= sizeof(unsigned long) - 1). */
- if (offset >= sizeof(unsigned long) &&
- winend - (dst + length) >= sizeof(unsigned long) - 1)
+ * example, if a word is 8 bytes and the match is of length 5, then
+ * we'll simply copy 8 bytes. This is okay as long as we don't write
+ * beyond the end of the output buffer, hence the check for (winend -
+ * end >= WORDSIZE - 1).
+ */
+ if (UNALIGNED_ACCESS_IS_VERY_FAST &&
+ likely(winend - end >= WORDSIZE - 1))
{
- /* Access memory through a packed struct. This tricks the
- * compiler into allowing unaligned memory accesses. */
- struct ulong_wrapper {
- unsigned long v;
- } _packed_attribute;
-
- const u8 * const end = dst + length;
- unsigned long v;
-
- v = ((struct ulong_wrapper *)src)->v;
- ((struct ulong_wrapper *)dst)->v = v;
- dst += sizeof(unsigned long);
- src += sizeof(unsigned long);
- if (dst < end) {
+ if (offset >= WORDSIZE) {
+ /* The source and destination words don't overlap. */
+
+ /* To improve branch prediction, one iteration of this
+ * loop is unrolled. Most matches are short and will
+ * fail the first check. But if that check passes, then
+			 * it becomes increasingly likely that the match is long
+ * and we'll need to continue copying. */
+
+ copy_word_unaligned(src, dst);
+ src += WORDSIZE;
+ dst += WORDSIZE;
+
+ if (dst < end) {
+ do {
+ copy_word_unaligned(src, dst);
+ src += WORDSIZE;
+ dst += WORDSIZE;
+ } while (dst < end);
+ }
+ return;
+ } else if (offset == 1) {
+
+ /* Offset 1 matches are equivalent to run-length
+ * encoding of the previous byte. This case is common
+ * if the data contains many repeated bytes. */
+
+ machine_word_t v = repeat_byte(*(dst - 1));
do {
- v = ((struct ulong_wrapper *)src)->v;
- ((struct ulong_wrapper *)dst)->v = v;
- dst += sizeof(unsigned long);
- src += sizeof(unsigned long);
+ store_word_unaligned(v, dst);
+ src += WORDSIZE;
+ dst += WORDSIZE;
} while (dst < end);
+ return;
}
+ /*
+ * We don't bother with special cases for other 'offset <
+ * WORDSIZE', which are usually rarer than 'offset == 1'. Extra
+ * checks will just slow things down. Actually, it's possible
+ * to handle all the 'offset < WORDSIZE' cases using the same
+	 * code, but it becomes more complicated and doesn't seem any
+ * faster overall; it definitely slows down the more common
+ * 'offset == 1' case.
+ */
+ }
- return;
+ /* Fall back to a bytewise copy. */
+
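+	/* @min_length is expected to be a compile-time constant (the format's
+	 * minimum match length), so after inlining the tests below fold away
+	 * and simply unroll the first few byte copies that are guaranteed to
+	 * be needed. */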
+ if (min_length >= 2) {
+ *dst++ = *src++;
+ length--;
+ }
+ if (min_length >= 3) {
+ *dst++ = *src++;
+ length--;
+ }
+ if (min_length >= 4) {
+ *dst++ = *src++;
+ length--;
}
-#endif
do {
*dst++ = *src++;
} while (--length);