/* Number of bits currently held in @bitbuf */
unsigned bitcount;
- /* Pointer to one past the next position in the output buffer at which
- * to output a 16-bit coding unit */
- le16 *next;
-
/* Pointer to the beginning of the output buffer (this is the "end" when
* writing backwards!) */
- le16 *begin;
+ u8 *begin;
+
+ /* Pointer to just past the next position in the output buffer at which
+ * to output a 16-bit coding unit (the stream is written backwards, so
+ * this pointer is decremented before each write) */
+ u8 *next;
};
/* This structure tracks the state of range encoding and its output, which
u32 cache_size;
/* Pointer to the beginning of the output buffer */
- le16 *begin;
+ u8 *begin;
/* Pointer to the position in the output buffer at which the next coding
* unit must be written */
- le16 *next;
+ u8 *next;
/* Pointer to just past the end of the output buffer */
- le16 *end;
+ u8 *end;
};
/* Bookkeeping information for an adaptive Huffman code */
/*
* Initialize the range encoder @rc to write forwards to the specified buffer
- * @out that is @count 16-bit integers long.
+ * @out that is @size bytes long.
*/
static void
-lzms_range_encoder_init(struct lzms_range_encoder *rc, le16 *out, size_t count)
+lzms_range_encoder_init(struct lzms_range_encoder *rc, u8 *out, size_t size)
{
rc->lower_bound = 0;
rc->range_size = 0xffffffff;
rc->cache = 0;
rc->cache_size = 1;
rc->begin = out;
- rc->next = out - 1;
- rc->end = out + count;
+ rc->next = out - sizeof(le16);
+ rc->end = out + (size & ~1);
}
/*
if (likely(rc->next >= rc->begin)) {
if (rc->next != rc->end) {
put_unaligned_le16(rc->cache +
- (u16)(rc->lower_bound >> 32),
- rc->next++);
+ (u16)(rc->lower_bound >> 32),
+ rc->next);
+ rc->next += sizeof(le16);
}
} else {
- rc->next++;
+ rc->next += sizeof(le16);
}
rc->cache = 0xffff;
} while (--rc->cache_size != 0);
/*
* Initialize the output bitstream @os to write backwards to the specified
- * buffer @out that is @count 16-bit integers long.
+ * buffer @out that is @size bytes long.
*/
static void
lzms_output_bitstream_init(struct lzms_output_bitstream *os,
- le16 *out, size_t count)
+ u8 *out, size_t size)
{
os->bitbuf = 0;
os->bitcount = 0;
- os->next = out + count;
os->begin = out;
+ os->next = out + (size & ~1);
}
/*
os->bitcount -= 16;
/* Write a coding unit, unless it would underflow the buffer. */
- if (os->next != os->begin)
- put_unaligned_le16(os->bitbuf >> os->bitcount, --os->next);
+ if (os->next != os->begin) {
+ os->next -= sizeof(le16);
+ put_unaligned_le16(os->bitbuf >> os->bitcount, os->next);
+ }
/* Optimization for call sites that never write more than 16
* bits at once. */
if (os->next == os->begin)
return false;
- if (os->bitcount != 0)
- put_unaligned_le16(os->bitbuf << (16 - os->bitcount), --os->next);
+ if (os->bitcount != 0) {
+ os->next -= sizeof(le16);
+ put_unaligned_le16(os->bitbuf << (16 - os->bitcount), os->next);
+ }
return true;
}
static size_t
lzms_finalize(struct lzms_compressor *c)
{
- size_t num_forwards_units;
- size_t num_backwards_units;
+ size_t num_forwards_bytes;
+ size_t num_backwards_bytes;
/* Flush both the forwards and backwards streams, and make sure they
* didn't cross each other and start overwriting each other's data. */
* bitstream. Move the data output by the backwards bitstream to be
* adjacent to the data output by the forward bitstream, and calculate
* the compressed size that this results in. */
- num_forwards_units = c->rc.next - c->rc.begin;
- num_backwards_units = c->rc.end - c->os.next;
+ num_forwards_bytes = c->rc.next - c->rc.begin;
+ num_backwards_bytes = c->rc.end - c->os.next;
- memmove(c->rc.next, c->os.next, num_backwards_units * sizeof(le16));
+ memmove(c->rc.next, c->os.next, num_backwards_bytes);
- return (num_forwards_units + num_backwards_units) * sizeof(le16);
+ return num_forwards_bytes + num_backwards_bytes;
}
static u64
lzms_init_delta_matchfinder(c);
/* Initialize the encoder structures. */
- lzms_range_encoder_init(&c->rc, out, out_nbytes_avail / sizeof(le16));
- lzms_output_bitstream_init(&c->os, out, out_nbytes_avail / sizeof(le16));
+ lzms_range_encoder_init(&c->rc, out, out_nbytes_avail);
+ lzms_output_bitstream_init(&c->os, out, out_nbytes_avail);
lzms_init_states_and_probabilities(c);
lzms_init_huffman_codes(c, lzms_get_num_offset_slots(c->in_nbytes));
/* Pointer to the next little-endian 16-bit integer in the compressed
* input data (reading forwards). */
- const le16 *next;
+ const u8 *next;
/* Pointer to the end of the compressed input data. */
- const le16 *end;
+ const u8 *end;
};
typedef u64 bitbuf_t;
/* Pointer to the one past the next little-endian 16-bit integer in the
* compressed input data (reading backwards). */
- const le16 *next;
+ const u8 *next;
/* Pointer to the beginning of the compressed input data. */
- const le16 *begin;
+ const u8 *begin;
};
#define BITBUF_NBITS (8 * sizeof(bitbuf_t))
};
/* Initialize the input bitstream @is to read backwards from the compressed data
- * buffer @in that is @count 16-bit integers long. */
+ * buffer @in that is @count bytes long. */
static void
lzms_input_bitstream_init(struct lzms_input_bitstream *is,
- const le16 *in, size_t count)
+ const u8 *in, size_t count)
{
is->bitbuf = 0;
is->bitsleft = 0;
avail = BITBUF_NBITS - is->bitsleft;
if (UNALIGNED_ACCESS_IS_FAST && CPU_IS_LITTLE_ENDIAN &&
- WORDSIZE == 8 && likely((u8 *)is->next - (u8 *)is->begin >= 8))
+ WORDSIZE == 8 && likely(is->next - is->begin >= 8))
{
- is->next -= avail >> 4;
+ is->next -= (avail & ~15) >> 3;
is->bitbuf |= load_u64_unaligned(is->next) << (avail & 15);
is->bitsleft += avail & ~15;
} else {
- if (likely(is->next != is->begin))
- is->bitbuf |= (bitbuf_t)le16_to_cpu(*--is->next)
+ if (likely(is->next != is->begin)) {
+ is->next -= sizeof(le16);
+ is->bitbuf |= (bitbuf_t)get_unaligned_le16(is->next)
<< (avail - 16);
- if (likely(is->next != is->begin))
- is->bitbuf |=(bitbuf_t)le16_to_cpu(*--is->next)
+ }
+ if (likely(is->next != is->begin)) {
+ is->next -= sizeof(le16);
+ is->bitbuf |= (bitbuf_t)get_unaligned_le16(is->next)
<< (avail - 32);
+ }
is->bitsleft += 32;
}
}
}
/* Initialize the range decoder @rd to read forwards from the compressed data
- * buffer @in that is @count 16-bit integers long. */
+ * buffer @in that is @count bytes long. */
static void
lzms_range_decoder_init(struct lzms_range_decoder *rd,
- const le16 *in, size_t count)
+ const u8 *in, size_t count)
{
rd->range = 0xffffffff;
- rd->code = ((u32)le16_to_cpu(in[0]) << 16) | le16_to_cpu(in[1]);
- rd->next = in + 2;
+ rd->code = ((u32)get_unaligned_le16(in) << 16) |
+ get_unaligned_le16(in + 2);
+ rd->next = in + 4;
rd->end = in + count;
}
if (!(rd->range & 0xFFFF0000)) {
rd->range <<= 16;
rd->code <<= 16;
- if (likely(rd->next != rd->end))
- rd->code |= le16_to_cpu(*rd->next++);
+ if (likely(rd->next != rd->end)) {
+ rd->code |= get_unaligned_le16(rd->next);
+ rd->next += sizeof(le16);
+ }
}
/* Based on the probability, calculate the bound between the 0-bit
*
* 1. LZMS-compressed data is a series of 16-bit integers, so the
* compressed data buffer cannot take up an odd number of bytes.
- * 2. To prevent poor performance on some architectures, we require that
- * the compressed data buffer is 2-byte aligned.
- * 3. There must be at least 4 bytes of compressed data, since otherwise
+ * 2. There must be at least 4 bytes of compressed data, since otherwise
* we cannot even initialize the range decoder.
*/
- if ((in_nbytes & 1) || ((uintptr_t)in & 1) || (in_nbytes < 4))
+ if ((in_nbytes & 1) || (in_nbytes < 4))
return -1;
- lzms_range_decoder_init(&rd, in, in_nbytes / sizeof(le16));
+ lzms_range_decoder_init(&rd, in, in_nbytes);
- lzms_input_bitstream_init(&is, in, in_nbytes / sizeof(le16));
+ lzms_input_bitstream_init(&is, in, in_nbytes);
lzms_init_probabilities(&d->probs);