/* The matches and literals that the parser has chosen for the current
* block. The required length of this array is limited by the maximum
- * number of matches that can ever be chosen for a single block. */
- struct lzx_sequence chosen_sequences[DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN)];
+ * number of matches that can ever be chosen for a single block, plus
+ * one for the special entry at the end. */
+ struct lzx_sequence chosen_sequences[
+ DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];
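To make the bound concrete, here is a minimal sketch of the sizing arithmetic. It assumes the usual values LZX_DIV_BLOCK_SIZE == 32768 and LZX_MIN_MATCH_LEN == 2 and a conventional DIV_ROUND_UP definition; none of these are taken from this diff.

#include <stdio.h>

#define LZX_DIV_BLOCK_SIZE	32768	/* assumed block size */
#define LZX_MIN_MATCH_LEN	2	/* assumed minimum match length */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int
main(void)
{
	/* Each chosen match covers at least LZX_MIN_MATCH_LEN bytes of the
	 * block, so this is the most matches a single block can contain. */
	unsigned max_matches = DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN);

	/* One extra slot is reserved for the special entry at the end. */
	printf("chosen_sequences length = %u\n", max_matches + 1);	/* 16385 */
	return 0;
}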
/* Tables for mapping adjusted offsets to offset slots */
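For context on those tables, the sketch below shows the general idea of mapping an adjusted offset to its offset slot: slot s covers the offsets in [slot_base[s], slot_base[s+1]). The base values and the helper name here are illustrative only, not the real LZX tables; the actual code precomputes byte tables so the lookup is a single array index.

#include <stdint.h>

/* Placeholder base values, not the real tables. */
static const uint32_t slot_base[] = { 0, 1, 2, 3, 4, 6, 8, 12, 16, 24 };
#define NUM_SLOTS (sizeof(slot_base) / sizeof(slot_base[0]))

static unsigned
offset_to_slot_sketch(uint32_t adjusted_offset)
{
	unsigned slot = 0;

	/* Walk the bases until the next slot's base exceeds the offset. */
	while (slot + 1 < NUM_SLOTS && slot_base[slot + 1] <= adjusted_offset)
		slot++;
	return slot;
}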
static inline void
lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
{
+ /* Masking the number of bits to shift is only needed to avoid undefined
+ * behavior; we don't actually care about the results of bad shifts, since
+ * when os->bitcount is too small the garbage bytes written below are never
+ * consumed (os->next only advances past complete 16-bit chunks) and are
+ * overwritten by a later flush. On x86, the explicit masking generates no
+ * extra code, as the shift instructions already mask their count. */
+ const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1;
+
if (os->end - os->next < 6)
return;
- put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 16), os->next + 0);
+ put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 16) &
+ shift_mask), os->next + 0);
if (max_num_bits > 16)
- put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 32), os->next + 2);
+ put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 32) &
+ shift_mask), os->next + 2);
if (max_num_bits > 32)
- put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 48), os->next + 4);
+ put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 48) &
+ shift_mask), os->next + 4);
os->next += (os->bitcount >> 4) << 1;
os->bitcount &= 15;
}
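The same masking idiom in isolation, with illustrative names (peek_low16, bitbuf, bitcount) rather than the encoder's real state:

#include <stdint.h>

static inline uint16_t
peek_low16(uint64_t bitbuf, unsigned bitcount)
{
	const unsigned shift_mask = 8 * sizeof(bitbuf) - 1;	/* 63 here */

	/* If bitcount < 16, (bitcount - 16) wraps to a huge unsigned value,
	 * and shifting a 64-bit integer by 64 or more is undefined behavior
	 * in C. Masking keeps the count in [0, 63]; the result is garbage in
	 * that case, but the caller never uses it, and on x86 the AND costs
	 * nothing because SHR masks the count anyway. */
	return (uint16_t)(bitbuf >> ((bitcount - 16) & shift_mask));
}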
STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
- STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 9 &&
+ STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
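These checks compile to nothing when they hold. Below is a sketch of how such a compile-time assertion can be expressed; wimlib's actual STATIC_ASSERT macro may be defined differently.

#if __STDC_VERSION__ >= 201112L
#  define STATIC_ASSERT(expr)	_Static_assert((expr), #expr)
#else
/* Pre-C11 fallback: a negative array size forces a compile-time error
 * whenever the condition is false. */
#  define STATIC_ASSERT(expr)	((void)sizeof(char[1 - 2 * !(expr)]))
#endif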