X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=src%2Flzx_compress.c;h=c682e7e9a1f26562f9c7fe63249f08d8f7217112;hp=33943af7fa41d1e4d4cb3f4a66bdbc4acd27e93e;hb=558155a9524277661e15a030648c62b32be1cdb6;hpb=7ea97f5338e7fd6b19dbb7df4e8e333d286d8ac2

diff --git a/src/lzx_compress.c b/src/lzx_compress.c
index 33943af7..c682e7e9 100644
--- a/src/lzx_compress.c
+++ b/src/lzx_compress.c
@@ -410,8 +410,10 @@ struct lzx_compressor {
 
 	/* The matches and literals that the parser has chosen for the current
 	 * block. The required length of this array is limited by the maximum
-	 * number of matches that can ever be chosen for a single block. */
-	struct lzx_sequence chosen_sequences[DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN)];
+	 * number of matches that can ever be chosen for a single block, plus
+	 * one for the special entry at the end. */
+	struct lzx_sequence chosen_sequences[
+		DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];
 
 	/* Tables for mapping adjusted offsets to offset slots */
 
@@ -548,7 +550,7 @@ struct lzx_output_bitstream {
 
 /* Can the specified number of bits always be added to 'bitbuf' after any
  * pending 16-bit coding units have been flushed? */
-#define CAN_BUFFER(n)	((n) <= (8 * sizeof(machine_word_t)) - 16)
+#define CAN_BUFFER(n)	((n) <= (8 * sizeof(machine_word_t)) - 15)
 
 /*
  * Initialize the output bitstream.
@@ -585,13 +587,21 @@ lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
 static inline void
 lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
 {
+	/* Masking the number of bits to shift is only needed to avoid undefined
+	 * behavior; we don't actually care about the results of bad shifts. On
+	 * x86, the explicit masking generates no extra code. */
+	const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1;
+
 	if (os->end - os->next < 6)
 		return;
-	put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 16), os->next + 0);
+	put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 16) &
+					    shift_mask), os->next + 0);
 	if (max_num_bits > 16)
-		put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 32), os->next + 2);
+		put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 32) &
+						    shift_mask), os->next + 2);
 	if (max_num_bits > 32)
-		put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 48), os->next + 4);
+		put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 48) &
+						    shift_mask), os->next + 4);
 	os->next += (os->bitcount >> 4) << 1;
 	os->bitcount &= 15;
 }
@@ -634,7 +644,7 @@ lzx_make_huffman_codes(struct lzx_compressor *c)
 
 	STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
 		      MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
-	STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 9 &&
+	STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
		      LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
	STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
		      ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
@@ -994,6 +1004,8 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
				if (!CAN_BUFFER(MAX_MATCH_BITS))
					lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
			} else {
+				STATIC_ASSERT(CAN_BUFFER(17));
+
				lzx_add_bits(os, extra_bits, num_extra_bits);
				if (!CAN_BUFFER(MAX_MATCH_BITS))
					lzx_flush_bits(os, 17);
@@ -1809,7 +1821,7 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
		if (unlikely(max_len > in_end - in_next)) {
			max_len = in_end - in_next;
			nice_len = min(max_len, nice_len);
-			if (unlikely(max_len < 5)) {
+			if (unlikely(max_len < BT_MATCHFINDER_REQUIRED_NBYTES)) {
				in_next++;
				cache_ptr->length = 0;
				cache_ptr++;
@@ -1849,7 +1861,7 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
		if (unlikely(max_len > in_end - in_next)) {
			max_len = in_end - in_next;
			nice_len = min(max_len, nice_len);
-			if (unlikely(max_len < 5)) {
+			if (unlikely(max_len < BT_MATCHFINDER_REQUIRED_NBYTES)) {
				in_next++;
				cache_ptr->length = 0;
				cache_ptr++;
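
A few notes on the hunks above. First, the chosen_sequences[] resize: the parser emits at most one lzx_sequence per LZX_MIN_MATCH_LEN bytes of the block, plus the one terminating entry the new comment mentions. Here is a quick standalone check of the resulting array length, assuming the usual values LZX_DIV_BLOCK_SIZE = 32768 and LZX_MIN_MATCH_LEN = 2 from lzx_constants.h; this is an illustrative sketch, not wimlib code:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define LZX_DIV_BLOCK_SIZE	32768	/* assumed value */
	#define LZX_MIN_MATCH_LEN	2	/* assumed value */

	int main(void)
	{
		/* One sequence per possible match, plus the special end entry. */
		printf("%d\n", DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1);
		/* prints 16385 */
		return 0;
	}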
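Second, the CAN_BUFFER() bound: after lzx_flush_bits() runs, 'os->bitcount &= 15' leaves at most 15 pending bits, so at least 8 * sizeof(machine_word_t) - 15 bits of headroom are always available, not just the '- 16' the old macro conservatively assumed. On a 32-bit build that raises the guaranteed headroom from 16 to 17 bits, which is what the new STATIC_ASSERT(CAN_BUFFER(17)) in lzx_write_sequences() relies on before flushing with lzx_flush_bits(os, 17). A minimal sketch of the argument, with word_t standing in for machine_word_t:

	#include <assert.h>
	#include <stdint.h>

	typedef uint32_t word_t;	/* the worst case: a 32-bit build */

	#define CAN_BUFFER(n)	((n) <= (8 * sizeof(word_t)) - 15)

	/* 32 word bits minus at most 15 pending bits leaves 17 bits of headroom.
	 * With the old '- 16' bound this assertion would fail on 32-bit builds. */
	static_assert(CAN_BUFFER(17), "17 bits must fit after any flush");

	int main(void) { return 0; }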
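Third, the shift masking in lzx_flush_bits(): when os->bitcount < 16, (os->bitcount - 16) wraps around, and shifting by such an out-of-range count is undefined behavior in C, even though the stale result is harmless (os->next only advances past complete 16-bit units, so any garbage bytes get overwritten). Masking the shift count with the word width minus one makes the shift well defined, and on x86 it costs nothing because the shift instructions already ignore the upper bits of the count. A self-contained illustration of the idiom (the names here are mine, not wimlib's):

	#include <stdint.h>
	#include <stdio.h>

	/* Extract the top 16 buffered bits, as lzx_flush_bits() does. The mask
	 * keeps the shift well defined even when bitcount < 16; the caller then
	 * simply never uses the (meaningless) result. */
	static uint16_t
	peek_top_u16(uint64_t bitbuf, unsigned bitcount)
	{
		const unsigned shift_mask = 8 * sizeof(bitbuf) - 1;	/* 63 here */

		return (uint16_t)(bitbuf >> ((bitcount - 16) & shift_mask));
	}

	int main(void)
	{
		printf("%#x\n", peek_top_u16(0xABCDE, 20));	/* 0xabcd: top 16 of 20 bits */
		printf("%#x\n", peek_top_u16(0x5F, 7));		/* defined, but garbage */
		return 0;
	}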