u16 adjusted_length;
/* If bit 31 is clear, then this field contains the match header in bits
- * 0-8 and the match offset minus LZX_OFFSET_ADJUSTMENT in bits 9-30.
- * Otherwise, this sequence's literal run was the last literal run in
- * the block, so there is no match that follows it. */
+ * 0-8, and either the match offset plus LZX_OFFSET_ADJUSTMENT or a
+ * recent offset code in bits 9-30. Otherwise (if bit 31 is set), this
+ * sequence's literal run was the last literal run in the block, so
+ * there is no match that follows it. */
u32 adjusted_offset_and_match_hdr;
};
};
}
-/* Pop a match offset off the front (most recently used) end of the queue. */
-static inline u32
-lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
-{
- u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
- queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
- return offset;
-}
-
/* Swap a match offset to the front of the queue. */
static inline struct lzx_lru_queue
lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
/* The matches and literals that the parser has chosen for the current
* block. The required length of this array is limited by the maximum
- * number of matches that can ever be chosen for a single block. */
- struct lzx_sequence chosen_sequences[DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN)];
+ * number of matches that can ever be chosen for a single block, plus
+ * one for the special entry at the end. */
+ struct lzx_sequence chosen_sequences[
+ DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];
/* Tables for mapping adjusted offsets to offset slots */
/* Can the specified number of bits always be added to 'bitbuf' after any
* pending 16-bit coding units have been flushed? */
-#define CAN_BUFFER(n) ((n) <= (8 * sizeof(machine_word_t)) - 16)
+#define CAN_BUFFER(n) ((n) <= (8 * sizeof(machine_word_t)) - 15)
/*
* Initialize the output bitstream.
static inline void
lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
{
+ /* Masking the shift amount is needed only to avoid the undefined
+ * behavior of shifting by >= the width of the type; we don't actually
+ * care about the result of such out-of-range shifts. On x86, the
+ * explicit masking generates no extra code. */
+ const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1;
+
if (os->end - os->next < 6)
return;
- put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 16), os->next + 0);
+ put_unaligned_le16(os->bitbuf >> ((os->bitcount - 16) &
+ shift_mask), os->next + 0);
if (max_num_bits > 16)
- put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 32), os->next + 2);
+ put_unaligned_le16(os->bitbuf >> ((os->bitcount - 32) &
+ shift_mask), os->next + 2);
if (max_num_bits > 32)
- put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 48), os->next + 4);
+ put_unaligned_le16(os->bitbuf >> ((os->bitcount - 48) &
+ shift_mask), os->next + 4);
os->next += (os->bitcount >> 4) << 1;
os->bitcount &= 15;
}
return 0;
if (os->bitcount != 0) {
- put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next);
+ put_unaligned_le16(os->bitbuf << (16 - os->bitcount), os->next);
os->next += 2;
}
STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
- STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 9 &&
+ STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
unsigned lit1 = block_data[1];
unsigned lit2 = block_data[2];
unsigned lit3 = block_data[3];
- lzx_add_bits(os, codes->codewords.main[lit0], codes->lens.main[lit0]);
- lzx_add_bits(os, codes->codewords.main[lit1], codes->lens.main[lit1]);
- lzx_add_bits(os, codes->codewords.main[lit2], codes->lens.main[lit2]);
- lzx_add_bits(os, codes->codewords.main[lit3], codes->lens.main[lit3]);
+ lzx_add_bits(os, codes->codewords.main[lit0],
+ codes->lens.main[lit0]);
+ lzx_add_bits(os, codes->codewords.main[lit1],
+ codes->lens.main[lit1]);
+ lzx_add_bits(os, codes->codewords.main[lit2],
+ codes->lens.main[lit2]);
+ lzx_add_bits(os, codes->codewords.main[lit3],
+ codes->lens.main[lit3]);
lzx_flush_bits(os, 4 * MAIN_CODEWORD_LIMIT);
block_data += 4;
litrunlen -= 4;
}
if (litrunlen--) {
unsigned lit = *block_data++;
- lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
if (litrunlen--) {
unsigned lit = *block_data++;
- lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
if (litrunlen--) {
unsigned lit = *block_data++;
- lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
} else {
lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
/* 32-bit: write 1 literal at a time. */
do {
unsigned lit = *block_data++;
- lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
} while (--litrunlen);
}
/* If needed, output the length symbol for the match. */
if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
- lzx_add_bits(os, codes->codewords.len[adjusted_length - LZX_NUM_PRIMARY_LENS],
- codes->lens.len[adjusted_length - LZX_NUM_PRIMARY_LENS]);
+ lzx_add_bits(os, codes->codewords.len[adjusted_length -
+ LZX_NUM_PRIMARY_LENS],
+ codes->lens.len[adjusted_length -
+ LZX_NUM_PRIMARY_LENS]);
if (!CAN_BUFFER(MAX_MATCH_BITS))
lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
}
if (!CAN_BUFFER(MAX_MATCH_BITS))
lzx_flush_bits(os, 14);
- lzx_add_bits(os, codes->codewords.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK],
- codes->lens.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]);
+ lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
+ LZX_ALIGNED_OFFSET_BITMASK],
+ codes->lens.aligned[adjusted_offset &
+ LZX_ALIGNED_OFFSET_BITMASK]);
if (!CAN_BUFFER(MAX_MATCH_BITS))
lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
} else {
+ STATIC_ASSERT(CAN_BUFFER(17));
+
lzx_add_bits(os, extra_bits, num_extra_bits);
if (!CAN_BUFFER(MAX_MATCH_BITS))
lzx_flush_bits(os, 17);
* of coding the literal is integrated into the queue update
* code below. */
literal = *in_next++;
- cost = cur_node->cost +
- c->costs.main[lzx_main_symbol_for_literal(literal)];
+ cost = cur_node->cost + c->costs.main[literal];
/* Advance to the next position. */
cur_node++;
static void
lzx_compute_match_costs(struct lzx_compressor *c)
{
- unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
+ unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) /
+ LZX_NUM_LEN_HEADERS;
struct lzx_costs *costs = &c->costs;
for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
- unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
+ unsigned main_symbol = LZX_NUM_CHARS + (offset_slot *
+ LZX_NUM_LEN_HEADERS);
unsigned i;
#if LZX_CONSIDER_ALIGNED_COSTS
unsigned i;
const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
- for (i = 0; i < c->num_main_syms; i++)
- c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
+ for (i = 0; i < c->num_main_syms; i++) {
+ c->costs.main[i] = (lens->main[i] ? lens->main[i] :
+ MAIN_CODEWORD_LIMIT) * LZX_BIT_COST;
+ }
- for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
- c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
+ for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
+ c->costs.len[i] = (lens->len[i] ? lens->len[i] :
+ LENGTH_CODEWORD_LIMIT) * LZX_BIT_COST;
+ }
#if LZX_CONSIDER_ALIGNED_COSTS
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
+ for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+ c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] :
+ ALIGNED_CODEWORD_LIMIT) * LZX_BIT_COST;
+ }
#endif
lzx_compute_match_costs(c);
if (unlikely(max_len > in_end - in_next)) {
max_len = in_end - in_next;
nice_len = min(max_len, nice_len);
- if (unlikely(max_len < 5)) {
+ if (unlikely(max_len <
+ BT_MATCHFINDER_REQUIRED_NBYTES))
+ {
in_next++;
cache_ptr->length = 0;
cache_ptr++;
}
/* Check for matches. */
- lz_matchptr = CALL_BT_MF(is_16_bit, c, bt_matchfinder_get_matches,
+ lz_matchptr = CALL_BT_MF(is_16_bit, c,
+ bt_matchfinder_get_matches,
in_begin,
in_next - in_begin,
max_len,
if (unlikely(max_len > in_end - in_next)) {
max_len = in_end - in_next;
nice_len = min(max_len, nice_len);
- if (unlikely(max_len < 5)) {
+ if (unlikely(max_len <
+ BT_MATCHFINDER_REQUIRED_NBYTES))
+ {
in_next++;
cache_ptr->length = 0;
cache_ptr++;
continue;
}
}
- CALL_BT_MF(is_16_bit, c, bt_matchfinder_skip_position,
+ CALL_BT_MF(is_16_bit, c,
+ bt_matchfinder_skip_position,
in_begin,
in_next - in_begin,
max_len,
/* Find the longest match at the current position. */
- cur_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match,
+ cur_len = CALL_HC_MF(is_16_bit, c,
+ hc_matchfinder_longest_match,
in_begin,
in_next - in_begin,
2,
nice_len = min(max_len, nice_len);
}
- next_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match,
+ next_len = CALL_HC_MF(is_16_bit, c,
+ hc_matchfinder_longest_match,
in_begin,
in_next - in_begin,
cur_len - 2,
lzx_record_match(c, cur_len, cur_offset_data,
recent_offsets, is_16_bit,
&litrunlen, &next_seq);
- in_next = CALL_HC_MF(is_16_bit, c, hc_matchfinder_skip_positions,
+ in_next = CALL_HC_MF(is_16_bit, c,
+ hc_matchfinder_skip_positions,
in_begin,
in_next - in_begin,
in_end - in_begin,
c->impl = lzx_compress_lazy_16;
else
c->impl = lzx_compress_lazy_32;
- c->max_search_depth = (36 * compression_level) / 20;
- c->nice_match_length = (72 * compression_level) / 20;
+ c->max_search_depth = (60 * compression_level) / 20;
+ c->nice_match_length = (80 * compression_level) / 20;
/* lzx_compress_lazy() needs max_search_depth >= 2 because it
* halves the max_search_depth when attempting a lazy match, and
/* Scale nice_match_length and max_search_depth with the
* compression level. */
c->max_search_depth = (24 * compression_level) / 50;
- c->nice_match_length = (32 * compression_level) / 50;
+ c->nice_match_length = (48 * compression_level) / 50;
/* Set a number of optimization passes appropriate for the
* compression level. */
else
memcpy(c->in_buffer, in, in_nbytes);
c->in_nbytes = in_nbytes;
- lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
+ lzx_preprocess(c->in_buffer, in_nbytes);
/* Initially, the previous Huffman codeword lengths are all zeroes. */
c->codes_index = 0;
/* Flush the output bitstream and return the compressed size or 0. */
result = lzx_flush_output(&os);
if (!result && c->destructive)
- lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes);
+ lzx_postprocess(c->in_buffer, c->in_nbytes);
return result;
}