X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=src%2Flzx_compress.c;h=1bf1bf0970695382bedbe46f2e113c133f3db80d;hp=3e19bd191018ef11f28c7be61debfaf4a80181e2;hb=f316415f58332f07861e954da1088fd772897317;hpb=5391d97b3dfa31dfbe1b9743f3de44042ab2498d diff --git a/src/lzx_compress.c b/src/lzx_compress.c index 3e19bd19..1bf1bf09 100644 --- a/src/lzx_compress.c +++ b/src/lzx_compress.c @@ -331,15 +331,6 @@ lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset) }; } -/* Pop a match offset off the front (most recently used) end of the queue. */ -static inline u32 -lzx_lru_queue_pop(struct lzx_lru_queue *queue_p) -{ - u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK; - queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT; - return offset; -} - /* Swap a match offset to the front of the queue. */ static inline struct lzx_lru_queue lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx) @@ -410,8 +401,10 @@ struct lzx_compressor { /* The matches and literals that the parser has chosen for the current * block. The required length of this array is limited by the maximum - * number of matches that can ever be chosen for a single block. */ - struct lzx_sequence chosen_sequences[DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN)]; + * number of matches that can ever be chosen for a single block, plus + * one for the special entry at the end. */ + struct lzx_sequence chosen_sequences[ + DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1]; /* Tables for mapping adjusted offsets to offset slots */ @@ -548,7 +541,7 @@ struct lzx_output_bitstream { /* Can the specified number of bits always be added to 'bitbuf' after any * pending 16-bit coding units have been flushed? */ -#define CAN_BUFFER(n) ((n) <= (8 * sizeof(machine_word_t)) - 16) +#define CAN_BUFFER(n) ((n) <= (8 * sizeof(machine_word_t)) - 15) /* * Initialize the output bitstream. @@ -585,13 +578,21 @@ lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits) static inline void lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits) { + /* Masking the number of bits to shift is only needed to avoid undefined + * behavior; we don't actually care about the results of bad shifts. On + * x86, the explicit masking generates no extra code. 
*/ + const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1; + if (os->end - os->next < 6) return; - put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 16), os->next + 0); + put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 16) & + shift_mask), os->next + 0); if (max_num_bits > 16) - put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 32), os->next + 2); + put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 32) & + shift_mask), os->next + 2); if (max_num_bits > 32) - put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 48), os->next + 4); + put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 48) & + shift_mask), os->next + 4); os->next += (os->bitcount >> 4) << 1; os->bitcount &= 15; } @@ -634,7 +635,7 @@ lzx_make_huffman_codes(struct lzx_compressor *c) STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 && MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN); - STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 9 && + STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 && LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN); STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS && ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN); @@ -901,23 +902,30 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type, unsigned lit1 = block_data[1]; unsigned lit2 = block_data[2]; unsigned lit3 = block_data[3]; - lzx_add_bits(os, codes->codewords.main[lit0], codes->lens.main[lit0]); - lzx_add_bits(os, codes->codewords.main[lit1], codes->lens.main[lit1]); - lzx_add_bits(os, codes->codewords.main[lit2], codes->lens.main[lit2]); - lzx_add_bits(os, codes->codewords.main[lit3], codes->lens.main[lit3]); + lzx_add_bits(os, codes->codewords.main[lit0], + codes->lens.main[lit0]); + lzx_add_bits(os, codes->codewords.main[lit1], + codes->lens.main[lit1]); + lzx_add_bits(os, codes->codewords.main[lit2], + codes->lens.main[lit2]); + lzx_add_bits(os, codes->codewords.main[lit3], + codes->lens.main[lit3]); lzx_flush_bits(os, 4 * MAIN_CODEWORD_LIMIT); block_data += 4; litrunlen -= 4; } if (litrunlen--) { unsigned lit = *block_data++; - lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]); + lzx_add_bits(os, codes->codewords.main[lit], + codes->lens.main[lit]); if (litrunlen--) { unsigned lit = *block_data++; - lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]); + lzx_add_bits(os, codes->codewords.main[lit], + codes->lens.main[lit]); if (litrunlen--) { unsigned lit = *block_data++; - lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]); + lzx_add_bits(os, codes->codewords.main[lit], + codes->lens.main[lit]); lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT); } else { lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT); @@ -930,7 +938,8 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type, /* 32-bit: write 1 literal at a time. */ do { unsigned lit = *block_data++; - lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]); + lzx_add_bits(os, codes->codewords.main[lit], + codes->lens.main[lit]); lzx_flush_bits(os, MAIN_CODEWORD_LIMIT); } while (--litrunlen); } @@ -970,8 +979,10 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type, /* If needed, output the length symbol for the match. 
*/ if (adjusted_length >= LZX_NUM_PRIMARY_LENS) { - lzx_add_bits(os, codes->codewords.len[adjusted_length - LZX_NUM_PRIMARY_LENS], - codes->lens.len[adjusted_length - LZX_NUM_PRIMARY_LENS]); + lzx_add_bits(os, codes->codewords.len[adjusted_length - + LZX_NUM_PRIMARY_LENS], + codes->lens.len[adjusted_length - + LZX_NUM_PRIMARY_LENS]); if (!CAN_BUFFER(MAX_MATCH_BITS)) lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT); } @@ -989,11 +1000,15 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type, if (!CAN_BUFFER(MAX_MATCH_BITS)) lzx_flush_bits(os, 14); - lzx_add_bits(os, codes->codewords.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK], - codes->lens.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]); + lzx_add_bits(os, codes->codewords.aligned[adjusted_offset & + LZX_ALIGNED_OFFSET_BITMASK], + codes->lens.aligned[adjusted_offset & + LZX_ALIGNED_OFFSET_BITMASK]); if (!CAN_BUFFER(MAX_MATCH_BITS)) lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT); } else { + STATIC_ASSERT(CAN_BUFFER(17)); + lzx_add_bits(os, extra_bits, num_extra_bits); if (!CAN_BUFFER(MAX_MATCH_BITS)) lzx_flush_bits(os, 17); @@ -1585,8 +1600,7 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c, * of coding the literal is integrated into the queue update * code below. */ literal = *in_next++; - cost = cur_node->cost + - c->costs.main[lzx_main_symbol_for_literal(literal)]; + cost = cur_node->cost + c->costs.main[literal]; /* Advance to the next position. */ cur_node++; @@ -1626,13 +1640,15 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c, static void lzx_compute_match_costs(struct lzx_compressor *c) { - unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order); + unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) / + LZX_NUM_LEN_HEADERS; struct lzx_costs *costs = &c->costs; for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) { u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST; - unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0); + unsigned main_symbol = LZX_NUM_CHARS + (offset_slot * + LZX_NUM_LEN_HEADERS); unsigned i; #if LZX_CONSIDER_ALIGNED_COSTS @@ -1710,15 +1726,21 @@ lzx_update_costs(struct lzx_compressor *c) unsigned i; const struct lzx_lens *lens = &c->codes[c->codes_index].lens; - for (i = 0; i < c->num_main_syms; i++) - c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST; + for (i = 0; i < c->num_main_syms; i++) { + c->costs.main[i] = (lens->main[i] ? lens->main[i] : + MAIN_CODEWORD_LIMIT) * LZX_BIT_COST; + } - for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) - c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST; + for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) { + c->costs.len[i] = (lens->len[i] ? lens->len[i] : + LENGTH_CODEWORD_LIMIT) * LZX_BIT_COST; + } #if LZX_CONSIDER_ALIGNED_COSTS - for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) - c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST; + for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) { + c->costs.aligned[i] = (lens->aligned[i] ? 
lens->aligned[i] : + ALIGNED_CODEWORD_LIMIT) * LZX_BIT_COST; + } #endif lzx_compute_match_costs(c); @@ -1779,9 +1801,9 @@ lzx_compress_near_optimal(struct lzx_compressor *c, const u8 * const in_begin = c->in_buffer; const u8 * in_next = in_begin; const u8 * const in_end = in_begin + c->in_nbytes; - unsigned max_len = LZX_MAX_MATCH_LEN; - unsigned nice_len = min(c->nice_match_length, max_len); - u32 next_hash = 0; + u32 max_len = LZX_MAX_MATCH_LEN; + u32 nice_len = min(c->nice_match_length, max_len); + u32 next_hashes[2] = {}; struct lzx_lru_queue queue; CALL_BT_MF(is_16_bit, c, bt_matchfinder_init); @@ -1797,20 +1819,16 @@ lzx_compress_near_optimal(struct lzx_compressor *c, struct lz_match *cache_ptr = c->match_cache; do { struct lz_match *lz_matchptr; - unsigned best_len; + u32 best_len; /* If approaching the end of the input buffer, adjust * 'max_len' and 'nice_len' accordingly. */ if (unlikely(max_len > in_end - in_next)) { max_len = in_end - in_next; nice_len = min(max_len, nice_len); - - /* This extra check is needed to ensure that we - * never output a length 2 match of the very - * last two bytes with the very first two bytes, - * since such a match has an offset too large to - * be represented. */ - if (unlikely(max_len < 3)) { + if (unlikely(max_len < + BT_MATCHFINDER_REQUIRED_NBYTES)) + { in_next++; cache_ptr->length = 0; cache_ptr++; @@ -1819,13 +1837,14 @@ lzx_compress_near_optimal(struct lzx_compressor *c, } /* Check for matches. */ - lz_matchptr = CALL_BT_MF(is_16_bit, c, bt_matchfinder_get_matches, + lz_matchptr = CALL_BT_MF(is_16_bit, c, + bt_matchfinder_get_matches, in_begin, in_next - in_begin, max_len, nice_len, c->max_search_depth, - &next_hash, + next_hashes, &best_len, cache_ptr + 1); in_next++; @@ -1850,20 +1869,23 @@ lzx_compress_near_optimal(struct lzx_compressor *c, if (unlikely(max_len > in_end - in_next)) { max_len = in_end - in_next; nice_len = min(max_len, nice_len); - if (unlikely(max_len < 3)) { + if (unlikely(max_len < + BT_MATCHFINDER_REQUIRED_NBYTES)) + { in_next++; cache_ptr->length = 0; cache_ptr++; continue; } } - CALL_BT_MF(is_16_bit, c, bt_matchfinder_skip_position, + CALL_BT_MF(is_16_bit, c, + bt_matchfinder_skip_position, in_begin, in_next - in_begin, max_len, nice_len, c->max_search_depth, - &next_hash); + next_hashes); in_next++; cache_ptr->length = 0; cache_ptr++; @@ -2016,7 +2038,8 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os, /* Find the longest match at the current position. 
*/ - cur_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match, + cur_len = CALL_HC_MF(is_16_bit, c, + hc_matchfinder_longest_match, in_begin, in_next - in_begin, 2, @@ -2082,7 +2105,8 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os, nice_len = min(max_len, nice_len); } - next_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match, + next_len = CALL_HC_MF(is_16_bit, c, + hc_matchfinder_longest_match, in_begin, in_next - in_begin, cur_len - 2, @@ -2142,7 +2166,8 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os, lzx_record_match(c, cur_len, cur_offset_data, recent_offsets, is_16_bit, &litrunlen, &next_seq); - in_next = CALL_HC_MF(is_16_bit, c, hc_matchfinder_skip_positions, + in_next = CALL_HC_MF(is_16_bit, c, + hc_matchfinder_skip_positions, in_begin, in_next - in_begin, in_end - in_begin, @@ -2264,8 +2289,8 @@ lzx_create_compressor(size_t max_bufsize, unsigned compression_level, c->impl = lzx_compress_lazy_16; else c->impl = lzx_compress_lazy_32; - c->max_search_depth = (36 * compression_level) / 20; - c->nice_match_length = (72 * compression_level) / 20; + c->max_search_depth = (60 * compression_level) / 20; + c->nice_match_length = (80 * compression_level) / 20; /* lzx_compress_lazy() needs max_search_depth >= 2 because it * halves the max_search_depth when attempting a lazy match, and @@ -2284,7 +2309,7 @@ lzx_create_compressor(size_t max_bufsize, unsigned compression_level, /* Scale nice_match_length and max_search_depth with the * compression level. */ c->max_search_depth = (24 * compression_level) / 50; - c->nice_match_length = (32 * compression_level) / 50; + c->nice_match_length = (48 * compression_level) / 50; /* Set a number of optimization passes appropriate for the * compression level. */ @@ -2345,7 +2370,7 @@ lzx_compress(const void *restrict in, size_t in_nbytes, else memcpy(c->in_buffer, in, in_nbytes); c->in_nbytes = in_nbytes; - lzx_do_e8_preprocessing(c->in_buffer, in_nbytes); + lzx_preprocess(c->in_buffer, in_nbytes); /* Initially, the previous Huffman codeword lengths are all zeroes. */ c->codes_index = 0; @@ -2360,7 +2385,7 @@ lzx_compress(const void *restrict in, size_t in_nbytes, /* Flush the output bitstream and return the compressed size or 0. */ result = lzx_flush_output(&os); if (!result && c->destructive) - lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes); + lzx_postprocess(c->in_buffer, c->in_nbytes); return result; }
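
A couple of the changes above are easier to see in isolation. The lzx_flush_bits() hunk masks each shift count by the bit width of the buffer so that a "negative" count (when fewer than 16, 32, or 48 bits are pending) is merely a harmless garbage shift rather than undefined behavior, and the CAN_BUFFER() change to wordbits - 15 matches the fact that at most 15 bits remain pending after a flush (bitcount &= 15), which is what lets the new STATIC_ASSERT(CAN_BUFFER(17)) hold even for a 32-bit bit buffer. The sketch below is a simplified, stand-alone restatement of that flushing scheme; the names (out_bitstream, add_bits, flush_bits) and the unconditional little-endian stores are invented for illustration and are not wimlib's actual interface.

	/* Stand-alone sketch of the masked-shift flush idea from the diff above.
	 * Names and structure are simplified; this is not wimlib's real API. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct out_bitstream {
		uint64_t bitbuf;   /* pending bits; the oldest pending bit is bit (bitcount - 1) */
		unsigned bitcount; /* number of pending bits, always < 64 here */
		uint8_t *next;     /* next byte to write in the output buffer */
	};

	/* Append 'n' bits (1 <= n <= 32) to the low end of the bit buffer. */
	static void add_bits(struct out_bitstream *os, uint32_t bits, unsigned n)
	{
		os->bitbuf = (os->bitbuf << n) | bits;
		os->bitcount += n;
	}

	/* Flush the completed 16-bit coding units.  Shift counts such as
	 * 'bitcount - 16' can wrap when fewer bits are pending; masking them keeps
	 * the shifts well defined, and the resulting garbage units do not matter
	 * because 'next' only advances past completed units. */
	static void flush_bits(struct out_bitstream *os)
	{
		const unsigned shift_mask = 8 * sizeof(os->bitbuf) - 1;
		uint16_t u0 = (uint16_t)(os->bitbuf >> ((os->bitcount - 16) & shift_mask));
		uint16_t u1 = (uint16_t)(os->bitbuf >> ((os->bitcount - 32) & shift_mask));
		uint16_t u2 = (uint16_t)(os->bitbuf >> ((os->bitcount - 48) & shift_mask));

		memcpy(os->next + 0, &u0, 2);  /* little-endian host assumed for brevity */
		memcpy(os->next + 2, &u1, 2);
		memcpy(os->next + 4, &u2, 2);

		os->next += (os->bitcount >> 4) << 1; /* advance by completed units only */
		os->bitcount &= 15;                   /* at most 15 bits stay pending */
	}

	int main(void)
	{
		uint8_t buf[16] = {0};
		struct out_bitstream os = { .bitbuf = 0, .bitcount = 0, .next = buf };

		add_bits(&os, 0x1ABCD, 17);  /* a 17-bit field, cf. CAN_BUFFER(17) */
		flush_bits(&os);
		printf("flushed %d bytes, %u bits still pending\n",
		       (int)(os.next - buf), os.bitcount);
		return 0;
	}

Writing all three 16-bit units unconditionally and only advancing 'next' by the completed units mirrors the overwrite-then-advance pattern in the diff; the real lzx_flush_bits() additionally skips the later stores at compile time via the max_num_bits checks.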
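The queue hunk near the top of the diff (where the unused lzx_lru_queue_pop() is dropped) operates on LZX's three most-recently-used match offsets packed into a single 64-bit word. The following sketch re-creates that layout to show how pushing a new offset and swapping a repeat offset to the front work on the packed representation; the 21-bit field width and all names here are assumptions made for illustration, not wimlib's definitions.

	/* Illustrative 3-entry LRU offset queue packed into a u64, in the spirit
	 * of lzx_lru_queue_push()/lzx_lru_queue_swap() above.  Field width and
	 * names are assumptions for this example. */
	#include <stdint.h>
	#include <stdio.h>

	#define OFFSET_SHIFT 21
	#define OFFSET_MASK  (((uint64_t)1 << OFFSET_SHIFT) - 1)

	struct lru_queue {
		uint64_t R;  /* R0 in bits 0..20, R1 in bits 21..41, R2 in bits 42..62 */
	};

	static uint32_t lru_get(struct lru_queue q, unsigned idx)
	{
		return (uint32_t)((q.R >> (idx * OFFSET_SHIFT)) & OFFSET_MASK);
	}

	/* Push a new offset onto the front (R0); the oldest entry (R2) falls off.
	 * Any bit shifted above the three 21-bit fields is simply never read. */
	static struct lru_queue lru_push(struct lru_queue q, uint32_t offset)
	{
		return (struct lru_queue){ .R = (q.R << OFFSET_SHIFT) | offset };
	}

	/* Exchange entry 'idx' with the front entry, as LZX does when a repeat
	 * offset other than R0 is reused. */
	static struct lru_queue lru_swap(struct lru_queue q, unsigned idx)
	{
		uint64_t r0 = lru_get(q, 0), ri = lru_get(q, idx);

		q.R &= ~(OFFSET_MASK | (OFFSET_MASK << (idx * OFFSET_SHIFT)));
		q.R |= ri | (r0 << (idx * OFFSET_SHIFT));
		return q;
	}

	int main(void)
	{
		struct lru_queue q = { 0 };

		q = lru_push(q, 1);     /* {R0=1,    R1=0,   R2=0} */
		q = lru_push(q, 100);   /* {R0=100,  R1=1,   R2=0} */
		q = lru_push(q, 5000);  /* {R0=5000, R1=100, R2=1} */
		q = lru_swap(q, 2);     /* reuse R2: {R0=1, R1=100, R2=5000} */

		printf("R0=%u R1=%u R2=%u\n",
		       lru_get(q, 0), lru_get(q, 1), lru_get(q, 2));
		return 0;
	}

Keeping all three offsets in one integer makes the queue cheap to copy and update by value, which is how the push and swap helpers in the diff are written.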