X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=src%2Flzx_compress.c;h=36e27dbed6c2ecdda17b4206e3d8a75c0b299ceb;hp=98142962db6d21c866c01367de675a527e28057d;hb=c9be4d389724d00cec9f5e444efb965a449d2ba8;hpb=e9a04c1cb384cf3cf23d70107e85f79c4ac0a555

diff --git a/src/lzx_compress.c b/src/lzx_compress.c
index 98142962..36e27dbe 100644
--- a/src/lzx_compress.c
+++ b/src/lzx_compress.c
@@ -114,10 +114,9 @@
 #define LZX_BIT_COST		16
 
 /*
- * Consideration of aligned offset costs is disabled for now, due to
- * insufficient benefit gained from the time spent.
+ * Should the compressor take into account the costs of aligned offset symbols?
  */
-#define LZX_CONSIDER_ALIGNED_COSTS	0
+#define LZX_CONSIDER_ALIGNED_COSTS	1
 
 /*
  * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
@@ -1557,16 +1556,18 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
 			u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
 			unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
									is_16_bit);
+			u32 base_cost = cur_node->cost;
+
+		#if LZX_CONSIDER_ALIGNED_COSTS
+			if (offset_data >= 16)
+				base_cost += c->costs.aligned[offset_data &
+							      LZX_ALIGNED_OFFSET_BITMASK];
+		#endif
+
 			do {
-				u32 cost = cur_node->cost +
+				u32 cost = base_cost +
 					   c->costs.match_cost[offset_slot][
							next_len - LZX_MIN_MATCH_LEN];
-			#if LZX_CONSIDER_ALIGNED_COSTS
-				if (lzx_extra_offset_bits[offset_slot] >=
-				    LZX_NUM_ALIGNED_OFFSET_BITS)
-					cost += c->costs.aligned[offset_data &
-								 LZX_ALIGNED_OFFSET_BITMASK];
-			#endif
 				if (cost < (cur_node + next_len)->cost) {
 					(cur_node + next_len)->cost = cost;
 					(cur_node + next_len)->item =
@@ -1635,7 +1636,7 @@ lzx_compute_match_costs(struct lzx_compressor *c)
 		unsigned i;
 
 	#if LZX_CONSIDER_ALIGNED_COSTS
-		if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
+		if (offset_slot >= 8)
 			extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
 	#endif
 
@@ -1778,9 +1779,9 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
 	const u8 * const in_begin = c->in_buffer;
 	const u8 *	 in_next = in_begin;
 	const u8 * const in_end = in_begin + c->in_nbytes;
-	unsigned max_len = LZX_MAX_MATCH_LEN;
-	unsigned nice_len = min(c->nice_match_length, max_len);
-	u32 next_hash = 0;
+	u32 max_len = LZX_MAX_MATCH_LEN;
+	u32 nice_len = min(c->nice_match_length, max_len);
+	u32 next_hashes[2] = {};
 	struct lzx_lru_queue queue;
 
 	CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
@@ -1796,20 +1797,14 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
 		struct lz_match *cache_ptr = c->match_cache;
 		do {
 			struct lz_match *lz_matchptr;
-			unsigned best_len;
+			u32 best_len;
 
 			/* If approaching the end of the input buffer, adjust
 			 * 'max_len' and 'nice_len' accordingly.  */
 			if (unlikely(max_len > in_end - in_next)) {
 				max_len = in_end - in_next;
 				nice_len = min(max_len, nice_len);
-
-				/* This extra check is needed to ensure that we
-				 * never output a length 2 match of the very
-				 * last two bytes with the very first two bytes,
-				 * since such a match has an offset too large to
-				 * be represented.  */
-				if (unlikely(max_len < 3)) {
+				if (unlikely(max_len < 5)) {
 					in_next++;
 					cache_ptr->length = 0;
 					cache_ptr++;
@@ -1824,7 +1819,7 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
 							    max_len,
 							    nice_len,
 							    c->max_search_depth,
-							    &next_hash,
+							    next_hashes,
 							    &best_len,
 							    cache_ptr + 1);
 			in_next++;
@@ -1849,7 +1844,7 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
 			if (unlikely(max_len > in_end - in_next)) {
 				max_len = in_end - in_next;
 				nice_len = min(max_len, nice_len);
-				if (unlikely(max_len < 3)) {
+				if (unlikely(max_len < 5)) {
 					in_next++;
 					cache_ptr->length = 0;
 					cache_ptr++;
@@ -1862,7 +1857,7 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
 							    max_len,
 							    nice_len,
 							    c->max_search_depth,
-							    &next_hash);
+							    next_hashes);
 			in_next++;
 			cache_ptr->length = 0;
 			cache_ptr++;
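
The first two hunks rest on a property of the LZX format: adjusted offsets ("offset_data") of 16 and above fall into offset slot 8 and above, and those are exactly the slots with at least LZX_NUM_ALIGNED_OFFSET_BITS (3) extra offset bits, whose low 3 bits are entropy-coded as an aligned offset symbol. That is why the tests "offset_data >= 16" and "offset_slot >= 8" can stand in for the table lookup "lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS". And since the aligned symbol's cost depends only on the offset, not on the match length, it can be hoisted out of the per-length loop into a base cost. The sketch below shows that hoisting in isolation; it is not wimlib's actual code, and the struct layout and table sizes are illustrative stand-ins:

#include <stdint.h>

#define LZX_MIN_MATCH_LEN		2
#define LZX_MAX_MATCH_LEN		257
#define LZX_NUM_ALIGNED_OFFSET_BITS	3
#define LZX_ALIGNED_OFFSET_BITMASK	((1 << LZX_NUM_ALIGNED_OFFSET_BITS) - 1)

/* Illustrative cost tables, indexed like the ones in the diff. */
struct costs {
	uint32_t match_cost[30][LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1];
	uint32_t aligned[1 << LZX_NUM_ALIGNED_OFFSET_BITS];
};

/*
 * Return the cheapest total cost over match lengths
 * [min_len, max_len] for one candidate offset.
 */
static uint32_t
cheapest_match_cost(const struct costs *costs, uint32_t cur_cost,
		    unsigned offset_slot, uint32_t offset_data,
		    unsigned min_len, unsigned max_len)
{
	/* Hoisted out of the loop: offsets with offset_data >= 16
	 * (offset slot >= 8) carry an aligned offset symbol whose
	 * cost is the same for every candidate length. */
	uint32_t base_cost = cur_cost;

	if (offset_data >= 16)
		base_cost += costs->aligned[offset_data &
					    LZX_ALIGNED_OFFSET_BITMASK];

	/* Only the length-dependent part remains inside the loop. */
	uint32_t best = UINT32_MAX;
	for (unsigned len = min_len; len <= max_len; len++) {
		uint32_t cost = base_cost +
			costs->match_cost[offset_slot][len - LZX_MIN_MATCH_LEN];
		if (cost < best)
			best = cost;
	}
	return best;
}

The remaining hunks track a bt_matchfinder interface change: the single "next_hash" becomes "next_hashes[2]", presumably one precomputed hash per hash table, and the end-of-buffer cutoff rises from "max_len < 3" to "max_len < 5", consistent with a matchfinder that hashes 4 bytes at the position following the current one. The deleted comment about length-2 matches at the buffer boundary is no longer needed, since such short tails are now skipped outright.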