#define LZX_BIT_COST 16
/*
- * Consideration of aligned offset costs is disabled for now, due to
- * insufficient benefit gained from the time spent.
+ * Should the compressor take into account the costs of aligned offset symbols?
*/
-#define LZX_CONSIDER_ALIGNED_COSTS 0
+#define LZX_CONSIDER_ALIGNED_COSTS 1
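
A note on units: LZX_BIT_COST above is taken here to be the cost model's fixed-point scale factor, i.e. costs are stored in units of 1/16 bit, which is what lets a table-driven cost like the aligned-offset one enabled by this toggle carry sub-bit precision. A minimal sketch of that convention (bits_to_cost is a hypothetical helper, not a function from this file):

    /* Convert a Huffman codeword length in whole bits into the
     * fixed-point units assumed for the cost tables: with
     * LZX_BIT_COST == 16, one unit is 1/16 of a bit. */
    static inline u32
    bits_to_cost(unsigned codeword_len)
    {
        return (u32)codeword_len * LZX_BIT_COST;
    }
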
/*
* LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
#include "wimlib/util.h"
/* Matchfinders with 16-bit positions */
-#define pos_t u16
-#define MF_SUFFIX _16
+#define mf_pos_t u16
+#define MF_SUFFIX _16
#include "wimlib/bt_matchfinder.h"
#include "wimlib/hc_matchfinder.h"
/* Matchfinders with 32-bit positions */
-#undef pos_t
+#undef mf_pos_t
#undef MF_SUFFIX
-#define pos_t u32
-#define MF_SUFFIX _32
+#define mf_pos_t u32
+#define MF_SUFFIX _32
#include "wimlib/bt_matchfinder.h"
#include "wimlib/hc_matchfinder.h"
const struct lzx_lens * prev_lens,
struct lzx_output_bitstream * os)
{
- LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
- block_type == LZX_BLOCKTYPE_VERBATIM);
-
/* The first three bits indicate the type of block and are one of the
* LZX_BLOCKTYPE_* constants. */
lzx_write_bits(os, block_type, 3);
u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
is_16_bit);
+ u32 base_cost = cur_node->cost;
+
+ #if LZX_CONSIDER_ALIGNED_COSTS
+ if (offset_data >= 16)
+ base_cost += c->costs.aligned[offset_data &
+ LZX_ALIGNED_OFFSET_BITMASK];
+ #endif
+
do {
- u32 cost = cur_node->cost +
+ u32 cost = base_cost +
c->costs.match_cost[offset_slot][
next_len - LZX_MIN_MATCH_LEN];
- #if LZX_CONSIDER_ALIGNED_COSTS
- if (lzx_extra_offset_bits[offset_slot] >=
- LZX_NUM_ALIGNED_OFFSET_BITS)
- cost += c->costs.aligned[offset_data &
- LZX_ALIGNED_OFFSET_BITMASK];
- #endif
if (cost < (cur_node + next_len)->cost) {
(cur_node + next_len)->cost = cost;
(cur_node + next_len)->item =
unsigned i;
#if LZX_CONSIDER_ALIGNED_COSTS
- if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
+ if (offset_slot >= 8)
extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
#endif
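
Both aligned-cost hunks above rest on the same observation: the aligned-offset component of a match's cost depends only on the offset, never on the match length. In the path search it can therefore be folded into base_cost once, outside the per-length loop, and offset_data >= 16 is a cheaper equivalent of the old lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS test, since offset slots 8 and above (base offset 16) are exactly the slots with at least 3 extra offset bits. The second hunk is the matching adjustment on the precomputed side: those 3 bits are now priced through the aligned code, so their verbatim price (LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST) comes back out of extra_cost. A sketch of the per-offset component under those assumptions (the struct and helper are hypothetical stand-ins for the real cost tables):

    /* Minimal view of the cost tables: only the aligned offset code's
     * 8 symbol costs matter here. */
    struct aligned_costs_sketch { u32 aligned[8]; };

    static u32
    aligned_cost_of(const struct aligned_costs_sketch *costs, u32 offset_data)
    {
        /* Slots >= 8, i.e. offset_data >= 16, send their low 3 offset
         * bits through the aligned offset code instead of verbatim. */
        if (offset_data >= 16)
            return costs->aligned[offset_data & 7]; /* LZX_ALIGNED_OFFSET_BITMASK */
        return 0;
    }
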
const u8 * const in_begin = c->in_buffer;
const u8 * in_next = in_begin;
const u8 * const in_end = in_begin + c->in_nbytes;
- unsigned max_len = LZX_MAX_MATCH_LEN;
- unsigned nice_len = min(c->nice_match_length, max_len);
- u32 next_hash = 0;
+ u32 max_len = LZX_MAX_MATCH_LEN;
+ u32 nice_len = min(c->nice_match_length, max_len);
+ u32 next_hashes[2] = {};
struct lzx_lru_queue queue;
CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
struct lz_match *cache_ptr = c->match_cache;
do {
struct lz_match *lz_matchptr;
- unsigned best_len;
+ u32 best_len;
/* If approaching the end of the input buffer, adjust
* 'max_len' and 'nice_len' accordingly. */
if (unlikely(max_len > in_end - in_next)) {
max_len = in_end - in_next;
nice_len = min(max_len, nice_len);
-
- /* This extra check is needed to ensure that we
- * never output a length 2 match of the very
- * last two bytes with the very first two bytes,
- * since such a match has an offset too large to
- * be represented. */
- if (unlikely(max_len < 3)) {
+ if (unlikely(max_len < 5)) {
in_next++;
cache_ptr->length = 0;
cache_ptr++;
max_len,
nice_len,
c->max_search_depth,
- &next_hash,
+ next_hashes,
&best_len,
cache_ptr + 1);
in_next++;
if (unlikely(max_len > in_end - in_next)) {
max_len = in_end - in_next;
nice_len = min(max_len, nice_len);
- if (unlikely(max_len < 3)) {
+ if (unlikely(max_len < 5)) {
in_next++;
cache_ptr->length = 0;
cache_ptr++;
max_len,
nice_len,
c->max_search_depth,
- &next_hash);
+ next_hashes);
in_next++;
cache_ptr->length = 0;
cache_ptr++;
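
The tail guard tightens from max_len < 3 to max_len < 5 because the matchfinder calls now thread two precomputed hash values (next_hashes) from one position to the next, which requires more lookahead than the old single hash did; the stricter bound also subsumes the length-2 corner case the deleted comment used to guard against. A sketch of the resulting tail behavior, reusing this function's own names (the loop shape is illustrative; the real code takes one literal per iteration of the outer loop):

    /* Once fewer than 5 bytes remain, the matchfinder is skipped and
     * every remaining position is recorded as a literal, i.e. a
     * length-0 entry in the match cache. */
    while (in_next != in_end) {
        cache_ptr->length = 0;
        cache_ptr++;
        in_next++;
    }
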
unsigned *rep_max_idx_ret)
{
STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
- LZX_ASSERT(bytes_remaining >= 2);
const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
const u16 next_2_bytes = load_u16_unaligned(in_next);
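
For context on the surviving lines: next_2_bytes feeds the repeat-offset screening that follows, where a single unaligned 16-bit load lets each of the three recent offsets be rejected with one integer comparison before any byte-by-byte extension is paid for. A minimal sketch of that step, assuming load_u16_unaligned as used above (repeat_match_len_sketch is a hypothetical helper; callers guarantee at least 2 bytes remain, as the dropped assertion reflected):

    static u32
    repeat_match_len_sketch(const u8 *in_next, const u8 *matchptr,
                            u32 max_len, u16 next_2_bytes)
    {
        /* Cheap screen: one 16-bit compare rejects most candidates. */
        if (load_u16_unaligned(matchptr) != next_2_bytes)
            return 0;

        /* Only on a hit, extend the match byte by byte. */
        u32 len = 2;
        while (len < max_len && matchptr[len] == in_next[len])
            len++;
        return len;
    }
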