lzx_compress.c: remove unused function
diff --git a/src/lzx_compress.c b/src/lzx_compress.c
index 863bb5e52fae165823c0c4e89ee8122c3745c8e0..1bf1bf0970695382bedbe46f2e113c133f3db80d 100644
--- a/src/lzx_compress.c
+++ b/src/lzx_compress.c
 #define LZX_BIT_COST           16
 
 /*
- * Consideration of aligned offset costs is disabled for now, due to
- * insufficient benefit gained from the time spent.
+ * Should the compressor take into account the costs of aligned offset symbols?
  */
-#define LZX_CONSIDER_ALIGNED_COSTS     0
+#define LZX_CONSIDER_ALIGNED_COSTS     1
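
LZX_BIT_COST above is the scale of the parser's fixed-point cost model: since
codeword lengths are multiplied by 16, costs are effectively kept in units of
1/16 bit, so integer arithmetic can still express fractional-bit differences.
A minimal illustration of the convention (names hypothetical, not wimlib's
API):

	/* Costs in 1/16-bit units: a 5-bit codeword costs 5 * 16 = 80,
	 * and "half a bit" is representable as 8. */
	#define BIT_COST 16

	static unsigned
	symbol_cost(unsigned codeword_len)	/* length in whole bits */
	{
		return codeword_len * BIT_COST;
	}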
 
 /*
  * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
 #include "wimlib/util.h"
 
 /* Matchfinders with 16-bit positions  */
-#define pos_t  u16
-#define MF_SUFFIX _16
+#define mf_pos_t       u16
+#define MF_SUFFIX      _16
 #include "wimlib/bt_matchfinder.h"
 #include "wimlib/hc_matchfinder.h"
 
 /* Matchfinders with 32-bit positions  */
-#undef pos_t
+#undef mf_pos_t
 #undef MF_SUFFIX
-#define pos_t  u32
-#define MF_SUFFIX _32
+#define mf_pos_t       u32
+#define MF_SUFFIX      _32
 #include "wimlib/bt_matchfinder.h"
 #include "wimlib/hc_matchfinder.h"
 
@@ -332,15 +331,6 @@ lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
        };
 }
 
-/* Pop a match offset off the front (most recently used) end of the queue.  */
-static inline u32
-lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
-{
-       u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
-       queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
-       return offset;
-}
-
 /* Swap a match offset to the front of the queue.  */
 static inline struct lzx_lru_queue
 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
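
Packing all three recent offsets into one 64-bit word makes the queue a value
type: push and swap become a few shifts and masks, and a whole queue can be
copied between optimization states in a single register move. A simplified
stand-alone version of the packing (the 21-bit field width is an assumption
for the sketch, chosen to cover LZX's maximum window offsets):

	#include <stdint.h>

	#define OFF_BITS 21				/* bits per packed offset */
	#define OFF_MASK ((UINT64_C(1) << OFF_BITS) - 1)

	/* Push 'offset' onto the most-recently-used end; the oldest entry
	 * is shifted out past the three 21-bit fields. */
	static inline uint64_t
	queue_push(uint64_t q, uint32_t offset)
	{
		return ((q << OFF_BITS) | offset) &
		       ((UINT64_C(1) << (3 * OFF_BITS)) - 1);
	}

	/* Read entry 'idx', where idx 0 is the most recently used. */
	static inline uint32_t
	queue_get(uint64_t q, unsigned idx)
	{
		return (uint32_t)((q >> (idx * OFF_BITS)) & OFF_MASK);
	}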
@@ -411,8 +401,10 @@ struct lzx_compressor {
 
        /* The matches and literals that the parser has chosen for the current
         * block.  The required length of this array is limited by the maximum
-        * number of matches that can ever be chosen for a single block.  */
-       struct lzx_sequence chosen_sequences[DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN)];
+        * number of matches that can ever be chosen for a single block, plus
+        * one for the special entry at the end.  */
+       struct lzx_sequence chosen_sequences[
+                      DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];
 
        /* Tables for mapping adjusted offsets to offset slots  */
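
The bound on the array is simple arithmetic: every match consumes at least
LZX_MIN_MATCH_LEN input bytes, so a block of LZX_DIV_BLOCK_SIZE bytes contains
at most DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN) matches, plus the
one terminating entry. Worked through with the values LZX uses (assuming
32768-byte blocks and 2-byte minimum matches):

	#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
	/* DIV_ROUND_UP(32768, 2) + 1  ==  16384 + 1  ==  16385 entries */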
 
@@ -549,7 +541,7 @@ struct lzx_output_bitstream {
 
 /* Can the specified number of bits always be added to 'bitbuf' after any
  * pending 16-bit coding units have been flushed?  */
-#define CAN_BUFFER(n)  ((n) <= (8 * sizeof(machine_word_t)) - 16)
+#define CAN_BUFFER(n)  ((n) <= (8 * sizeof(machine_word_t)) - 15)
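
The bound follows from lzx_flush_bits() below: after a flush, 'os->bitcount
&= 15' leaves at most 15 pending bits in the buffer, so a machine word can
always accept another wordbits - 15 bits; the previous '- 16' was one bit too
conservative. Concretely, on a 64-bit build:

	/* After any flush, bitcount <= 15, so 64 - 15 = 49 more bits always
	 * fit: CAN_BUFFER(48) and CAN_BUFFER(49) hold, CAN_BUFFER(50) does
	 * not. */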
 
 /*
  * Initialize the output bitstream.
@@ -586,13 +578,21 @@ lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
 static inline void
 lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
 {
+       /* Masking the number of bits to shift is only needed to avoid undefined
+        * behavior; we don't actually care about the results of bad shifts.  On
+        * x86, the explicit masking generates no extra code.  */
+       const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1;
+
        if (os->end - os->next < 6)
                return;
-       put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 16), os->next + 0);
+       put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 16) &
+                                           shift_mask), os->next + 0);
        if (max_num_bits > 16)
-               put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 32), os->next + 2);
+               put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 32) &
+                                               shift_mask), os->next + 2);
        if (max_num_bits > 32)
-               put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 48), os->next + 4);
+               put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 48) &
+                                               shift_mask), os->next + 4);
        os->next += (os->bitcount >> 4) << 1;
        os->bitcount &= 15;
 }
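
Shifting by the operand width or more is undefined behavior in C even when
the result would be discarded, which is why the shift counts above are
masked; on x86 the masking is free because the hardware shift instructions
already truncate the count. The idiom in isolation:

	#include <stdint.h>

	/* Extract the next 16-bit coding unit from the top of the bit
	 * buffer.  With bitcount < 16, (bitcount - 16) wraps to a huge
	 * unsigned value; masking with 63 keeps the shift well-defined, and
	 * callers never use the result in that case anyway. */
	static inline uint16_t
	peek_coding_unit(uint64_t bitbuf, unsigned bitcount)
	{
		const unsigned shift_mask = 8 * sizeof(bitbuf) - 1;  /* 63 */

		return (uint16_t)(bitbuf >> ((bitcount - 16) & shift_mask));
	}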
@@ -635,7 +635,7 @@ lzx_make_huffman_codes(struct lzx_compressor *c)
 
        STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
                      MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
-       STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 9 &&
+       STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
                      LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
        STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
                      ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
@@ -902,23 +902,30 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                                        unsigned lit1 = block_data[1];
                                        unsigned lit2 = block_data[2];
                                        unsigned lit3 = block_data[3];
-                                       lzx_add_bits(os, codes->codewords.main[lit0], codes->lens.main[lit0]);
-                                       lzx_add_bits(os, codes->codewords.main[lit1], codes->lens.main[lit1]);
-                                       lzx_add_bits(os, codes->codewords.main[lit2], codes->lens.main[lit2]);
-                                       lzx_add_bits(os, codes->codewords.main[lit3], codes->lens.main[lit3]);
+                                       lzx_add_bits(os, codes->codewords.main[lit0],
+                                                    codes->lens.main[lit0]);
+                                       lzx_add_bits(os, codes->codewords.main[lit1],
+                                                    codes->lens.main[lit1]);
+                                       lzx_add_bits(os, codes->codewords.main[lit2],
+                                                    codes->lens.main[lit2]);
+                                       lzx_add_bits(os, codes->codewords.main[lit3],
+                                                    codes->lens.main[lit3]);
                                        lzx_flush_bits(os, 4 * MAIN_CODEWORD_LIMIT);
                                        block_data += 4;
                                        litrunlen -= 4;
                                }
                                if (litrunlen--) {
                                        unsigned lit = *block_data++;
-                                       lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+                                       lzx_add_bits(os, codes->codewords.main[lit],
+                                                    codes->lens.main[lit]);
                                        if (litrunlen--) {
                                                unsigned lit = *block_data++;
-                                               lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+                                               lzx_add_bits(os, codes->codewords.main[lit],
+                                                            codes->lens.main[lit]);
                                                if (litrunlen--) {
                                                        unsigned lit = *block_data++;
-                                                       lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+                                                       lzx_add_bits(os, codes->codewords.main[lit],
+                                                                    codes->lens.main[lit]);
                                                        lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
                                                } else {
                                                        lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
@@ -931,7 +938,8 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                                /* 32-bit: write 1 literal at a time.  */
                                do {
                                        unsigned lit = *block_data++;
-                                       lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+                                       lzx_add_bits(os, codes->codewords.main[lit],
+                                                    codes->lens.main[lit]);
                                        lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
                                } while (--litrunlen);
                        }
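
The nested branches above exist so that, when the word size allows it,
several codewords are accumulated before a single flush: on 64-bit builds
CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT) holds, so four main-code literals fit
between flushes. The shape of the pattern, reduced to a sketch (the helper
declarations are assumptions, and 12 stands in for the codeword limit):

	#include <stddef.h>
	#include <stdint.h>

	struct bitstream;			/* opaque for the sketch */
	void add_bits(struct bitstream *os, uint32_t code, unsigned len);
	void flush_bits(struct bitstream *os, unsigned max_bits);

	/* Write a literal run, batching four codewords per flush on the
	 * assumption that each codeword is at most 12 bits (4 * 12 = 48
	 * bits, which a 64-bit bit buffer can always accept after a
	 * flush). */
	static void
	write_literal_run(struct bitstream *os, const uint8_t *p, size_t n,
			  const uint32_t cw[256], const uint8_t len[256])
	{
		while (n >= 4) {
			add_bits(os, cw[p[0]], len[p[0]]);
			add_bits(os, cw[p[1]], len[p[1]]);
			add_bits(os, cw[p[2]], len[p[2]]);
			add_bits(os, cw[p[3]], len[p[3]]);
			flush_bits(os, 4 * 12);	/* one flush, four symbols */
			p += 4;
			n -= 4;
		}
		while (n--) {
			add_bits(os, cw[*p], len[*p]);
			flush_bits(os, 12);
			p++;
		}
	}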
@@ -971,8 +979,10 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                /* If needed, output the length symbol for the match.  */
 
                if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
-                       lzx_add_bits(os, codes->codewords.len[adjusted_length - LZX_NUM_PRIMARY_LENS],
-                                    codes->lens.len[adjusted_length - LZX_NUM_PRIMARY_LENS]);
+                       lzx_add_bits(os, codes->codewords.len[adjusted_length -
+                                                             LZX_NUM_PRIMARY_LENS],
+                                    codes->lens.len[adjusted_length -
+                                                    LZX_NUM_PRIMARY_LENS]);
                        if (!CAN_BUFFER(MAX_MATCH_BITS))
                                lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
                }
@@ -990,11 +1000,15 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                        if (!CAN_BUFFER(MAX_MATCH_BITS))
                                lzx_flush_bits(os, 14);
 
-                       lzx_add_bits(os, codes->codewords.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK],
-                                    codes->lens.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]);
+                       lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
+                                                                 LZX_ALIGNED_OFFSET_BITMASK],
+                                    codes->lens.aligned[adjusted_offset &
+                                                        LZX_ALIGNED_OFFSET_BITMASK]);
                        if (!CAN_BUFFER(MAX_MATCH_BITS))
                                lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
                } else {
+                       STATIC_ASSERT(CAN_BUFFER(17));
+
                        lzx_add_bits(os, extra_bits, num_extra_bits);
                        if (!CAN_BUFFER(MAX_MATCH_BITS))
                                lzx_flush_bits(os, 17);
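
The 14 in the first flush is the worst case for the verbatim part of an
offset: LZX offsets carry at most 17 extra bits, and in an aligned-offset
block the bottom LZX_NUM_ALIGNED_OFFSET_BITS (3) of those go through the
aligned Huffman code instead of being written raw. A schematic of the split,
with hypothetical helper names (add_bits as declared in the earlier sketch):

	#include <stdint.h>

	struct bitstream;
	void add_bits(struct bitstream *os, uint32_t code, unsigned len);

	/* Write the extra offset bits of one match in an aligned-offset
	 * block: high bits verbatim (at most 17 - 3 = 14 of them), low 3
	 * bits through the aligned code. */
	static void
	write_extra_bits_aligned(struct bitstream *os, uint32_t extra_bits,
				 unsigned num_extra_bits,
				 const uint32_t aligned_cw[8],
				 const uint8_t aligned_len[8])
	{
		if (num_extra_bits >= 3) {
			add_bits(os, extra_bits >> 3, num_extra_bits - 3);
			add_bits(os, aligned_cw[extra_bits & 7],
				 aligned_len[extra_bits & 7]);
		} else {
			add_bits(os, extra_bits, num_extra_bits);
		}
	}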
@@ -1019,9 +1033,6 @@ lzx_write_compressed_block(const u8 *block_begin,
                           const struct lzx_lens * prev_lens,
                           struct lzx_output_bitstream * os)
 {
-       LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
-                  block_type == LZX_BLOCKTYPE_VERBATIM);
-
        /* The first three bits indicate the type of block and are one of the
         * LZX_BLOCKTYPE_* constants.  */
        lzx_write_bits(os, block_type, 3);
@@ -1560,16 +1571,18 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
                                u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
                                unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
                                                                                is_16_bit);
+                               u32 base_cost = cur_node->cost;
+
+                       #if LZX_CONSIDER_ALIGNED_COSTS
+                               if (offset_data >= 16)
+                                       base_cost += c->costs.aligned[offset_data &
+                                                                     LZX_ALIGNED_OFFSET_BITMASK];
+                       #endif
+
                                do {
-                                       u32 cost = cur_node->cost +
+                                       u32 cost = base_cost +
                                                   c->costs.match_cost[offset_slot][
                                                                next_len - LZX_MIN_MATCH_LEN];
-                               #if LZX_CONSIDER_ALIGNED_COSTS
-                                       if (lzx_extra_offset_bits[offset_slot] >=
-                                           LZX_NUM_ALIGNED_OFFSET_BITS)
-                                               cost += c->costs.aligned[offset_data &
-                                                                        LZX_ALIGNED_OFFSET_BITMASK];
-                               #endif
                                        if (cost < (cur_node + next_len)->cost) {
                                                (cur_node + next_len)->cost = cost;
                                                (cur_node + next_len)->item =
@@ -1587,8 +1600,7 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
                 * of coding the literal is integrated into the queue update
                 * code below.  */
                literal = *in_next++;
-               cost = cur_node->cost +
-                      c->costs.main[lzx_main_symbol_for_literal(literal)];
+               cost = cur_node->cost + c->costs.main[literal];
 
                /* Advance to the next position.  */
                cur_node++;
@@ -1628,17 +1640,19 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
 static void
 lzx_compute_match_costs(struct lzx_compressor *c)
 {
-       unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
+       unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) /
+                                       LZX_NUM_LEN_HEADERS;
        struct lzx_costs *costs = &c->costs;
 
        for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
 
                u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
-               unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
+               unsigned main_symbol = LZX_NUM_CHARS + (offset_slot *
+                                                       LZX_NUM_LEN_HEADERS);
                unsigned i;
 
        #if LZX_CONSIDER_ALIGNED_COSTS
-               if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
+               if (offset_slot >= 8)
                        extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
        #endif
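
Each offset slot contributes LZX_NUM_LEN_HEADERS consecutive main symbols
after the 256 literal symbols, which is also why num_offset_slots can be
recovered from num_main_syms above. Worked through with LZX's constants (256
literals, 8 length headers per slot):

	/* Main symbol space: [0,255] literals, then 8 symbols per slot:
	 * slot 0 -> 256..263, slot 1 -> 264..271, slot 3 starts at
	 * 256 + 3*8 = 280.  Inversely:
	 * num_offset_slots = (num_main_syms - 256) / 8. */
	static unsigned
	first_main_symbol_for_slot(unsigned offset_slot)
	{
		return 256 + offset_slot * 8;
	}

The 'offset_slot >= 8' test works because the extra-bits counts are
nondecreasing and slot 8 is the first with at least 3 extra bits, i.e. the
first whose offsets can use the aligned code.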
 
@@ -1712,15 +1726,21 @@ lzx_update_costs(struct lzx_compressor *c)
        unsigned i;
        const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
 
-       for (i = 0; i < c->num_main_syms; i++)
-               c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
+       for (i = 0; i < c->num_main_syms; i++) {
+               c->costs.main[i] = (lens->main[i] ? lens->main[i] :
+                                   MAIN_CODEWORD_LIMIT) * LZX_BIT_COST;
+       }
 
-       for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
-               c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
+       for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
+               c->costs.len[i] = (lens->len[i] ? lens->len[i] :
+                                  LENGTH_CODEWORD_LIMIT) * LZX_BIT_COST;
+       }
 
 #if LZX_CONSIDER_ALIGNED_COSTS
-       for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
-               c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
+       for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+               c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] :
+                                      ALIGNED_CODEWORD_LIMIT) * LZX_BIT_COST;
+       }
 #endif
 
        lzx_compute_match_costs(c);
@@ -1781,9 +1801,9 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
        const u8 * const in_begin = c->in_buffer;
        const u8 *       in_next = in_begin;
        const u8 * const in_end  = in_begin + c->in_nbytes;
-       unsigned max_len = LZX_MAX_MATCH_LEN;
-       unsigned nice_len = min(c->nice_match_length, max_len);
-       u32 next_hash = 0;
+       u32 max_len = LZX_MAX_MATCH_LEN;
+       u32 nice_len = min(c->nice_match_length, max_len);
+       u32 next_hashes[2] = {};
        struct lzx_lru_queue queue;
 
        CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
@@ -1799,20 +1819,16 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
                struct lz_match *cache_ptr = c->match_cache;
                do {
                        struct lz_match *lz_matchptr;
-                       unsigned best_len;
+                       u32 best_len;
 
                        /* If approaching the end of the input buffer, adjust
                         * 'max_len' and 'nice_len' accordingly.  */
                        if (unlikely(max_len > in_end - in_next)) {
                                max_len = in_end - in_next;
                                nice_len = min(max_len, nice_len);
-
-                               /* This extra check is needed to ensure that we
-                                * never output a length 2 match of the very
-                                * last two bytes with the very first two bytes,
-                                * since such a match has an offset too large to
-                                * be represented.  */
-                               if (unlikely(max_len < 3)) {
+                               if (unlikely(max_len <
+                                            BT_MATCHFINDER_REQUIRED_NBYTES))
+                               {
                                        in_next++;
                                        cache_ptr->length = 0;
                                        cache_ptr++;
@@ -1821,13 +1837,14 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
                        }
 
                        /* Check for matches.  */
-                       lz_matchptr = CALL_BT_MF(is_16_bit, c, bt_matchfinder_get_matches,
+                       lz_matchptr = CALL_BT_MF(is_16_bit, c,
+                                                bt_matchfinder_get_matches,
                                                 in_begin,
                                                 in_next - in_begin,
                                                 max_len,
                                                 nice_len,
                                                 c->max_search_depth,
-                                                &next_hash,
+                                                next_hashes,
                                                 &best_len,
                                                 cache_ptr + 1);
                        in_next++;
@@ -1852,20 +1869,23 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
                                        if (unlikely(max_len > in_end - in_next)) {
                                                max_len = in_end - in_next;
                                                nice_len = min(max_len, nice_len);
-                                               if (unlikely(max_len < 3)) {
+                                               if (unlikely(max_len <
+                                                            BT_MATCHFINDER_REQUIRED_NBYTES))
+                                               {
                                                        in_next++;
                                                        cache_ptr->length = 0;
                                                        cache_ptr++;
                                                        continue;
                                                }
                                        }
-                                       CALL_BT_MF(is_16_bit, c, bt_matchfinder_skip_position,
+                                       CALL_BT_MF(is_16_bit, c,
+                                                  bt_matchfinder_skip_position,
                                                   in_begin,
                                                   in_next - in_begin,
                                                   max_len,
                                                   nice_len,
                                                   c->max_search_depth,
-                                                  &next_hash);
+                                                  next_hashes);
                                        in_next++;
                                        cache_ptr->length = 0;
                                        cache_ptr++;
@@ -1913,7 +1933,6 @@ lzx_find_longest_repeat_offset_match(const u8 * const in_next,
                                     unsigned *rep_max_idx_ret)
 {
        STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
-       LZX_ASSERT(bytes_remaining >= 2);
 
        const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
        const u16 next_2_bytes = load_u16_unaligned(in_next);
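
Loading two bytes at once lets each repeat-offset candidate be screened with
a single 16-bit compare before the byte-wise extension loop. The idiom in
isolation:

	#include <stdint.h>
	#include <string.h>

	/* Unaligned 16-bit load, well-defined via memcpy. */
	static inline uint16_t
	load_u16(const uint8_t *p)
	{
		uint16_t v;

		memcpy(&v, p, sizeof(v));
		return v;
	}

	/* Return the match length at 'candidate', or 0 if even the first
	 * two bytes differ -- one compare rejects most candidates. */
	static unsigned
	match_len(const uint8_t *in_next, const uint8_t *candidate,
		  unsigned max_len)
	{
		unsigned len;

		if (load_u16(candidate) != load_u16(in_next))
			return 0;
		for (len = 2; len < max_len && candidate[len] == in_next[len];)
			len++;
		return len;
	}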
@@ -2019,7 +2038,8 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
 
                        /* Find the longest match at the current position.  */
 
-                       cur_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match,
+                       cur_len = CALL_HC_MF(is_16_bit, c,
+                                            hc_matchfinder_longest_match,
                                             in_begin,
                                             in_next - in_begin,
                                             2,
@@ -2085,7 +2105,8 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
                                nice_len = min(max_len, nice_len);
                        }
 
-                       next_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match,
+                       next_len = CALL_HC_MF(is_16_bit, c,
+                                             hc_matchfinder_longest_match,
                                              in_begin,
                                              in_next - in_begin,
                                              cur_len - 2,
@@ -2145,7 +2166,8 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
                        lzx_record_match(c, cur_len, cur_offset_data,
                                         recent_offsets, is_16_bit,
                                         &litrunlen, &next_seq);
-                       in_next = CALL_HC_MF(is_16_bit, c, hc_matchfinder_skip_positions,
+                       in_next = CALL_HC_MF(is_16_bit, c,
+                                            hc_matchfinder_skip_positions,
                                             in_begin,
                                             in_next - in_begin,
                                             in_end - in_begin,
@@ -2267,8 +2289,8 @@ lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
                        c->impl = lzx_compress_lazy_16;
                else
                        c->impl = lzx_compress_lazy_32;
-               c->max_search_depth = (36 * compression_level) / 20;
-               c->nice_match_length = (72 * compression_level) / 20;
+               c->max_search_depth = (60 * compression_level) / 20;
+               c->nice_match_length = (80 * compression_level) / 20;
 
                /* lzx_compress_lazy() needs max_search_depth >= 2 because it
                 * halves the max_search_depth when attempting a lazy match, and
@@ -2287,7 +2309,7 @@ lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
                /* Scale nice_match_length and max_search_depth with the
                 * compression level.  */
                c->max_search_depth = (24 * compression_level) / 50;
-               c->nice_match_length = (32 * compression_level) / 50;
+               c->nice_match_length = (48 * compression_level) / 50;
 
                /* Set a number of optimization passes appropriate for the
                 * compression level.  */
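
Both parameter pairs scale linearly with the compression level; the divisor
just anchors each formula to its algorithm's nominal level. For example:

	/* lazy path,         level 20: depth = 60*20/20 = 60,  nice = 80*20/20 = 80
	 * lazy path,         level 30: depth = 60*30/20 = 90,  nice = 80*30/20 = 120
	 * near-optimal path, level 50: depth = 24*50/50 = 24,  nice = 48*50/50 = 48 */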
@@ -2348,7 +2370,7 @@ lzx_compress(const void *restrict in, size_t in_nbytes,
        else
                memcpy(c->in_buffer, in, in_nbytes);
        c->in_nbytes = in_nbytes;
-       lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
+       lzx_preprocess(c->in_buffer, in_nbytes);
 
        /* Initially, the previous Huffman codeword lengths are all zeroes.  */
        c->codes_index = 0;
@@ -2363,7 +2385,7 @@ lzx_compress(const void *restrict in, size_t in_nbytes,
        /* Flush the output bitstream and return the compressed size or 0.  */
        result = lzx_flush_output(&os);
        if (!result && c->destructive)
-               lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes);
+               lzx_postprocess(c->in_buffer, c->in_nbytes);
        return result;
 }
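
lzx_preprocess()/lzx_postprocess() are the renamed E8 filters: the 32-bit
relative displacement after each 0xE8 (x86 call) opcode is translated to an
absolute target so repeated calls to the same function compress as identical
byte strings, and the inverse transform restores the buffer if compression is
abandoned. A one-way sketch covering only the common in-range case (the LZX
format also defines a wrap-around case and exact scan bounds, which are
omitted here; little-endian host assumed):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	static void
	e8_preprocess(uint8_t *data, size_t size)
	{
		const int32_t file_size = 12000000;  /* fixed value WIM uses */

		if (size < 10)	/* tiny buffers are left untouched */
			return;
		for (size_t i = 0; i < size - 10; i++) {
			int32_t rel, abs_target, pos = (int32_t)i;

			if (data[i] != 0xE8)
				continue;
			memcpy(&rel, &data[i + 1], 4);
			if (rel >= -pos && rel < file_size - pos) {
				abs_target = rel + pos;	/* in [0, file_size) */
				memcpy(&data[i + 1], &abs_target, 4);
			}
			i += 4;	/* never rescan translated bytes */
		}
	}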