wimlib.net Git - wimlib/blobdiff - src/lzx_compress.c
bitops: rename bit scan functions
[wimlib] / src / lzx_compress.c
index fdbce434cf794d7ccd3881b39c70bcb61c6d454b..10b5190283f23ed06979cfafc95d0a0da0b60c7a 100644 (file)
@@ -889,7 +889,12 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                    const struct lzx_codes *codes)
 {
        const struct lzx_sequence *seq = sequences;
-       u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED);
+       unsigned min_aligned_offset_slot;
+
+       if (block_type == LZX_BLOCKTYPE_ALIGNED)
+               min_aligned_offset_slot = LZX_MIN_ALIGNED_OFFSET_SLOT;
+       else
+               min_aligned_offset_slot = LZX_MAX_OFFSET_SLOTS;
 
        for (;;) {
                /* Output the next sequence.  */
@@ -971,8 +976,11 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                extra_bits = adjusted_offset - (lzx_offset_slot_base[offset_slot] +
                                                LZX_OFFSET_ADJUSTMENT);
 
-       #define MAX_MATCH_BITS  (MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
-                                14 + ALIGNED_CODEWORD_LIMIT)
+       #define MAX_MATCH_BITS (MAIN_CODEWORD_LIMIT +           \
+                               LENGTH_CODEWORD_LIMIT +         \
+                               LZX_MAX_NUM_EXTRA_BITS -        \
+                               LZX_NUM_ALIGNED_OFFSET_BITS +   \
+                               ALIGNED_CODEWORD_LIMIT)
 
                /* Verify optimization is enabled on 64-bit  */
                STATIC_ASSERT(WORDBITS < 64 || CAN_BUFFER(MAX_MATCH_BITS));
@@ -1001,12 +1009,13 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                 * there are at least extra 3 offset bits required.  All other
                 * extra offset bits are output verbatim.  */
 
-               if ((adjusted_offset & ones_if_aligned) >= 16) {
+               if (offset_slot >= min_aligned_offset_slot) {
 
                        lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
                                     num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
                        if (!CAN_BUFFER(MAX_MATCH_BITS))
-                               lzx_flush_bits(os, 14);
+                               lzx_flush_bits(os, LZX_MAX_NUM_EXTRA_BITS -
+                                                  LZX_NUM_ALIGNED_OFFSET_BITS);
 
                        lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
                                                                  LZX_ALIGNED_OFFSET_BITMASK],
@@ -1015,11 +1024,11 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                        if (!CAN_BUFFER(MAX_MATCH_BITS))
                                lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
                } else {
-                       STATIC_ASSERT(CAN_BUFFER(17));
+                       STATIC_ASSERT(CAN_BUFFER(LZX_MAX_NUM_EXTRA_BITS));
 
                        lzx_add_bits(os, extra_bits, num_extra_bits);
                        if (!CAN_BUFFER(MAX_MATCH_BITS))
-                               lzx_flush_bits(os, 17);
+                               lzx_flush_bits(os, LZX_MAX_NUM_EXTRA_BITS);
                }
 
                if (CAN_BUFFER(MAX_MATCH_BITS))
@@ -1422,7 +1431,7 @@ lzx_walk_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit,
                /* Record a match. */
 
                /* Tally the aligned offset symbol if needed. */
-               if (adjusted_offset >= 16)
+               if (adjusted_offset >= LZX_MIN_ALIGNED_OFFSET + LZX_OFFSET_ADJUSTMENT)
                        c->freqs.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]++;
 
                /* Record the adjusted length. */
@@ -1704,7 +1713,7 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
                                u32 cost;
 
                        #if CONSIDER_ALIGNED_COSTS
-                               if (offset >= 16 - LZX_OFFSET_ADJUSTMENT)
+                               if (offset >= LZX_MIN_ALIGNED_OFFSET)
                                        base_cost += c->costs.aligned[adjusted_offset &
                                                                      LZX_ALIGNED_OFFSET_BITMASK];
                        #endif
@@ -1845,7 +1854,7 @@ lzx_compute_match_costs(struct lzx_compressor *c)
                unsigned i;
 
        #if CONSIDER_ALIGNED_COSTS
-               if (offset_slot >= 8)
+               if (offset_slot >= LZX_MIN_ALIGNED_OFFSET_SLOT)
                        extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * BIT_COST;
        #endif