Adjust names of get/put unaligned functions
diff --git a/src/lzms_compress.c b/src/lzms_compress.c
index 385fa0502294b05f29a7eb7fc4113f1349fb4739..f42a46ee2fe8d82292265dd5d9685c7bf9f5bbdf 100644
--- a/src/lzms_compress.c
+++ b/src/lzms_compress.c
@@ -173,7 +173,7 @@ struct lzms_item {
 static inline void
 check_that_powers_fit_in_bitfield(void)
 {
-       BUILD_BUG_ON(LZMS_NUM_DELTA_POWER_SYMS > (1 << (31 - DELTA_SOURCE_POWER_SHIFT)));
+       STATIC_ASSERT(LZMS_NUM_DELTA_POWER_SYMS <= (1 << (31 - DELTA_SOURCE_POWER_SHIFT)));
 }
 
 /* A stripped-down version of the adaptive state in LZMS which excludes the
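Throughout this diff, BUILD_BUG_ON(failure-condition) becomes STATIC_ASSERT(required-condition), so each check now states the property that must hold rather than its negation. A minimal sketch of how such a compile-time assertion can be built for pre-C11 compilers (an assumption; wimlib's actual macro may instead wrap _Static_assert):

/* Sketch of a compile-time assertion for pre-C11 compilers; wimlib's real
 * STATIC_ASSERT may be defined differently.  A false condition produces a
 * negative array size, which fails to compile. */
#define STATIC_ASSERT_SKETCH(cond) \
	((void)sizeof(char[1 - 2 * !(cond)]))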
@@ -249,7 +249,7 @@ struct lzms_optimum_node {
         *
         * Note: this adaptive state structure also does not include the
         * probability entries or current Huffman codewords.  Those aren't
-        * maintained per-position and are only updated occassionally.
+        * maintained per-position and are only updated occasionally.
         */
        struct lzms_adaptive_state state;
 } _aligned_attribute(64);
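The } _aligned_attribute(64); above pads each lzms_optimum_node out to a 64-byte boundary so that entries of the per-position array do not straddle cache lines. A plausible GCC/Clang-style definition (an assumption, not copied from wimlib's headers):

/* Assumed GCC/Clang-style definition of the alignment attribute used above;
 * the _sketch suffix marks it as illustrative, not wimlib's own. */
#define _aligned_attribute_sketch(n)	__attribute__((aligned(n)))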
@@ -498,7 +498,7 @@ lzms_range_encoder_shift_low(struct lzms_range_encoder *rc)
                do {
                        if (likely(rc->next >= rc->begin)) {
                                if (rc->next != rc->end) {
-                                       put_unaligned_u16_le(rc->cache +
+                                       put_unaligned_le16(rc->cache +
                                                             (u16)(rc->lower_bound >> 32),
                                                             rc->next++);
                                }
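The renamed put_unaligned_le16() (value first, destination pointer second) stores a 16-bit value at an address of any alignment in little-endian byte order. A portable, self-contained equivalent looks roughly like this; wimlib's real helper likely uses memcpy plus a byte swap on big-endian hosts:

#include <stdint.h>

/* Portable sketch of put_unaligned_le16(): write byte by byte so no
 * alignment is required, low byte first so the result is little endian
 * regardless of host byte order. */
static inline void
put_unaligned_le16_sketch(uint16_t v, void *p)
{
	uint8_t *q = p;

	q[0] = (uint8_t)(v >> 0);
	q[1] = (uint8_t)(v >> 8);
}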
@@ -658,7 +658,7 @@ lzms_write_bits(struct lzms_output_bitstream *os, const u32 bits,
 
                /* Write a coding unit, unless it would underflow the buffer. */
                if (os->next != os->begin)
-                       put_unaligned_u16_le(os->bitbuf >> os->bitcount, --os->next);
+                       put_unaligned_le16(os->bitbuf >> os->bitcount, --os->next);
 
                /* Optimization for call sites that never write more than 16
                 * bits at once.  */
@@ -679,7 +679,7 @@ lzms_output_bitstream_flush(struct lzms_output_bitstream *os)
                return false;
 
        if (os->bitcount != 0)
-               put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), --os->next);
+               put_unaligned_le16(os->bitbuf << (16 - os->bitcount), --os->next);
 
        return true;
 }
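Both hunks above show the defining quirk of this bitstream: os->next is pre-decremented, so the Huffman-coded units grow backwards from the end of the buffer, while the range encoder's rc->next++ grows forwards from the beginning, letting the two streams share one output buffer. A simplified sketch of such a backward writer (assumed names; the real lzms_write_bits() also has the max_num_bits optimization and flush handling shown above):

#include <stdint.h>

struct backward_bitstream_sketch {
	uint64_t bitbuf;	/* pending bits, newest in the low-order end */
	unsigned bitcount;	/* number of valid bits in bitbuf */
	uint8_t *begin;		/* start of buffer; writing stops here */
	uint8_t *next;		/* write pointer, moves toward begin */
};

/* Append num_bits (<= 32) bits, then store each completed 16-bit unit at a
 * decreasing address, mirroring the --os->next pattern above. */
static void
write_bits_backward_sketch(struct backward_bitstream_sketch *os,
			   uint32_t bits, unsigned num_bits)
{
	os->bitbuf = (os->bitbuf << num_bits) | bits;
	os->bitcount += num_bits;

	while (os->bitcount >= 16) {
		os->bitcount -= 16;
		if (os->next - os->begin >= 2) {	/* don't underflow */
			os->next -= 2;
			os->next[0] = (uint8_t)(os->bitbuf >> os->bitcount);
			os->next[1] = (uint8_t)(os->bitbuf >> (os->bitcount + 8));
		}
	}
}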
@@ -975,7 +975,7 @@ static inline void
 check_cost_shift(void)
 {
        /* lzms_bit_costs is hard-coded to the current COST_SHIFT.  */
-       BUILD_BUG_ON(COST_SHIFT != 6);
+       STATIC_ASSERT(COST_SHIFT == 6);
 }
 
 #if 0
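COST_SHIFT == 6 means bit costs are kept as fixed-point numbers with 6 fractional bits, i.e. in units of 1/64 of a bit, which is why the hard-coded lzms_bit_costs table only matches this one value. As a rough illustration (my own sketch, not wimlib's table generator), the cost of an event with probability num/denom in such units would be:

#include <math.h>

/* Illustrative only: cost of an event with probability num/denom, expressed
 * in 1/64-bit units (COST_SHIFT == 6, so the scale factor is 1 << 6). */
static unsigned
fixed_point_bit_cost_sketch(unsigned num, unsigned denom)
{
	return (unsigned)(-log2((double)num / denom) * (1 << 6) + 0.5);
}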
@@ -1180,7 +1180,7 @@ static void
 lzms_init_delta_matchfinder(struct lzms_compressor *c)
 {
        /* Set all entries to use an invalid power, which will never match.  */
-       BUILD_BUG_ON(NUM_POWERS_TO_CONSIDER >= (1 << (32 - DELTA_SOURCE_POWER_SHIFT)));
+       STATIC_ASSERT(NUM_POWERS_TO_CONSIDER < (1 << (32 - DELTA_SOURCE_POWER_SHIFT)));
        memset(c->delta_hash_table, 0xFF, sizeof(c->delta_hash_table));
 
        /* Initialize the next hash code for each power.  We can just use zeroes
@@ -1203,7 +1203,7 @@ lzms_delta_hash(const u8 *p, const u32 pos, u32 span)
         * include in the hash code computation the span and the low-order bits
         * of the current position.  */
 
-       BUILD_BUG_ON(NBYTES_HASHED_FOR_DELTA != 3);
+       STATIC_ASSERT(NBYTES_HASHED_FOR_DELTA == 3);
        u8 d0 = *(p + 0) - *(p + 0 - span);
        u8 d1 = *(p + 1) - *(p + 1 - span);
        u8 d2 = *(p + 2) - *(p + 2 - span);
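The three bytes hashed are not literal bytes but differences across the span (d0..d2 above), which is what lets the delta matchfinder find matches between regions whose contents differ by a repeating stride pattern. A hypothetical way to mix those three difference bytes into a table index (wimlib's actual hash function, and its mixing in of the span and position bits, may differ):

#include <stdint.h>

/* Hypothetical multiplicative hash of the three difference bytes; returns a
 * num_bits-wide table index.  Not wimlib's actual hash function. */
static inline uint32_t
delta_hash3_sketch(uint8_t d0, uint8_t d1, uint8_t d2, unsigned num_bits)
{
	uint32_t v = (uint32_t)d0 | ((uint32_t)d1 << 8) | ((uint32_t)d2 << 16);

	return (v * 0x9E3779B1u) >> (32 - num_bits);
}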
@@ -1248,7 +1248,7 @@ lzms_delta_matchfinder_skip_bytes(struct lzms_compressor *c,
                        c->delta_hash_table[hash] =
                                (power << DELTA_SOURCE_POWER_SHIFT) | pos;
                        c->next_delta_hashes[power] = next_hash;
-                       prefetch(&c->delta_hash_table[next_hash]);
+                       prefetchw(&c->delta_hash_table[next_hash]);
                }
        } while (in_next++, pos++, --count);
 }
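Switching prefetch() to prefetchw() matters here because a later iteration writes to c->delta_hash_table[next_hash]: a write-intent prefetch can bring the cache line in already in exclusive state instead of forcing a second ownership transfer at the store. With GCC/Clang builtins, the two hints differ only in the read/write argument (assumed definitions, not wimlib's header):

/* Assumed GCC/Clang-style definitions; the second argument of
 * __builtin_prefetch() selects read (0) or write (1) intent. */
#define prefetch_sketch(addr)	__builtin_prefetch((addr), 0)
#define prefetchw_sketch(addr)	__builtin_prefetch((addr), 1)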
@@ -1712,7 +1712,7 @@ begin:
                        const u32 pos = in_next - c->in_buffer;
 
                        /* Consider each possible power (log2 of span)  */
-                       BUILD_BUG_ON(NUM_POWERS_TO_CONSIDER > LZMS_NUM_DELTA_POWER_SYMS);
+                       STATIC_ASSERT(NUM_POWERS_TO_CONSIDER <= LZMS_NUM_DELTA_POWER_SYMS);
                        for (u32 power = 0; power < NUM_POWERS_TO_CONSIDER; power++) {
 
                                const u32 span = (u32)1 << power;
@@ -1726,7 +1726,7 @@ begin:
 
                                c->delta_hash_table[hash] = (power << DELTA_SOURCE_POWER_SHIFT) | pos;
                                c->next_delta_hashes[power] = next_hash;
-                               prefetch(&c->delta_hash_table[next_hash]);
+                               prefetchw(&c->delta_hash_table[next_hash]);
 
                                if (power != cur_match >> DELTA_SOURCE_POWER_SHIFT)
                                        continue;
@@ -1741,7 +1741,7 @@ begin:
 
                                /* Check the first 3 bytes before entering the
                                 * extension loop.  */
-                               BUILD_BUG_ON(NBYTES_HASHED_FOR_DELTA != 3);
+                               STATIC_ASSERT(NBYTES_HASHED_FOR_DELTA == 3);
                                if (((u8)(*(in_next + 0) - *(in_next + 0 - span)) !=
                                     (u8)(*(matchptr + 0) - *(matchptr + 0 - span))) ||
                                    ((u8)(*(in_next + 1) - *(in_next + 1 - span)) !=
@@ -2166,8 +2166,8 @@ oom0:
 }
 
 static size_t
-lzms_compress(const void *in, size_t in_nbytes,
-             void *out, size_t out_nbytes_avail, void *_c)
+lzms_compress(const void *restrict in, size_t in_nbytes,
+             void *restrict out, size_t out_nbytes_avail, void *restrict _c)
 {
        struct lzms_compressor *c = _c;
        size_t result;
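Adding restrict to the in, out, and context pointers promises the compiler that the input buffer, output buffer, and compressor state never alias one another, which can let it keep loaded values in registers across stores. A toy demonstration of the same idea (hypothetical function, not from wimlib):

#include <stddef.h>

/* With restrict, the compiler may load *len once and keep it in a register,
 * because the stores through dst are promised not to modify *len. */
static void
fill_zero_sketch(unsigned char *restrict dst, const size_t *restrict len)
{
	for (size_t i = 0; i < *len; i++)
		dst[i] = 0;
}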