wimlib.net Git - wimlib/commitdiff
Rename WORDSIZE to WORDBYTES and introduce WORDBITS
author Eric Biggers <ebiggers3@gmail.com>
Wed, 22 Jun 2016 01:01:59 +0000 (20:01 -0500)
committer Eric Biggers <ebiggers3@gmail.com>
Sat, 2 Jul 2016 14:58:24 +0000 (09:58 -0500)
include/wimlib/bitops.h
include/wimlib/decompress_common.h
include/wimlib/lz_extend.h
include/wimlib/types.h
src/decompress_common.c
src/lzms_decompress.c
src/lzx_compress.c

index 70e6c611a888c68feb76ddbb627c57a4d2b89c54..ed8b16cdd2410b1d5649081bc3758938d27dd074 100644 (file)
@@ -55,8 +55,8 @@ fls64(u64 v)
 static inline unsigned
 flsw(machine_word_t v)
 {
-       STATIC_ASSERT(WORDSIZE == 4 || WORDSIZE == 8);
-       if (WORDSIZE == 4)
+       STATIC_ASSERT(WORDBITS == 32 || WORDBITS == 64);
+       if (WORDBITS == 32)
                return fls32(v);
        else
                return fls64(v);
@@ -93,8 +93,8 @@ ffs64(u64 v)
 static inline unsigned
 ffsw(machine_word_t v)
 {
-       STATIC_ASSERT(WORDSIZE == 4 || WORDSIZE == 8);
-       if (WORDSIZE == 4)
+       STATIC_ASSERT(WORDBITS == 32 || WORDBITS == 64);
+       if (WORDBITS == 32)
                return ffs32(v);
        else
                return ffs64(v);
index f3f1dee56de004be8a04e82b7545425996f8e8d7..a06ede1457359efe7a16de0573969f0418c6ffcc 100644 (file)
@@ -274,12 +274,12 @@ repeat_byte(u8 b)
 {
        machine_word_t v;
 
-       STATIC_ASSERT(WORDSIZE == 4 || WORDSIZE == 8);
+       STATIC_ASSERT(WORDBITS == 32 || WORDBITS == 64);
 
        v = b;
        v |= v << 8;
        v |= v << 16;
-       v |= v << ((WORDSIZE == 8) ? 32 : 0);
+       v |= v << ((WORDBITS == 64) ? 32 : 0);
        return v;
 }
 
@@ -310,13 +310,11 @@ lz_copy(u8 *dst, u32 length, u32 offset, const u8 *winend, u32 min_length)
         * example, if a word is 8 bytes and the match is of length 5, then
         * we'll simply copy 8 bytes.  This is okay as long as we don't write
         * beyond the end of the output buffer, hence the check for (winend -
-        * end >= WORDSIZE - 1).
+        * end >= WORDBYTES - 1).
         */
-       if (UNALIGNED_ACCESS_IS_FAST &&
-           likely(winend - end >= WORDSIZE - 1))
-       {
+       if (UNALIGNED_ACCESS_IS_FAST && likely(winend - end >= WORDBYTES - 1)) {
 
 
-               if (offset >= WORDSIZE) {
+               if (offset >= WORDBYTES) {
                        /* The source and destination words don't overlap.  */
 
                        /* To improve branch prediction, one iteration of this
@@ -326,14 +324,14 @@ lz_copy(u8 *dst, u32 length, u32 offset, const u8 *winend, u32 min_length)
                         * and we'll need to continue copying.  */
 
                        copy_word_unaligned(src, dst);
-                       src += WORDSIZE;
-                       dst += WORDSIZE;
+                       src += WORDBYTES;
+                       dst += WORDBYTES;
 
                        if (dst < end) {
                                do {
                                        copy_word_unaligned(src, dst);
-                                       src += WORDSIZE;
-                                       dst += WORDSIZE;
+                                       src += WORDBYTES;
+                                       dst += WORDBYTES;
                                } while (dst < end);
                        }
                        return;
@@ -346,19 +344,19 @@ lz_copy(u8 *dst, u32 length, u32 offset, const u8 *winend, u32 min_length)
                        machine_word_t v = repeat_byte(*(dst - 1));
                        do {
                                store_word_unaligned(v, dst);
-                               src += WORDSIZE;
-                               dst += WORDSIZE;
+                               src += WORDBYTES;
+                               dst += WORDBYTES;
                        } while (dst < end);
                        return;
                }
                /*
                 * We don't bother with special cases for other 'offset <
-                * WORDSIZE', which are usually rarer than 'offset == 1'.  Extra
-                * checks will just slow things down.  Actually, it's possible
-                * to handle all the 'offset < WORDSIZE' cases using the same
-                * code, but it still becomes more complicated doesn't seem any
-                * faster overall; it definitely slows down the more common
-                * 'offset == 1' case.
+                * WORDBYTES', which are usually rarer than 'offset == 1'.
+                * Extra checks will just slow things down.  Actually, it's
+                * possible to handle all the 'offset < WORDBYTES' cases using
+                * the same code, but it still becomes more complicated doesn't
+                * seem any faster overall; it definitely slows down the more
+                * common 'offset == 1' case.
                 */
        }
 
index 2fb76bc921d47c5b1986c087b1e12e24cd7b62ac..c4547e2bc6d9d046e7aebc35ee765eae40db9342 100644 (file)
@@ -32,17 +32,17 @@ static inline u32
 lz_extend(const u8 * const strptr, const u8 * const matchptr,
           u32 len, const u32 max_len)
 {
-       while (UNALIGNED_ACCESS_IS_FAST && len + WORDSIZE <= max_len) {
+       while (UNALIGNED_ACCESS_IS_FAST && len + WORDBYTES <= max_len) {
                machine_word_t v = load_word_unaligned(matchptr + len) ^
                                   load_word_unaligned(strptr + len);
                if (v != 0) {
                        if (CPU_IS_LITTLE_ENDIAN)
                                len += ffsw(v) >> 3;
                        else
-                               len += (8 * WORDSIZE - 1 - flsw(v)) >> 3;
+                               len += (WORDBITS - 1 - flsw(v)) >> 3;
                        return len;
                }
-               len += WORDSIZE;
+               len += WORDBYTES;
        }
 
        while (len < max_len && matchptr[len] == strptr[len])
index 87f4604267cc94865d805847b7139f7747fc7c1c..4c85311e2a9c62bebe8e24b48de3edbf91215689 100644 (file)
@@ -47,6 +47,7 @@ typedef struct WIMStruct WIMStruct;
  */
 typedef size_t machine_word_t;
 
-#define WORDSIZE       sizeof(machine_word_t)
+#define WORDBYTES      sizeof(machine_word_t)
+#define WORDBITS       (8 * WORDBYTES)
 
 #endif /* _WIMLIB_TYPES_H */
index c927502c34a732c3f8114d0b87bb6ffc704e7fe1..973f467cc6e0c7ffe3a8bb557988f29e2f8bf8f7 100644 (file)
@@ -157,7 +157,7 @@ make_huffman_decode_table(u16 decode_table[const],
        unsigned decode_table_pos;
 
 #ifdef USE_WORD_FILL
-       const unsigned entries_per_word = WORDSIZE / sizeof(decode_table[0]);
+       const unsigned entries_per_word = WORDBYTES / sizeof(decode_table[0]);
 #endif
 
 #ifdef USE_SSE2_FILL
@@ -291,11 +291,11 @@ make_huffman_decode_table(u16 decode_table[const],
                        aliased_word_t *p;
                        unsigned n;
 
-                       STATIC_ASSERT(WORDSIZE == 4 || WORDSIZE == 8);
+                       STATIC_ASSERT(WORDBITS == 32 || WORDBITS == 64);
 
                        v = MAKE_DIRECT_ENTRY(sorted_syms[sym_idx], codeword_len);
                        v |= v << 16;
-                       v |= v << (WORDSIZE == 8 ? 32 : 0);
+                       v |= v << (WORDBITS == 64 ? 32 : 0);
 
                        p = (aliased_word_t *)decode_table_ptr;
                        n = stores_per_loop;
index e14ba590ac311dc074699470e27277f177bff178..1ea0ac6da3d1e87141ba5252dd33fc14736e2aa1 100644 (file)
@@ -387,7 +387,7 @@ lzms_ensure_bits(struct lzms_input_bitstream *is, unsigned num_bits)
        avail = BITBUF_NBITS - is->bitsleft;
 
        if (UNALIGNED_ACCESS_IS_FAST && CPU_IS_LITTLE_ENDIAN &&
-           WORDSIZE == 8 && likely(is->next - is->begin >= 8))
+           WORDBYTES == 8 && likely(is->next - is->begin >= 8))
        {
                is->next -= (avail & ~15) >> 3;
                is->bitbuf |= load_u64_unaligned(is->next) << (avail & 15);
index 19e2daa2a97de024115e4b65ff0097661cd05a73..588b81d8b9a33c1f19196f7735fcc155e675cf8f 100644 (file)
@@ -542,7 +542,7 @@ struct lzx_output_bitstream {
 
 /* Can the specified number of bits always be added to 'bitbuf' after any
  * pending 16-bit coding units have been flushed?  */
-#define CAN_BUFFER(n)  ((n) <= (8 * sizeof(machine_word_t)) - 15)
+#define CAN_BUFFER(n)  ((n) <= WORDBITS - 15)
 
 /*
  * Initialize the output bitstream.
@@ -892,7 +892,7 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                if (litrunlen) {  /* Is the literal run nonempty?  */
 
                        /* Verify optimization is enabled on 64-bit  */
-                       STATIC_ASSERT(sizeof(machine_word_t) < 8 ||
+                       STATIC_ASSERT(WORDBITS < 64 ||
                                      CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT));
 
                        if (CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT)) {
@@ -968,7 +968,7 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                                 14 + ALIGNED_CODEWORD_LIMIT)
 
                /* Verify optimization is enabled on 64-bit  */
-               STATIC_ASSERT(sizeof(machine_word_t) < 8 || CAN_BUFFER(MAX_MATCH_BITS));
+               STATIC_ASSERT(WORDBITS < 64 || CAN_BUFFER(MAX_MATCH_BITS));
 
                /* Output the main symbol for the match.  */