wimlib.net Git - wimlib/commitdiff
LZX pre/post-processing improvements
author Eric Biggers <ebiggers3@gmail.com>
Thu, 25 Dec 2014 01:58:04 +0000 (19:58 -0600)
committer Eric Biggers <ebiggers3@gmail.com>
Thu, 25 Dec 2014 02:15:13 +0000 (20:15 -0600)
- much faster inner loop for non-vectorized version (see the sketch after this list)
- use either SSE2 or AVX-2 for vectorized version
- faster inner loop for vectorized version (see the movemask demo after the diff)
- support vectorized version on buffers that are not properly aligned
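
As context for the non-vectorized improvement: the rewritten scalar loop pushes the end-of-buffer check into the relatively rare E8 case by planting E8 "trap" bytes at the end of the buffer. Below is a minimal standalone sketch of that technique, mirroring the loop in the diff; e8_filter_scalar and adjust_target are hypothetical names used for illustration, not wimlib API.

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for wimlib's translate/untranslate callback,
 * which would rewrite the 32-bit jump target that follows an E8
 * (x86 CALL) opcode.  */
static void adjust_target(void *target, int32_t input_pos)
{
	(void)target;
	(void)input_pos;
}

static void e8_filter_scalar(uint8_t *data, uint32_t size,
			     void (*process_target)(void *, int32_t))
{
	uint8_t saved_bytes[6];
	uint8_t *tail, *p;

	if (size <= 10)  /* too short for any translation */
		return;

	/* Overwrite the last 6 bytes with E8 "trap" bytes, so the scan
	 * below is guaranteed to stop without a bounds check per byte.  */
	tail = &data[size - 6];
	memcpy(saved_bytes, tail, 6);
	memset(tail, 0xE8, 6);

	for (p = data;;) {
		while (*p != 0xE8)  /* hot loop: one compare per byte */
			p++;
		if (p >= tail)      /* hit a trap byte: done */
			break;
		(*process_target)(p + 1, p - data);
		p += 5;             /* skip the opcode + 4-byte target */
	}

	memcpy(tail, saved_bytes, 6);  /* restore the real data */
}

A call such as e8_filter_scalar(buf, buf_size, adjust_target) visits E8 bytes exactly as the loop in the diff does; the win is that the common not-E8 path is a single load, compare, and increment.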

src/lzx-common.c

index 1221f61ec33971e4b9759697026a7ce955d5e8cd..b40f43d746dedccdb60a24e47bffcc219063e78f 100644 (file)
@@ -1,9 +1,9 @@
 /*
- * lzx-common.c - Common data for LZX compression and decompression.
+ * lzx-common.c - Common code for LZX compression and decompression.
  */
 
 /*
- * Copyright (C) 2012, 2013 Eric Biggers
+ * Copyright (C) 2012, 2013, 2014 Eric Biggers
  *
  * This file is free software; you can redistribute it and/or modify it under
  * the terms of the GNU Lesser General Public License as published by the Free
@@ -23,6 +23,8 @@
 #  include "config.h"
 #endif
 
+#include <string.h>
+
 #include "wimlib/bitops.h"
 #include "wimlib/endianness.h"
 #include "wimlib/lzx.h"
@@ -186,102 +188,153 @@ undo_translate_target(void *target, s32 input_pos)
  * in calculating the translated jump targets.  But in WIM files, this file size
  * is always the same (LZX_WIM_MAGIC_FILESIZE == 12000000).
  */
-static
-#ifndef __SSE2__
-inline  /* Although inlining the 'process_target' function still speeds up the
-          SSE2 case, it bloats the binary more.  */
-#endif
-void
+static void
 lzx_e8_filter(u8 *data, u32 size, void (*process_target)(void *, s32))
 {
+
+#if !defined(__SSE2__) && !defined(__AVX2__)
+       /*
+        * A worthwhile optimization is to push the end-of-buffer check into the
+        * relatively rare E8 case.  This is possible if we replace the last six
+        * bytes of data with E8 bytes; then we are guaranteed to hit an E8 byte
+        * before reaching end-of-buffer.  In addition, this scheme guarantees
+        * that no translation can begin following an E8 byte in the last 10
+        * bytes because a 4-byte offset containing E8 as its high byte is a
+        * large negative number that is not valid for translation.  That is
+        * exactly what we need.
+        */
+       u8 *tail;
+       u8 saved_bytes[6];
+       u8 *p;
+
+       if (size <= 10)
+               return;
+
+       tail = &data[size - 6];
+       memcpy(saved_bytes, tail, 6);
+       memset(tail, 0xE8, 6);
+       p = data;
+       for (;;) {
+               while (*p != 0xE8)
+                       p++;
+               if (p >= tail)
+                       break;
+               (*process_target)(p + 1, p - data);
+               p += 5;
+       }
+       memcpy(tail, saved_bytes, 6);
+#else
+       /* SSE2 or AVX-2 optimized version for x86_64  */
+
        u8 *p = data;
-#ifdef __SSE2__
-       /* SSE2 vectorized implementation for x86_64.  This speeds up LZX
-        * decompression by about 5-8% overall.  (Usually --- the performance
-        * actually regresses slightly in the degenerate case that the data
-        * consists entirely of 0xe8 bytes.  Also, this optimization affects
-        * compression as well, but the percentage improvement is less because
-        * LZX compression is much slower than LZX decompression. ) */
-       if (size >= 32 && (uintptr_t)p % 16 == 0) {
-
-               u32 valid_mask = 0xFFFFFFFF;
-
-               u8 * const vec_end = p + (size & ~15) - 16;
-
-               /* Create a vector of all 0xe8 bytes  */
-               const __m128i e8_bytes = _mm_set1_epi8(0xe8);
-
-               /* Iterate through the 16-byte vectors in the input.  */
-               do {
-                       /* Compare the current 16-byte vector with the vector of
-                        * all 0xe8 bytes.  This produces 0xff where the byte is
-                        * 0xe8 and 0x00 where it is not.  */
-                       __m128i cmpresult = _mm_cmpeq_epi8(*(const __m128i *)p,
-                                                          e8_bytes);
-
-                       /* Map the comparison results into a single 16-bit
-                        * number.  It will contain a 1 bit when the
-                        * corresponding byte in the current 16-byte vector is
-                        * an e8 byte.  Note: the low-order bit corresponds to
-                        * the first (lowest address) byte.  */
-                       u32 e8_mask = _mm_movemask_epi8(cmpresult);
-
-                       if (!e8_mask) {
-                               /* If e8_mask is 0, then none of these 16 bytes
-                                * have value 0xe8.  No e8 translation is
-                                * needed, and there is no restriction that
-                                * carries over to the next 16 bytes.  */
-                               valid_mask = 0xFFFFFFFF;
-                       } else {
-                               /* At least one byte has value 0xe8.
-                                *
-                                * The AND with valid_mask accounts for the fact
-                                * that we can't start an e8 translation that
-                                * overlaps the previous one.  */
-                               while ((e8_mask &= valid_mask)) {
-
-                                       /* Count the number of trailing zeroes
-                                        * in e8_mask.  This will produce the
-                                        * index of the byte, within the 16, at
-                                        * which the next e8 translation should
-                                        * be done.  */
-                                       int bit = ffs32(e8_mask);
-
-                                       /* Do (or undo) the e8 translation.  */
-                                       (*process_target)(p + bit + 1,
-                                                         p + bit - data);
-
-                                       /* Don't start an e8 translation in the
-                                        * next 4 bytes.  */
-                                       valid_mask &= ~((u32)0x1F << bit);
+       u64 valid_mask = ~0;
+
+       if (size <= 10)
+               return;
+#ifdef __AVX2__
+#  define ALIGNMENT_REQUIRED 32
+#else
+#  define ALIGNMENT_REQUIRED 16
+#endif
+
+       /* Process one byte at a time until the pointer is properly aligned.  */
+       while ((uintptr_t)p % ALIGNMENT_REQUIRED != 0) {
+               if (p >= data + size - 10)
+                       return;
+               if (*p == 0xE8 && (valid_mask & 1)) {
+                       (*process_target)(p + 1, p - data);
+                       valid_mask &= ~0x1F;
+               }
+               p++;
+               valid_mask >>= 1;
+               valid_mask |= (u64)1 << 63;
+       }
+
+       if (data + size - p >= 64) {
+
+               /* Vectorized processing  */
+
+               /* Note: we use a "trap" E8 byte to eliminate the need to check
+                * for end-of-buffer in the inner loop.  This byte is carefully
+                * positioned so that it will never be changed by a previous
+                * translation before it is detected.  */
+
+               u8 *trap = p + ((data + size - p) & ~31) - 32 + 4;
+               u8 saved_byte = *trap;
+               *trap = 0xE8;
+
+               for (;;) {
+                       u32 e8_mask;
+                       u8 *orig_p = p;
+               #ifndef __AVX2__  /* SSE2 path; note that AVX2 implies SSE2 */
+                       const __m128i e8_bytes = _mm_set1_epi8(0xE8);
+                       for (;;) {
+                               /* Read the next 32 bytes of data and test them
+                                * for E8 bytes.  */
+                               __m128i bytes1 = *(const __m128i *)p;
+                               __m128i bytes2 = *(const __m128i *)(p + 16);
+                               __m128i cmpresult1 = _mm_cmpeq_epi8(bytes1, e8_bytes);
+                               __m128i cmpresult2 = _mm_cmpeq_epi8(bytes2, e8_bytes);
+                               u32 mask1 = _mm_movemask_epi8(cmpresult1);
+                               u32 mask2 = _mm_movemask_epi8(cmpresult2);
+                               /* The masks have a bit set for each E8 byte.
+                                * We stay in this fast inner loop as long as
+                                * there are no E8 bytes.  */
+                               if (mask1 | mask2) {
+                                       e8_mask = mask1 | (mask2 << 16);
+                                       break;
                                }
-                               /* Moving on to the next vector.  Shift and set
-                                * valid_mask accordingly.  */
-                               valid_mask >>= 16;
-                               valid_mask |= 0xFFFF0000;
+                               p += 32;
+                       }
+               #else
+                       /* AVX-2  */
+                       const __m256i e8_bytes = _mm256_set1_epi8(0xE8);
+                       for (;;) {
+                               __m256i bytes = *(const __m256i *)p;
+                               __m256i cmpresult = _mm256_cmpeq_epi8(bytes, e8_bytes);
+                               e8_mask = _mm256_movemask_epi8(cmpresult);
+                               if (e8_mask)
+                                       break;
+                               p += 32;
+                       }
+               #endif
+
+                       /* Did we pass over data with no E8 bytes?  */
+                       if (p != orig_p)
+                               valid_mask = ~0;
+
+                       /* Are we nearing end-of-buffer?  */
+                       if (p == trap - 4)
+                               break;
+
+                       /* Process the E8 bytes.  However, the AND with
+                        * 'valid_mask' ensures we never process an E8 byte that
+                        * was itself part of a translation target.  */
+                       while ((e8_mask &= valid_mask)) {
+                               unsigned bit = ffs32(e8_mask);
+                               (*process_target)(p + bit + 1, p + bit - data);
+                               valid_mask &= ~((u64)0x1F << bit);
                        }
-               } while ((p += 16) < vec_end);
 
-               while (!(valid_mask & 1)) {
-                       p++;
-                       valid_mask >>= 1;
+                       valid_mask >>= 32;
+                       valid_mask |= 0xFFFFFFFF00000000;
+                       p += 32;
                }
+
+               *trap = saved_byte;
        }
-#endif /* !__SSE2__  */
-
-       if (size > 10) {
-               /* Finish any bytes that weren't processed by the vectorized
-                * implementation.  */
-               u8 *end = data + size - 10;
-               do {
-                       if (*p == 0xe8) {
-                               (*process_target)(p + 1, p - data);
-                               p += 5;
-                       } else {
-                               p++;
-                       }
-               } while (p < end);
+
+       /* Approaching the end of the buffer; process one byte at a time.  */
+       while (p < data + size - 10) {
+               if (*p == 0xE8 && (valid_mask & 1)) {
+                       (*process_target)(p + 1, p - data);
+                       valid_mask &= ~0x1F;
+               }
+               p++;
+               valid_mask >>= 1;
+               valid_mask |= (u64)1 << 63;
        }
+#endif /* __SSE2__ || __AVX2__ */
 }
 
 void
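
The vectorized inner loop above reduces to one idiom: compare a whole vector against 0xE8 (_mm_cmpeq_epi8 / _mm256_cmpeq_epi8) and compress the per-byte results into a scalar bitmask (_mm_movemask_epi8 / _mm256_movemask_epi8), so a 32-byte run containing no E8 bytes costs only a handful of instructions. The toy program below, which is not part of the commit and uses __builtin_ctz in place of wimlib's ffs32, demonstrates the idiom in isolation; it should build with gcc -msse2 on x86.

#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buf[32] __attribute__((aligned(16)));

	memset(buf, 0x90, sizeof(buf));  /* fill with x86 NOP opcodes */
	buf[3]  = 0xE8;                  /* plant two CALL opcodes */
	buf[20] = 0xE8;

	const __m128i e8_bytes = _mm_set1_epi8((char)0xE8);

	for (size_t i = 0; i < sizeof(buf); i += 16) {
		__m128i bytes = _mm_load_si128((const __m128i *)&buf[i]);
		__m128i cmpresult = _mm_cmpeq_epi8(bytes, e8_bytes);
		unsigned mask = (unsigned)_mm_movemask_epi8(cmpresult);

		/* Each set bit in 'mask' marks an E8 byte within this
		 * 16-byte chunk; bit 0 is the lowest-addressed byte.  */
		while (mask) {
			unsigned bit = (unsigned)__builtin_ctz(mask);
			printf("E8 byte at offset %zu\n", i + bit);
			mask &= mask - 1;  /* clear the lowest set bit */
		}
	}
	return 0;
}

Running it prints offsets 3 and 20. The real code additionally ANDs the mask with valid_mask so that an E8 byte lying inside a just-translated 4-byte target is never treated as the start of another translation.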