wimlib.net Git - wimlib/blobdiff - src/lzx-decompress.c
{lzx,lzms-decompress.c}: Allocate context with DECODE_TABLE_ALIGNMENT
index f7f6661649cb78fae376ad7c3a4e29c63c11df70..d259eeedc38ae31b6c7156fc0b1599f3b2af8d90 100644
--- a/src/lzx-decompress.c
+++ b/src/lzx-decompress.c
 
 #include <string.h>
 
-#ifdef __SSE2__
-#  include <emmintrin.h>
-#endif
-
 /* Huffman decoding tables and maps from symbols to code lengths. */
 struct lzx_tables {
 
@@ -151,22 +147,18 @@ struct lzx_decompressor {
  */
 static inline u16
 read_huffsym_using_pretree(struct input_bitstream *istream,
-                          const u16 pretree_decode_table[],
-                          const u8 pretree_lens[])
+                          const u16 pretree_decode_table[])
 {
-       return read_huffsym(istream, pretree_decode_table, pretree_lens,
-                           LZX_PRECODE_NUM_SYMBOLS, LZX_PRECODE_TABLEBITS,
-                           LZX_MAX_PRE_CODEWORD_LEN);
+       return read_huffsym(istream, pretree_decode_table,
+                           LZX_PRECODE_TABLEBITS, LZX_MAX_PRE_CODEWORD_LEN);
 }
 
 /* Reads a Huffman-encoded symbol using the main tree. */
 static inline u16
 read_huffsym_using_maintree(struct input_bitstream *istream,
-                           const struct lzx_tables *tables,
-                           unsigned num_main_syms)
+                           const struct lzx_tables *tables)
 {
        return read_huffsym(istream, tables->maintree_decode_table,
-                           tables->maintree_lens, num_main_syms,
                            LZX_MAINCODE_TABLEBITS, LZX_MAX_MAIN_CODEWORD_LEN);
 }
 
@@ -176,7 +168,6 @@ read_huffsym_using_lentree(struct input_bitstream *istream,
                           const struct lzx_tables *tables)
 {
        return read_huffsym(istream, tables->lentree_decode_table,
-                           tables->lentree_lens, LZX_LENCODE_NUM_SYMBOLS,
                            LZX_LENCODE_TABLEBITS, LZX_MAX_LEN_CODEWORD_LEN);
 }
 
@@ -186,10 +177,7 @@ read_huffsym_using_alignedtree(struct input_bitstream *istream,
                               const struct lzx_tables *tables)
 {
        return read_huffsym(istream, tables->alignedtree_decode_table,
-                           tables->alignedtree_lens,
-                           LZX_ALIGNEDCODE_NUM_SYMBOLS,
-                           LZX_ALIGNEDCODE_TABLEBITS,
-                           LZX_MAX_ALIGNED_CODEWORD_LEN);
+                           LZX_ALIGNEDCODE_TABLEBITS, LZX_MAX_ALIGNED_CODEWORD_LEN);
 }
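
With the per-call lens[] arrays and symbol counts gone, read_huffsym() presumably gets everything it needs from the decode table itself plus the table's index width and the maximum codeword length. As a rough illustration only (the entry layout and bit buffer below are assumptions, not wimlib's actual bitstream or table format), a single-level lookup of that shape might look like this:

#include <stdint.h>

/* Illustrative bit buffer: the next bits to be consumed sit at the high end
 * of 'buf'.  This is an assumption for the sketch, not wimlib's bitstream.  */
struct bits {
        uint32_t buf;
        unsigned left;
};

/* Single-level lookup where each table entry packs the decoded symbol in its
 * high bits and the codeword length in its low 5 bits (again an assumed
 * layout).  Codewords longer than 'table_bits' would need a secondary
 * lookup, omitted here; 'max_codeword_len' bounds that step.  */
static unsigned
read_huffsym_sketch(struct bits *is, const uint16_t decode_table[],
                    unsigned table_bits, unsigned max_codeword_len)
{
        uint16_t entry = decode_table[is->buf >> (32 - table_bits)];
        unsigned len = entry & 0x1f;    /* codeword length actually used */
        unsigned sym = entry >> 5;      /* decoded symbol */

        (void)max_codeword_len;
        is->buf <<= len;                /* consume exactly 'len' bits */
        is->left -= len;
        return sym;
}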
 
 /*
@@ -250,8 +238,7 @@ lzx_read_code_lens(struct input_bitstream *istream, u8 lens[],
                signed char value;
 
                tree_code = read_huffsym_using_pretree(istream,
-                                                      pretree_decode_table,
-                                                      pretree_lens);
+                                                      pretree_decode_table);
                switch (tree_code) {
                case 17: /* Run of 0's */
                        num_zeroes = bitstream_read_bits(istream, 4);
@@ -275,8 +262,7 @@ lzx_read_code_lens(struct input_bitstream *istream, u8 lens[],
                        num_same = bitstream_read_bits(istream, 1);
                        num_same += 4;
                        code = read_huffsym_using_pretree(istream,
-                                                         pretree_decode_table,
-                                                         pretree_lens);
+                                                         pretree_decode_table);
                        value = (signed char)*lens - (signed char)code;
                        if (value < 0)
                                value += 17;
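
For context, the precode symbols read here follow the usual LZX run-length scheme: codes 0 through 16 are deltas from the previous length modulo 17, codes 17 and 18 introduce runs of zero lengths, and code 19 repeats one delta-coded length. A sketch of expanding a single precode symbol is below; read_pre() and read_bits() are hypothetical stand-ins for the pretree and bitstream reads above, and the code-18 case is taken from the general LZX format rather than from this hunk.

#include <stdint.h>

/* Hypothetical stand-ins for the pretree and bitstream reads.  */
extern unsigned read_pre(void);
extern unsigned read_bits(unsigned n);

/* Expand one precode symbol into the lens[] array; returns the new write
 * position.  */
static uint8_t *
expand_precode_symbol(uint8_t *lens, unsigned code)
{
        unsigned n, len;
        int v;

        if (code <= 16) {
                /* Delta from the previous length, modulo 17.  */
                v = (int)*lens - (int)code;
                if (v < 0)
                        v += 17;
                *lens++ = (uint8_t)v;
                return lens;
        }

        if (code == 17) {               /* run of zeros: 4 + a 4-bit count  */
                n = 4 + read_bits(4);
                len = 0;
        } else if (code == 18) {        /* longer run of zeros: 20 + a 5-bit count  */
                n = 20 + read_bits(5);
                len = 0;
        } else {                        /* code 19: 4 + a 1-bit count copies
                                         * of one delta-coded length  */
                n = 4 + read_bits(1);
                v = (int)*lens - (int)read_pre();
                if (v < 0)
                        v += 17;
                len = (unsigned)v;
        }
        while (n--)
                *lens++ = (uint8_t)len;
        return lens;
}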
@@ -562,22 +548,15 @@ lzx_decode_match(unsigned main_element, int block_type,
        /* If the position_slot is 0, 1, or 2, the match offset is retrieved
         * from the LRU queue.  Otherwise, the match offset is not in the LRU
         * queue. */
-       switch (position_slot) {
-       case 0:
-               match_offset = queue->R[0];
-               break;
-       case 1:
-               match_offset = queue->R[1];
-               swap(queue->R[0], queue->R[1]);
-               break;
-       case 2:
-               /* The queue doesn't work quite the same as a real LRU queue,
-                * since using the R2 offset doesn't bump the R1 offset down to
-                * R2. */
-               match_offset = queue->R[2];
-               swap(queue->R[0], queue->R[2]);
-               break;
-       default:
+       if (position_slot <= 2) {
+               /* Note: This isn't a real LRU queue, since using the R2 offset
+                * doesn't bump the R1 offset down to R2.  This quirk allows all
+                * 3 recent offsets to be handled by the same code.  (For R0,
+                * the swap is a no-op.)  */
+               match_offset = queue->R[position_slot];
+               queue->R[position_slot] = queue->R[0];
+               queue->R[0] = match_offset;
+       } else {
                /* Otherwise, the offset was not encoded as one of the offsets in
                 * the queue.  Depending on the position slot, there is a
                 * certain number of extra bits that need to be read to fully
@@ -622,7 +601,6 @@ lzx_decode_match(unsigned main_element, int block_type,
                queue->R[2] = queue->R[1];
                queue->R[1] = queue->R[0];
                queue->R[0] = match_offset;
-               break;
        }
 
        /* Verify that the match is in the bounds of the part of the window
@@ -665,144 +643,6 @@ lzx_decode_match(unsigned main_element, int block_type,
        return match_len;
 }
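
The unified repeat-offset handling above is easy to exercise in isolation. The standalone sketch below mirrors it with illustrative names: reusing R2 swaps it with R0 without shifting R1 down, reusing R0 is a no-op, and an explicitly coded offset pushes R0 and R1 down the queue.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Three most-recently-used match offsets, as in the queue->R[] array above.  */
struct recent_offsets {
        uint32_t R[3];
};

/* Mirror of the unified update: slots 0-2 reuse a queued offset and swap it
 * into R0 (a no-op for slot 0); larger slots insert a newly decoded offset,
 * pushing R0 and R1 down.  */
static uint32_t
use_offset(struct recent_offsets *q, unsigned position_slot,
           uint32_t decoded_offset /* only meaningful for slots >= 3 */)
{
        uint32_t match_offset;

        if (position_slot <= 2) {
                match_offset = q->R[position_slot];
                q->R[position_slot] = q->R[0];
                q->R[0] = match_offset;
        } else {
                match_offset = decoded_offset;
                q->R[2] = q->R[1];
                q->R[1] = q->R[0];
                q->R[0] = match_offset;
        }
        return match_offset;
}

int main(void)
{
        struct recent_offsets q = { { 1, 4, 10 } };

        use_offset(&q, 2, 0);           /* R becomes {10, 4, 1}  */
        use_offset(&q, 3, 1000);        /* R becomes {1000, 10, 4}  */
        printf("%" PRIu32 " %" PRIu32 " %" PRIu32 "\n", q.R[0], q.R[1], q.R[2]);
        return 0;
}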
 
-static void
-undo_call_insn_translation(u32 *call_insn_target, s32 input_pos)
-{
-       s32 abs_offset;
-       s32 rel_offset;
-
-       abs_offset = le32_to_cpu(*call_insn_target);
-       if (abs_offset >= -input_pos && abs_offset < LZX_WIM_MAGIC_FILESIZE) {
-               if (abs_offset >= 0) {
-                       /* "good translation" */
-                       rel_offset = abs_offset - input_pos;
-               } else {
-                       /* "compensating translation" */
-                       rel_offset = abs_offset + LZX_WIM_MAGIC_FILESIZE;
-               }
-               *call_insn_target = cpu_to_le32(rel_offset);
-       }
-}
-
-/* Undo the 'E8' preprocessing, where the targets of x86 CALL instructions were
- * changed from relative offsets to absolute offsets.
- *
- * Note that this call instruction preprocessing can and will be used on any
- * data even if it is not actually x86 machine code.  In fact, this type of
- * preprocessing appears to always be used in LZX-compressed resources in WIM
- * files; there is no bit to indicate whether it is used or not, unlike in the
- * LZX compressed format as used in cabinet files, where a bit is reserved for
- * that purpose.
- *
- * Call instruction preprocessing is disabled in the last 6 bytes of the
- * uncompressed data, which really means the 5-byte call instruction cannot
- * start in the last 10 bytes of the uncompressed data.  This is one of the
- * errors in the LZX documentation.
- *
- * Call instruction preprocessing does not appear to be disabled after the
- * 32768th chunk of a WIM stream, which is apparently yet another difference
- * from the LZX compression used in cabinet files.
- *
- * Call instruction processing is supposed to take the file size as a parameter,
- * as it is used in calculating the translated jump targets.  But in WIM files,
- * this file size is always the same (LZX_WIM_MAGIC_FILESIZE == 12000000).*/
-static void
-undo_call_insn_preprocessing(u8 *uncompressed_data, size_t uncompressed_size)
-{
-#ifdef __SSE2__
-
-       /* SSE2 vectorized implementation for x86_64.  This speeds up LZX
-        * decompression by about 5-8% overall.  (Usually --- the performance
-        * actually regresses slightly in the degenerate case that the data
-        * consists entirely of 0xe8 bytes.)  */
-       __m128i *p128 = (__m128i *)uncompressed_data;
-       u32 valid_mask = 0xFFFFFFFF;
-
-       if (uncompressed_size >= 32 &&
-           ((uintptr_t)uncompressed_data % 16 == 0))
-       {
-               __m128i * const end128 = p128 + uncompressed_size / 16 - 1;
-
-               /* Create a vector of all 0xe8 bytes  */
-               const __m128i e8_bytes = _mm_set1_epi8(0xe8);
-
-               /* Iterate through the 16-byte vectors in the input.  */
-               do {
-                       /* Compare the current 16-byte vector with the vector of
-                        * all 0xe8 bytes.  This produces 0xff where the byte is
-                        * 0xe8 and 0x00 where it is not.  */
-                       __m128i cmpresult = _mm_cmpeq_epi8(*p128, e8_bytes);
-
-                       /* Map the comparison results into a single 16-bit
-                        * number.  It will contain a 1 bit when the
-                        * corresponding byte in the current 16-byte vector is
-                        * an e8 byte.  Note: the low-order bit corresponds to
-                        * the first (lowest address) byte.  */
-                       u32 e8_mask = _mm_movemask_epi8(cmpresult);
-
-                       if (!e8_mask) {
-                               /* If e8_mask is 0, then none of these 16 bytes
-                                * have value 0xe8.  No e8 translation is
-                                * needed, and there is no restriction that
-                                * carries over to the next 16 bytes.  */
-                               valid_mask = 0xFFFFFFFF;
-                       } else {
-                               /* At least one byte has value 0xe8.
-                                *
-                                * The AND with valid_mask accounts for the fact
-                                * that we can't start an e8 translation that
-                                * overlaps the previous one.  */
-                               while ((e8_mask &= valid_mask)) {
-
-                                       /* Count the number of trailing zeroes
-                                        * in e8_mask.  This will produce the
-                                        * index of the byte, within the 16, at
-                                        * which the next e8 translation should
-                                        * be done.  */
-                                       u32 bit = __builtin_ctz(e8_mask);
-
-                                       /* Do the e8 translation.  */
-                                       u8 *p8 = (u8 *)p128 + bit;
-                                       undo_call_insn_translation((s32 *)(p8 + 1),
-                                                                  p8 - uncompressed_data);
-
-                                       /* Don't start an e8 translation in the
-                                        * next 4 bytes.  */
-                                       valid_mask &= ~((u32)0x1F << bit);
-                               }
-                               /* Moving on to the next vector.  Shift and set
-                                * valid_mask accordingly.  */
-                               valid_mask >>= 16;
-                               valid_mask |= 0xFFFF0000;
-                       }
-               } while (++p128 < end128);
-       }
-
-       u8 *p8 = (u8 *)p128;
-       while (!(valid_mask & 1)) {
-               p8++;
-               valid_mask >>= 1;
-       }
-#else /* __SSE2__  */
-       u8 *p8 = uncompressed_data;
-#endif /* !__SSE2__  */
-
-       if (uncompressed_size > 10) {
-               /* Finish any bytes that weren't processed by the vectorized
-                * implementation.  */
-               u8 *p8_end = uncompressed_data + uncompressed_size - 10;
-               do {
-                       if (*p8 == 0xe8) {
-                               undo_call_insn_translation((s32 *)(p8 + 1),
-                                                          p8 - uncompressed_data);
-                               p8 += 5;
-                       } else {
-                               p8++;
-                       }
-               } while (p8 < p8_end);
-       }
-}
-
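
The removed routine remains useful as documentation of the transform that lzx_undo_e8_preprocessing() is presumably expected to perform from shared code now. Below is a scalar sketch of it with illustrative names, assuming a little-endian host: each 0xE8 byte outside the last 10 bytes is treated as a CALL opcode, and the 32-bit operand following it is converted from an absolute target back to a relative one using the fixed pseudo file size of 12000000 (LZX_WIM_MAGIC_FILESIZE).

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define WIM_MAGIC_FILESIZE 12000000     /* LZX_WIM_MAGIC_FILESIZE */

/* Convert one absolute CALL target back to a relative one (little-endian
 * host assumed for the memcpy-based load and store).  */
static void
undo_e8_translation(uint8_t *target, int32_t input_pos)
{
        int32_t abs_offset, rel_offset;

        memcpy(&abs_offset, target, sizeof(abs_offset));
        if (abs_offset >= -input_pos && abs_offset < WIM_MAGIC_FILESIZE) {
                if (abs_offset >= 0)            /* "good translation" */
                        rel_offset = abs_offset - input_pos;
                else                            /* "compensating translation" */
                        rel_offset = abs_offset + WIM_MAGIC_FILESIZE;
                memcpy(target, &rel_offset, sizeof(rel_offset));
        }
}

/* Scalar scan: a translation is never started in the last 10 bytes.  */
static void
undo_e8_preprocessing_sketch(uint8_t *data, size_t size)
{
        uint8_t *p, *end;

        if (size <= 10)
                return;
        for (p = data, end = data + size - 10; p < end; ) {
                if (*p == 0xE8) {
                        undo_e8_translation(p + 1, (int32_t)(p - data));
                        p += 5;
                } else {
                        p++;
                }
        }
}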
 /*
  * Decompresses an LZX-compressed block of data from which the header has already
  * been read.
@@ -810,7 +650,6 @@ undo_call_insn_preprocessing(u8 *uncompressed_data, size_t uncompressed_size)
  * @block_type:        The type of the block (LZX_BLOCKTYPE_VERBATIM or
  *             LZX_BLOCKTYPE_ALIGNED)
  * @block_size:        The size of the block, in bytes.
- * @num_main_syms:     Number of symbols in the main alphabet.
  * @window:    Pointer to the decompression window.
  * @window_pos:        The current position in the window.  Will be 0 for the first
  *                     block.
@@ -821,7 +660,6 @@ undo_call_insn_preprocessing(u8 *uncompressed_data, size_t uncompressed_size)
  */
 static int
 lzx_decompress_block(int block_type, unsigned block_size,
-                    unsigned num_main_syms,
                     u8 *window,
                     unsigned window_pos,
                     const struct lzx_tables *tables,
@@ -834,8 +672,7 @@ lzx_decompress_block(int block_type, unsigned block_size,
 
        end = window_pos + block_size;
        while (window_pos < end) {
-               main_element = read_huffsym_using_maintree(istream, tables,
-                                                          num_main_syms);
+               main_element = read_huffsym_using_maintree(istream, tables);
                if (main_element < LZX_NUM_CHARS) {
                        /* literal: 0 to LZX_NUM_CHARS - 1 */
                        window[window_pos++] = main_element;
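
In outline, the per-block loop reads one main-tree symbol per iteration: values below LZX_NUM_CHARS (256) are literals, and anything larger starts a match. A condensed sketch of that dispatch follows; read_main_symbol() and decode_match() are hypothetical stand-ins for read_huffsym_using_maintree() and lzx_decode_match(), which additionally take the tables, bitstream, block type and offset queue.

#include <stdint.h>

#define NUM_CHARS 256   /* LZX_NUM_CHARS: symbols below this are literals */

extern unsigned read_main_symbol(void);
extern int decode_match(unsigned main_element, uint8_t *window,
                        unsigned window_pos);

static int
decompress_block_sketch(uint8_t *window, unsigned window_pos,
                        unsigned block_size)
{
        unsigned end = window_pos + block_size;

        while (window_pos < end) {
                unsigned main_element = read_main_symbol();

                if (main_element < NUM_CHARS) {
                        /* Literal byte, copied straight into the window.  */
                        window[window_pos++] = (uint8_t)main_element;
                } else {
                        /* Match: decode its length and offset, copy from
                         * earlier in the window, then advance.  */
                        int match_len = decode_match(main_element, window,
                                                     window_pos);
                        if (match_len < 0)
                                return -1;
                        window_pos += (unsigned)match_len;
                }
        }
        return 0;
}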
@@ -928,7 +765,6 @@ lzx_decompress(const void *compressed_data, size_t compressed_size,
                                LZX_DEBUG("LZX_BLOCKTYPE_ALIGNED");
                        ret = lzx_decompress_block(block_type,
                                                   block_size,
-                                                  ctx->num_main_syms,
                                                   uncompressed_data,
                                                   window_pos,
                                                   &ctx->tables,
@@ -964,7 +800,7 @@ lzx_decompress(const void *compressed_data, size_t compressed_size,
                }
        }
        if (e8_preprocessing_done)
-               undo_call_insn_preprocessing(uncompressed_data, uncompressed_size);
+               lzx_undo_e8_preprocessing(uncompressed_data, uncompressed_size);
        return 0;
 }
 
@@ -973,7 +809,7 @@ lzx_free_decompressor(void *_ctx)
 {
        struct lzx_decompressor *ctx = _ctx;
 
-       FREE(ctx);
+       ALIGNED_FREE(ctx);
 }
 
 static int
@@ -986,7 +822,8 @@ lzx_create_decompressor(size_t max_window_size,
        if (!lzx_window_size_valid(max_window_size))
                return WIMLIB_ERR_INVALID_PARAM;
 
-       ctx = MALLOC(sizeof(struct lzx_decompressor));
+       ctx = ALIGNED_MALLOC(sizeof(struct lzx_decompressor),
+                            DECODE_TABLE_ALIGNMENT);
        if (ctx == NULL)
                return WIMLIB_ERR_NOMEM;
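
DECODE_TABLE_ALIGNMENT matters here presumably because the decompressor context embeds the Huffman decode tables, and the table code expects them to start on an aligned boundary. A minimal sketch of what ALIGNED_MALLOC and ALIGNED_FREE could map to on a POSIX system follows; wimlib's actual wrappers may be implemented differently.

#define _POSIX_C_SOURCE 200112L
#include <stdlib.h>

/* Hypothetical mapping for ALIGNED_MALLOC/ALIGNED_FREE; illustrative only.  */
static void *
aligned_malloc(size_t size, size_t alignment)
{
        void *p = NULL;

        /* posix_memalign() requires 'alignment' to be a power of two and a
         * multiple of sizeof(void *).  */
        if (posix_memalign(&p, alignment, size) != 0)
                return NULL;
        return p;
}

static void
aligned_free(void *p)
{
        free(p);        /* memory from posix_memalign() is freed with free() */
}

Whatever the underlying implementation, the key property is that memory obtained through the aligned allocator must be released with the matching ALIGNED_FREE(), which is why lzx_free_decompressor() changes in the same commit.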