X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=src%2Flzx-common.c;h=b40f43d746dedccdb60a24e47bffcc219063e78f;hp=e61629e700dfa8414fc3e18665c244bf1e0c09ba;hb=3872f9e0d30f6439b2a08e091fbd3df042841b6a;hpb=56f882a80475fbe170f3d580400eb6f011ec5dfb

diff --git a/src/lzx-common.c b/src/lzx-common.c
index e61629e7..b40f43d7 100644
--- a/src/lzx-common.c
+++ b/src/lzx-common.c
@@ -1,41 +1,43 @@
 /*
- * lzx-common.c - Common data for LZX compression and decompression.
+ * lzx-common.c - Common code for LZX compression and decompression.
  */
 
 /*
- * Copyright (C) 2012, 2013 Eric Biggers
+ * Copyright (C) 2012, 2013, 2014 Eric Biggers
  *
- * This file is part of wimlib, a library for working with WIM files.
+ * This file is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your option) any
+ * later version.
  *
- * wimlib is free software; you can redistribute it and/or modify it under the
- * terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 3 of the License, or (at your option)
- * any later version.
- *
- * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
- * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
  * details.
  *
- * You should have received a copy of the GNU General Public License
- * along with wimlib; if not, see http://www.gnu.org/licenses/.
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this file; if not, see http://www.gnu.org/licenses/.
  */
 
 #ifdef HAVE_CONFIG_H
 #  include "config.h"
 #endif
 
+#include <string.h>
+
+#include "wimlib/bitops.h"
 #include "wimlib/endianness.h"
 #include "wimlib/lzx.h"
+#include "wimlib/unaligned.h"
 #include "wimlib/util.h"
 
 #ifdef __SSE2__
 #  include <emmintrin.h>
 #endif
 
-/* Mapping: position slot => first match offset that uses that position slot.
+/* Mapping: offset slot => first match offset that uses that offset slot.
  */
-const u32 lzx_position_base[LZX_MAX_POSITION_SLOTS] = {
+const u32 lzx_offset_slot_base[LZX_MAX_OFFSET_SLOTS] = {
 	0      , 1      , 2      , 3      , 4      ,	/* 0  --- 4  */
 	6      , 8      , 12     , 16     , 24     ,	/* 5  --- 9  */
 	32     , 48     , 64     , 96     , 128    ,	/* 10 --- 14 */
@@ -49,10 +51,9 @@ const u32 lzx_position_base[LZX_MAX_POSITION_SLOTS] = {
 	2097152						/* 50	     */
 };
 
-/* Mapping: position slot => how many extra bits must be read and added to the
- * corresponding position base to decode the match offset.  */
-#ifdef USE_LZX_EXTRA_BITS_ARRAY
-const u8 lzx_extra_bits[LZX_MAX_POSITION_SLOTS] = {
+/* Mapping: offset slot => how many extra bits must be read and added to the
+ * corresponding offset slot base to decode the match offset.  */
+const u8 lzx_extra_offset_bits[LZX_MAX_OFFSET_SLOTS] = {
 	0 , 0 , 0 , 0 , 1 ,
 	1 , 2 , 2 , 3 , 3 ,
 	4 , 4 , 5 , 5 , 6 ,
@@ -65,62 +66,69 @@ const u8 lzx_extra_bits[LZX_MAX_POSITION_SLOTS] = {
 	17, 17, 17, 17, 17, 17
 };
 
-#endif
 
-/* LZX window size must be a power of 2 between 2^15 and 2^21, inclusively.  */
-bool
-lzx_window_size_valid(size_t window_size)
+/* Round the specified compression block size (not LZX block size) up to the
+ * next valid LZX window size, and return its order (log2).  Or, if the block
+ * size is 0 or greater than the largest valid LZX window size, return 0.  */
+unsigned
+lzx_get_window_order(size_t max_block_size)
 {
-	if (window_size == 0 || (u32)window_size != window_size)
-		return false;
-	u32 order = bsr32(window_size);
-	if (window_size != 1U << order)
-		return false;
-	return (order >= LZX_MIN_WINDOW_ORDER && order <= LZX_MAX_WINDOW_ORDER);
+	unsigned order;
+
+	if (max_block_size == 0 || max_block_size > LZX_MAX_WINDOW_SIZE)
+		return 0;
+
+	order = fls32(max_block_size);
+
+	if (((u32)1 << order) != max_block_size)
+		order++;
+
+	return max(order, LZX_MIN_WINDOW_ORDER);
 }
 
-/* Given a valid LZX window size, return the number of symbols that will exist
+/* Given a valid LZX window order, return the number of symbols that will exist
  * in the main Huffman code.  */
 unsigned
-lzx_get_num_main_syms(u32 window_size)
+lzx_get_num_main_syms(unsigned window_order)
 {
+	u32 window_size = (u32)1 << window_order;
+
 	/* NOTE: the calculation *should* be as follows:
 	 *
 	 * u32 max_offset = window_size - LZX_MIN_MATCH_LEN;
-	 * u32 max_formatted_offset = max_offset + LZX_OFFSET_OFFSET;
-	 * u32 num_position_slots = 1 + lzx_get_position_slot_raw(max_formatted_offset);
+	 * u32 max_adjusted_offset = max_offset + LZX_OFFSET_OFFSET;
+	 * u32 num_offset_slots = 1 + lzx_get_offset_slot_raw(max_adjusted_offset);
 	 *
 	 * However since LZX_MIN_MATCH_LEN == LZX_OFFSET_OFFSET, we would get
-	 * max_formatted_offset == window_size, which would bump the number of
-	 * position slots up by 1 since every valid LZX window size is equal to
-	 * a position base value.  The format doesn't do this, and instead
+	 * max_adjusted_offset == window_size, which would bump the number of
+	 * offset slots up by 1 since every valid LZX window size is equal to an
+	 * offset slot base value.  The format doesn't do this, and instead
 	 * disallows matches with minimum length and maximum offset.  This sets
-	 * max_formatted_offset = window_size - 1, so instead we must calculate:
+	 * max_adjusted_offset = window_size - 1, so instead we must calculate:
 	 *
-	 * num_position_slots = 1 + lzx_get_position_slot_raw(window_size - 1);
+	 * num_offset_slots = 1 + lzx_get_offset_slot_raw(window_size - 1);
 	 *
 	 * ... which is the same as
 	 *
-	 * num_position_slots = lzx_get_position_slot_raw(window_size);
+	 * num_offset_slots = lzx_get_offset_slot_raw(window_size);
 	 *
-	 * ... since every valid window size is equal to a position base value.
+	 * ... since every valid window size is equal to an offset base value.
 	 */
-	unsigned num_position_slots = lzx_get_position_slot_raw(window_size);
+	unsigned num_offset_slots = lzx_get_offset_slot_raw(window_size);
 
 	/* Now calculate the number of main symbols as LZX_NUM_CHARS literal
-	 * symbols, plus 8 symbols per position slot (since there are 8 possible
-	 * length headers, and we need all (position slot, length header)
+	 * symbols, plus 8 symbols per offset slot (since there are 8 possible
+	 * length headers, and we need all (offset slot, length header)
 	 * combinations).  */
-	return LZX_NUM_CHARS + (num_position_slots << 3);
+	return LZX_NUM_CHARS + (num_offset_slots << 3);
 }
 
 static void
-do_translate_target(s32 *target, s32 input_pos)
+do_translate_target(void *target, s32 input_pos)
 {
 	s32 abs_offset, rel_offset;
 
-	/* XXX: This assumes unaligned memory accesses are okay.  */
-	rel_offset = le32_to_cpu(*target);
+	rel_offset = get_unaligned_u32_le(target);
 	if (rel_offset >= -input_pos && rel_offset < LZX_WIM_MAGIC_FILESIZE) {
 		if (rel_offset < LZX_WIM_MAGIC_FILESIZE - input_pos) {
 			/* "good translation" */
@@ -129,30 +137,27 @@ do_translate_target(s32 *target, s32 input_pos)
 			/* "compensating translation" */
 			abs_offset = rel_offset - LZX_WIM_MAGIC_FILESIZE;
 		}
-		*target = cpu_to_le32(abs_offset);
+		put_unaligned_u32_le(abs_offset, target);
 	}
 }
 
 static void
-undo_translate_target(s32 *target, s32 input_pos)
+undo_translate_target(void *target, s32 input_pos)
 {
 	s32 abs_offset, rel_offset;
 
-	/* XXX: This assumes unaligned memory accesses are okay.  */
-	abs_offset = le32_to_cpu(*target);
+	abs_offset = get_unaligned_u32_le(target);
 	if (abs_offset >= 0) {
 		if (abs_offset < LZX_WIM_MAGIC_FILESIZE) {
 			/* "good translation" */
 			rel_offset = abs_offset - input_pos;
-
-			*target = cpu_to_le32(rel_offset);
+			put_unaligned_u32_le(rel_offset, target);
 		}
 	} else {
 		if (abs_offset >= -input_pos) {
 			/* "compensating translation" */
 			rel_offset = abs_offset + LZX_WIM_MAGIC_FILESIZE;
-
-			*target = cpu_to_le32(rel_offset);
+			put_unaligned_u32_le(rel_offset, target);
 		}
 	}
 }
@@ -183,104 +188,153 @@ undo_translate_target(s32 *target, s32 input_pos)
  * in calculating the translated jump targets.  But in WIM files, this file size
  * is always the same (LZX_WIM_MAGIC_FILESIZE == 12000000).  */
 
-static
-#ifndef __SSE2__
-inline  /* Although inlining the 'process_target' function still speeds up the
-	   SSE2 case, it bloats the binary more.  */
-#endif
-void
-lzx_e8_filter(u8 *data, u32 size, void (*process_target)(s32 *, s32))
+static void
+lzx_e8_filter(u8 *data, u32 size, void (*process_target)(void *, s32))
 {
-#ifdef __SSE2__
-	/* SSE2 vectorized implementation for x86_64.  This speeds up LZX
-	 * decompression by about 5-8% overall.  (Usually --- the performance
-	 * actually regresses slightly in the degenerate case that the data
-	 * consists entirely of 0xe8 bytes.  Also, this optimization affects
-	 * compression as well, but the percentage improvement is less because
-	 * LZX compression is much slower than LZX decompression. ) */
-	__m128i *p128 = (__m128i *)data;
-	u32 valid_mask = 0xFFFFFFFF;
-
-	if (size >= 32 && (uintptr_t)data % 16 == 0) {
-		__m128i * const end128 = p128 + size / 16 - 1;
-
-		/* Create a vector of all 0xe8 bytes  */
-		const __m128i e8_bytes = _mm_set1_epi8(0xe8);
-
-		/* Iterate through the 16-byte vectors in the input.  */
-		do {
-			/* Compare the current 16-byte vector with the vector of
-			 * all 0xe8 bytes.  This produces 0xff where the byte is
-			 * 0xe8 and 0x00 where it is not.  */
-			__m128i cmpresult = _mm_cmpeq_epi8(*p128, e8_bytes);
-
-			/* Map the comparison results into a single 16-bit
-			 * number.  It will contain a 1 bit when the
-			 * corresponding byte in the current 16-byte vector is
-			 * an e8 byte.  Note: the low-order bit corresponds to
-			 * the first (lowest address) byte.  */
-			u32 e8_mask = _mm_movemask_epi8(cmpresult);
-
-			if (!e8_mask) {
-				/* If e8_mask is 0, then none of these 16 bytes
-				 * have value 0xe8.  No e8 translation is
-				 * needed, and there is no restriction that
-				 * carries over to the next 16 bytes.  */
-				valid_mask = 0xFFFFFFFF;
-			} else {
-				/* At least one byte has value 0xe8.
-				 *
-				 * The AND with valid_mask accounts for the fact
-				 * that we can't start an e8 translation that
-				 * overlaps the previous one.  */
-				while ((e8_mask &= valid_mask)) {
-
-					/* Count the number of trailing zeroes
-					 * in e8_mask.  This will produce the
-					 * index of the byte, within the 16, at
-					 * which the next e8 translation should
-					 * be done.  */
-					u32 bit = __builtin_ctz(e8_mask);
-
-					/* Do (or undo) the e8 translation.  */
-					u8 *p8 = (u8 *)p128 + bit;
-					(*process_target)((s32 *)(p8 + 1),
-							  p8 - data);
-
-					/* Don't start an e8 translation in the
-					 * next 4 bytes.  */
-					valid_mask &= ~((u32)0x1F << bit);
-				}
-				/* Moving on to the next vector.  Shift and set
-				 * valid_mask accordingly.  */
-				valid_mask >>= 16;
-				valid_mask |= 0xFFFF0000;
-			}
-		} while (++p128 < end128);
+
+#if !defined(__SSE2__) && !defined(__AVX2__)
+	/*
+	 * A worthwhile optimization is to push the end-of-buffer check into the
+	 * relatively rare E8 case.  This is possible if we replace the last six
+	 * bytes of data with E8 bytes; then we are guaranteed to hit an E8 byte
+	 * before reaching end-of-buffer.  In addition, this scheme guarantees
+	 * that no translation can begin following an E8 byte in the last 10
+	 * bytes because a 4-byte offset containing E8 as its high byte is a
+	 * large negative number that is not valid for translation.  That is
+	 * exactly what we need.
+	 */
+	u8 *tail;
+	u8 saved_bytes[6];
+	u8 *p;
+
+	if (size <= 10)
+		return;
+
+	tail = &data[size - 6];
+	memcpy(saved_bytes, tail, 6);
+	memset(tail, 0xE8, 6);
+	p = data;
+	for (;;) {
+		while (*p != 0xE8)
+			p++;
+		if (p >= tail)
+			break;
+		(*process_target)(p + 1, p - data);
+		p += 5;
 	}
+	memcpy(tail, saved_bytes, 6);
+#else
+	/* SSE2 or AVX-2 optimized version for x86_64  */
+
+	u8 *p = data;
+	u64 valid_mask = ~0;
 
-	u8 *p8 = (u8 *)p128;
-	while (!(valid_mask & 1)) {
-		p8++;
+	if (size <= 10)
+		return;
+#ifdef __AVX2__
+#  define ALIGNMENT_REQUIRED 32
+#else
+#  define ALIGNMENT_REQUIRED 16
+#endif
+
+	/* Process one byte at a time until the pointer is properly aligned.  */
+	while ((uintptr_t)p % ALIGNMENT_REQUIRED != 0) {
+		if (p >= data + size - 10)
+			return;
+		if (*p == 0xE8 && (valid_mask & 1)) {
+			(*process_target)(p + 1, p - data);
+			valid_mask &= ~0x1F;
+		}
+		p++;
 		valid_mask >>= 1;
+		valid_mask |= (u64)1 << 63;
 	}
-#else /* __SSE2__ */
-	u8 *p8 = data;
-#endif /* !__SSE2__ */
-
-	if (size > 10) {
-		/* Finish any bytes that weren't processed by the vectorized
-		 * implementation.  */
-		u8 *p8_end = data + size - 10;
-		do {
-			if (*p8 == 0xe8) {
-				(*process_target)((s32 *)(p8 + 1), p8 - data);
-				p8 += 5;
-			} else {
-				p8++;
+
+	if (data + size - p >= 64) {
+
+		/* Vectorized processing  */
+
+		/* Note: we use a "trap" E8 byte to eliminate the need to check
+		 * for end-of-buffer in the inner loop.  This byte is carefully
+		 * positioned so that it will never be changed by a previous
+		 * translation before it is detected.  */
+
+		u8 *trap = p + ((data + size - p) & ~31) - 32 + 4;
+		u8 saved_byte = *trap;
+		*trap = 0xE8;
+
+		for (;;) {
+			u32 e8_mask;
+			u8 *orig_p = p;
+	#ifdef __SSE2__
+			const __m128i e8_bytes = _mm_set1_epi8(0xE8);
+			for (;;) {
+				/* Read the next 32 bytes of data and test them
+				 * for E8 bytes.  */
+				__m128i bytes1 = *(const __m128i *)p;
+				__m128i bytes2 = *(const __m128i *)(p + 16);
+				__m128i cmpresult1 = _mm_cmpeq_epi8(bytes1, e8_bytes);
+				__m128i cmpresult2 = _mm_cmpeq_epi8(bytes2, e8_bytes);
+				u32 mask1 = _mm_movemask_epi8(cmpresult1);
+				u32 mask2 = _mm_movemask_epi8(cmpresult2);
+				/* The masks have a bit set for each E8 byte.
+				 * We stay in this fast inner loop as long as
+				 * there are no E8 bytes.  */
+				if (mask1 | mask2) {
+					e8_mask = mask1 | (mask2 << 16);
+					break;
+				}
+				p += 32;
 			}
-		} while (p8 < p8_end);
+	#else
+			/* AVX-2  */
+			const __m256i e8_bytes = _mm256_set1_epi8(0xE8);
+			for (;;) {
+				__m256i bytes = *(const __m256i *)p;
+				__m256i cmpresult = _mm256_cmpeq_epi8(bytes, e8_bytes);
+				e8_mask = _mm256_movemask_epi8(cmpresult);
+				if (e8_mask)
+					break;
+				p += 32;
+			}
+	#endif
+
+			/* Did we pass over data with no E8 bytes?  */
+			if (p != orig_p)
+				valid_mask = ~0;
+
+			/* Are we nearing end-of-buffer?  */
+			if (p == trap - 4)
+				break;
+
+			/* Process the E8 bytes.  However, the AND with
+			 * 'valid_mask' ensures we never process an E8 byte that
+			 * was itself part of a translation target.  */
+			while ((e8_mask &= valid_mask)) {
+				unsigned bit = ffs32(e8_mask);
+				(*process_target)(p + bit + 1, p + bit - data);
+				valid_mask &= ~((u64)0x1F << bit);
+			}
+
+			valid_mask >>= 32;
+			valid_mask |= 0xFFFFFFFF00000000;
+			p += 32;
+		}
+
+		*trap = saved_byte;
+	}
+
+	/* Approaching the end of the buffer; process one byte a time.  */
+	while (p < data + size - 10) {
+		if (*p == 0xE8 && (valid_mask & 1)) {
+			(*process_target)(p + 1, p - data);
+			valid_mask &= ~0x1F;
+		}
+		p++;
+		valid_mask >>= 1;
+		valid_mask |= (u64)1 << 63;
 	}
+#endif /* __SSE2__ || __AVX2__ */
 }
 
 void