/*
* Copyright (C) 2012, 2013 Eric Biggers
*
- * This file is part of wimlib, a library for working with WIM files.
+ * This file is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your option) any
+ * later version.
*
- * wimlib is free software; you can redistribute it and/or modify it under the
- * terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 3 of the License, or (at your option)
- * any later version.
- *
- * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
- * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
*
- * You should have received a copy of the GNU General Public License
- * along with wimlib; if not, see http://www.gnu.org/licenses/.
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this file; if not, see http://www.gnu.org/licenses/.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
+#include "wimlib/bitops.h"
+#include "wimlib/endianness.h"
#include "wimlib/lzx.h"
+#include "wimlib/unaligned.h"
+#include "wimlib/util.h"
+
+#ifdef __SSE2__
+# include <emmintrin.h>
+#endif
-#ifdef USE_LZX_EXTRA_BITS_ARRAY
-/* LZX uses what it calls 'position slots' to represent match offsets.
- * What this means is that a small 'position slot' number and a small
- * offset from that slot are encoded instead of one large offset for
- * every match.
- * - lzx_position_base is an index to the position slot bases
- * - lzx_extra_bits states how many bits of offset-from-base data is needed.
+/* Mapping: offset slot => first match offset that uses that offset slot. */
-const u8 lzx_extra_bits[] = {
+const u32 lzx_offset_slot_base[LZX_MAX_OFFSET_SLOTS] = {
+ 0 , 1 , 2 , 3 , 4 , /* 0 --- 4 */
+ 6 , 8 , 12 , 16 , 24 , /* 5 --- 9 */
+ 32 , 48 , 64 , 96 , 128 , /* 10 --- 14 */
+ 192 , 256 , 384 , 512 , 768 , /* 15 --- 19 */
+ 1024 , 1536 , 2048 , 3072 , 4096 , /* 20 --- 24 */
+ 6144 , 8192 , 12288 , 16384 , 24576 , /* 25 --- 29 */
+ 32768 , 49152 , 65536 , 98304 , 131072 , /* 30 --- 34 */
+ 196608 , 262144 , 393216 , 524288 , 655360 , /* 35 --- 39 */
+ 786432 , 917504 , 1048576, 1179648, 1310720, /* 40 --- 44 */
+ 1441792, 1572864, 1703936, 1835008, 1966080, /* 45 --- 49 */
+ 2097152 /* 50 */
+};
+
+/* Mapping: offset slot => how many extra bits must be read and added to the
+ * corresponding offset slot base to decode the match offset. */
+const u8 lzx_extra_offset_bits[LZX_MAX_OFFSET_SLOTS] = {
0 , 0 , 0 , 0 , 1 ,
1 , 2 , 2 , 3 , 3 ,
4 , 4 , 5 , 5 , 6 ,
17, 17, 17, 17, 17,
17
};
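+
+/* Worked example of the two tables above: a match offset of 20 uses offset
+ * slot 8, since lzx_offset_slot_base[8] = 16 is the largest base not
+ * exceeding 20.  Slot 8 has lzx_extra_offset_bits[8] = 3, so the encoder
+ * writes the remainder 20 - 16 = 4 in 3 extra bits and the decoder recovers
+ * 16 + 4 = 20; offsets 16 through 23 all share slot 8. */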
+
+/* Round the specified compression block size (not LZX block size) up to the
+ * next valid LZX window size, and return its order (log2). Or, if the block
+ * size is 0 or greater than the largest valid LZX window size, return 0. */
+unsigned
+lzx_get_window_order(size_t max_block_size)
+{
+ unsigned order;
+
+ if (max_block_size == 0 || max_block_size > LZX_MAX_WINDOW_SIZE)
+ return 0;
+
+ order = fls32(max_block_size);
+
+ if (((u32)1 << order) != max_block_size)
+ order++;
+
+ return max(order, LZX_MIN_WINDOW_ORDER);
+}
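+
+/* Illustration of the rounding above (assuming fls32() returns the 0-based
+ * index of the highest set bit, which the power-of-two check requires):
+ * max_block_size = 100000 gives fls32(100000) = 16, and since 1 << 16 is not
+ * 100000 the order is bumped to 17, i.e. a 128 KiB window.  An exact power
+ * of two such as 32768 maps to its own order, 15, which is also
+ * LZX_MIN_WINDOW_ORDER. */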
+
+/* Given a valid LZX window order, return the number of symbols that will exist
+ * in the main Huffman code. */
+unsigned
+lzx_get_num_main_syms(unsigned window_order)
+{
+ u32 window_size = (u32)1 << window_order;
+
+ /* NOTE: the calculation *should* be as follows:
+ *
+ * u32 max_offset = window_size - LZX_MIN_MATCH_LEN;
+ * u32 max_adjusted_offset = max_offset + LZX_OFFSET_OFFSET;
+ * u32 num_offset_slots = 1 + lzx_get_offset_slot_raw(max_adjusted_offset);
+ *
+ * However since LZX_MIN_MATCH_LEN == LZX_OFFSET_OFFSET, we would get
+ * max_adjusted_offset == window_size, which would bump the number of
+	 * offset slots up by 1 since every valid LZX window size is equal to an
+	 * offset slot base value.  The format doesn't do this, and instead
+ * disallows matches with minimum length and maximum offset. This sets
+ * max_adjusted_offset = window_size - 1, so instead we must calculate:
+ *
+ * num_offset_slots = 1 + lzx_get_offset_slot_raw(window_size - 1);
+ *
+ * ... which is the same as
+ *
+ * num_offset_slots = lzx_get_offset_slot_raw(window_size);
+ *
+	 * ... since every valid window size is equal to an offset slot base
+	 * value.
+ */
+ unsigned num_offset_slots = lzx_get_offset_slot_raw(window_size);
+
+ /* Now calculate the number of main symbols as LZX_NUM_CHARS literal
+ * symbols, plus 8 symbols per offset slot (since there are 8 possible
+ * length headers, and we need all (offset slot, length header)
+ * combinations). */
+ return LZX_NUM_CHARS + (num_offset_slots << 3);
+}
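+
+/* For example, with window_order = 15 (a 32768-byte window),
+ * lzx_get_offset_slot_raw(32768) = 30 because 32768 is the base of offset
+ * slot 30, giving LZX_NUM_CHARS + (30 << 3) = 256 + 240 = 496 main symbols.
+ * This agrees with the 30 position slots the LZX documentation lists for a
+ * 32 KiB window. */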
+
+static void
+do_translate_target(void *target, s32 input_pos)
+{
+ s32 abs_offset, rel_offset;
+
+ rel_offset = get_unaligned_u32_le(target);
+ if (rel_offset >= -input_pos && rel_offset < LZX_WIM_MAGIC_FILESIZE) {
+ if (rel_offset < LZX_WIM_MAGIC_FILESIZE - input_pos) {
+ /* "good translation" */
+ abs_offset = rel_offset + input_pos;
+ } else {
+ /* "compensating translation" */
+ abs_offset = rel_offset - LZX_WIM_MAGIC_FILESIZE;
+ }
+ put_unaligned_u32_le(abs_offset, target);
+ }
+}
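+
+/* Example of the arithmetic above: with input_pos = 0x1000, a stored relative
+ * target of 0x100 takes the "good translation" branch and becomes the
+ * absolute target 0x1100, while a relative target of -0x800 (still
+ * >= -input_pos) becomes 0x800. */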
+
+static void
+undo_translate_target(void *target, s32 input_pos)
+{
+ s32 abs_offset, rel_offset;
+
+ abs_offset = get_unaligned_u32_le(target);
+ if (abs_offset >= 0) {
+ if (abs_offset < LZX_WIM_MAGIC_FILESIZE) {
+ /* "good translation" */
+ rel_offset = abs_offset - input_pos;
+ put_unaligned_u32_le(rel_offset, target);
+ }
+ } else {
+ if (abs_offset >= -input_pos) {
+ /* "compensating translation" */
+ rel_offset = abs_offset + LZX_WIM_MAGIC_FILESIZE;
+ put_unaligned_u32_le(rel_offset, target);
+ }
+ }
+}
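+
+/* Both helpers operate on the 4-byte little-endian displacement that follows
+ * the 0xe8 opcode of an x86 "call rel32" instruction.  For instance, the
+ * bytes e8 00 01 00 00 at position 0x1000 carry the relative target 0x100;
+ * do_translate_target() rewrites the displacement in place to 00 11 00 00
+ * (absolute target 0x1100), and undo_translate_target() restores the
+ * original bytes. */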
+
+/*
+ * Do or undo the 'E8' preprocessing used in LZX. Before compression, the
+ * uncompressed data is preprocessed by changing the targets of x86 CALL
+ * instructions from relative offsets to absolute offsets. After decompression,
+ * the translation is undone by changing the targets of x86 CALL instructions
+ * from absolute offsets to relative offsets.
+ *
+ * Note that despite its intent, E8 preprocessing can be done on any data even
+ * if it is not actually x86 machine code. In fact, E8 preprocessing appears to
+ * always be used in LZX-compressed resources in WIM files; there is no bit to
+ * indicate whether it is used or not, unlike in the LZX compressed format as
+ * used in cabinet files, where a bit is reserved for that purpose.
+ *
+ * E8 preprocessing is disabled in the last 6 bytes of the uncompressed data,
+ * which really means the 5-byte call instruction cannot start in the last 10
+ * bytes of the uncompressed data. This is one of the errors in the LZX
+ * documentation.
+ *
+ * E8 preprocessing does not appear to be disabled after the 32768th chunk of a
+ * WIM resource, which apparently is another difference from the LZX compression
+ * used in cabinet files.
+ *
+ * E8 processing is supposed to take the file size as a parameter, as it is used
+ * in calculating the translated jump targets. But in WIM files, this file size
+ * is always the same (LZX_WIM_MAGIC_FILESIZE == 12000000).
+ */
+static
+#ifndef __SSE2__
+inline /* Inlining 'process_target' would speed up even the SSE2 case, but it
+          bloats the binary more. */
#endif
+void
+lzx_e8_filter(u8 *data, u32 size, void (*process_target)(void *, s32))
+{
+ u8 *p = data;
+#ifdef __SSE2__
+ /* SSE2 vectorized implementation for x86_64. This speeds up LZX
+ * decompression by about 5-8% overall. (Usually --- the performance
+ * actually regresses slightly in the degenerate case that the data
+ * consists entirely of 0xe8 bytes. Also, this optimization affects
+ * compression as well, but the percentage improvement is less because
+	 * LZX compression is much slower than LZX decompression.)  */
+ if (size >= 32 && (uintptr_t)p % 16 == 0) {
-const u32 lzx_position_base[] = {
- 0 , 1 , 2 , 3 , 4 ,
- 6 , 8 , 12 , 16 , 24 ,
- 32 , 48 , 64 , 96 , 128 ,
- 192 , 256 , 384 , 512 , 768 ,
- 1024 , 1536 , 2048 , 3072 , 4096 ,
- 6144 , 8192 , 12288 , 16384 , 24576 ,
- 32768 , 49152 , 65536 , 98304 , 131072 ,
- 196608 , 262144 , 393216 , 524288 , 655360 ,
- 786432 , 917504 , 1048576, 1179648, 1310720,
- 1441792, 1572864, 1703936, 1835008, 1966080,
- 2097152
-};
+ u32 valid_mask = 0xFFFFFFFF;
+
+ u8 * const vec_end = p + (size & ~15) - 16;
+
+ /* Create a vector of all 0xe8 bytes */
+ const __m128i e8_bytes = _mm_set1_epi8(0xe8);
+
+ /* Iterate through the 16-byte vectors in the input. */
+ do {
+ /* Compare the current 16-byte vector with the vector of
+ * all 0xe8 bytes. This produces 0xff where the byte is
+ * 0xe8 and 0x00 where it is not. */
+ __m128i cmpresult = _mm_cmpeq_epi8(*(const __m128i *)p,
+ e8_bytes);
+
+ /* Map the comparison results into a single 16-bit
+ * number. It will contain a 1 bit when the
+ * corresponding byte in the current 16-byte vector is
+ * an e8 byte. Note: the low-order bit corresponds to
+ * the first (lowest address) byte. */
+ u32 e8_mask = _mm_movemask_epi8(cmpresult);
+
+ if (!e8_mask) {
+ /* If e8_mask is 0, then none of these 16 bytes
+ * have value 0xe8. No e8 translation is
+ * needed, and there is no restriction that
+ * carries over to the next 16 bytes. */
+ valid_mask = 0xFFFFFFFF;
+ } else {
+ /* At least one byte has value 0xe8.
+ *
+ * The AND with valid_mask accounts for the fact
+ * that we can't start an e8 translation that
+ * overlaps the previous one. */
+ while ((e8_mask &= valid_mask)) {
+
+ /* Count the number of trailing zeroes
+ * in e8_mask. This will produce the
+ * index of the byte, within the 16, at
+ * which the next e8 translation should
+ * be done. */
+ int bit = ffs32(e8_mask);
+
+ /* Do (or undo) the e8 translation. */
+ (*process_target)(p + bit + 1,
+ p + bit - data);
+
+ /* Don't start an e8 translation in the
+ * next 4 bytes. */
+ valid_mask &= ~((u32)0x1F << bit);
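+					/* (0x1F spans 5 bits: the e8 byte
+					 * itself plus its 4 displacement
+					 * bytes.  When bit >= 12, the cleared
+					 * bits extend past bit 15, which is
+					 * why valid_mask is 32 bits: the
+					 * restriction carries into the next
+					 * vector via the shift below.) */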
+ }
+ /* Moving on to the next vector. Shift and set
+ * valid_mask accordingly. */
+ valid_mask >>= 16;
+ valid_mask |= 0xFFFF0000;
+ }
+ } while ((p += 16) < vec_end);
+
+ while (!(valid_mask & 1)) {
+ p++;
+ valid_mask >>= 1;
+ }
+ }
+#endif /* __SSE2__ */
+
+ if (size > 10) {
+		/* Process the remaining bytes one at a time (all of them, if
+		 * the vectorized implementation was not compiled in or was
+		 * skipped). */
+ u8 *end = data + size - 10;
+ do {
+ if (*p == 0xe8) {
+ (*process_target)(p + 1, p - data);
+ p += 5;
+ } else {
+ p++;
+ }
+ } while (p < end);
+ }
+}
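+
+/* Note that the scalar loop in lzx_e8_filter() stops at data + size - 10,
+ * implementing the rule described above that no translation may start in the
+ * last 10 bytes; a buffer of 10 or fewer bytes is returned unmodified. */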
+
+void
+lzx_do_e8_preprocessing(u8 *data, u32 size)
+{
+ lzx_e8_filter(data, size, do_translate_target);
+}
+
+void
+lzx_undo_e8_preprocessing(u8 *data, u32 size)
+{
+ lzx_e8_filter(data, size, undo_translate_target);
+}