/*
 * lzx_common.c - Common code for LZX compression and decompression.
 */

/*
 * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
 *
 * This file is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option) any
 * later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this file; if not, see http://www.gnu.org/licenses/.
 */

#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#include <string.h>

#ifdef __SSE2__
#  include <emmintrin.h>
#endif

#ifdef __AVX2__
#  include <immintrin.h>
#endif

#include "wimlib/bitops.h"
#include "wimlib/endianness.h"
#include "wimlib/lzx_common.h"
#include "wimlib/unaligned.h"
#include "wimlib/util.h"

/* Mapping: offset slot => first match offset that uses that offset slot.  */
const u32 lzx_offset_slot_base[LZX_MAX_OFFSET_SLOTS + 1] = {
        0      , 1      , 2      , 3      , 4      ,   /* 0  --- 4  */
        6      , 8      , 12     , 16     , 24     ,   /* 5  --- 9  */
        32     , 48     , 64     , 96     , 128    ,   /* 10 --- 14 */
        192    , 256    , 384    , 512    , 768    ,   /* 15 --- 19 */
        1024   , 1536   , 2048   , 3072   , 4096   ,   /* 20 --- 24 */
        6144   , 8192   , 12288  , 16384  , 24576  ,   /* 25 --- 29 */
        32768  , 49152  , 65536  , 98304  , 131072 ,   /* 30 --- 34 */
        196608 , 262144 , 393216 , 524288 , 655360 ,   /* 35 --- 39 */
        786432 , 917504 , 1048576, 1179648, 1310720,   /* 40 --- 44 */
        1441792, 1572864, 1703936, 1835008, 1966080,   /* 45 --- 49 */
        2097152                                        /* 50 (sentinel) */
};

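/* Worked example from the table above: an adjusted offset of 100 uses offset
 * slot 13, since lzx_offset_slot_base[13] == 96 <= 100 < 128 ==
 * lzx_offset_slot_base[14]; the remainder 100 - 96 == 4 is sent in the slot's
 * extra bits (5 bits for this slot, per the next table).  */
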
/* Mapping: offset slot => how many extra bits must be read and added to the
 * corresponding offset slot base to decode the match offset.  */
const u8 lzx_extra_offset_bits[LZX_MAX_OFFSET_SLOTS] = {
        0 , 0 , 0 , 0 , 1 ,
        1 , 2 , 2 , 3 , 3 ,
        4 , 4 , 5 , 5 , 6 ,
        6 , 7 , 7 , 8 , 8 ,
        9 , 9 , 10, 10, 11,
        11, 12, 12, 13, 13,
        14, 14, 15, 15, 16,
        16, 17, 17, 17, 17,
        17, 17, 17, 17, 17,
        17, 17, 17, 17, 17,
};

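/* Together, the two tables drive offset decoding.  A sketch of the decode
 * step, where read_bits() stands in for a hypothetical bitstream helper:
 *
 *      u32 adjusted_offset = lzx_offset_slot_base[slot] +
 *                            read_bits(is, lzx_extra_offset_bits[slot]);
 *
 * e.g. slot 13 with extra bits 00100 (4) decodes to 96 + 4 == 100.  */
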
/* Round the specified buffer size up to the next valid LZX window size, and
 * return its order (log2).  Or, if the buffer size is 0 or greater than the
 * largest valid LZX window size, return 0.  */
unsigned
lzx_get_window_order(size_t max_bufsize)
{
        unsigned order;

        if (max_bufsize == 0 || max_bufsize > LZX_MAX_WINDOW_SIZE)
                return 0;

        order = fls32(max_bufsize);

        if (((u32)1 << order) != max_bufsize)
                order++;

        return max(order, LZX_MIN_WINDOW_ORDER);
}

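/* Worked example: max_bufsize == 100000 gives fls32(100000) == 16; since
 * 1 << 16 == 65536 != 100000, the size is rounded up to order 17, i.e. a
 * 128 KiB window.  */
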
/* Given a valid LZX window order, return the number of symbols that will exist
 * in the main Huffman code.  */
unsigned
lzx_get_num_main_syms(unsigned window_order)
{
        /* Note: one would expect that the maximum match offset would be
         * 'window_size - LZX_MIN_MATCH_LEN', which would occur if the first two
         * bytes were to match the last two bytes.  However, the format
         * disallows this case.  This reduces the number of needed offset slots
         * in some cases.  */
        u32 window_size = (u32)1 << window_order;
        u32 max_adjusted_offset = (window_size - LZX_MIN_MATCH_LEN - 1) +
                                  LZX_OFFSET_ADJUSTMENT;
        unsigned num_offset_slots = 30;
        while (max_adjusted_offset >= lzx_offset_slot_base[num_offset_slots])
                num_offset_slots++;

        return LZX_NUM_CHARS + (num_offset_slots * LZX_NUM_LEN_HEADERS);
}

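/* Worked example, assuming the usual constants (LZX_MIN_MATCH_LEN == 2,
 * LZX_OFFSET_ADJUSTMENT == 2, LZX_NUM_CHARS == 256, LZX_NUM_LEN_HEADERS == 8):
 * window_order == 15 gives max_adjusted_offset == (32768 - 2 - 1) + 2 == 32767,
 * which is below lzx_offset_slot_base[30] == 32768, so num_offset_slots stays
 * at 30 and the main code has 256 + 30 * 8 == 496 symbols.  */
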
static void
do_translate_target(void *target, s32 input_pos)
{
        s32 abs_offset, rel_offset;

        rel_offset = get_unaligned_u32_le(target);
        if (rel_offset >= -input_pos && rel_offset < LZX_WIM_MAGIC_FILESIZE) {
                if (rel_offset < LZX_WIM_MAGIC_FILESIZE - input_pos) {
                        /* "good translation" */
                        abs_offset = rel_offset + input_pos;
                } else {
                        /* "compensating translation" */
                        abs_offset = rel_offset - LZX_WIM_MAGIC_FILESIZE;
                }
                put_unaligned_u32_le(abs_offset, target);
        }
}

static void
undo_translate_target(void *target, s32 input_pos)
{
        s32 abs_offset, rel_offset;

        abs_offset = get_unaligned_u32_le(target);
        if (abs_offset >= 0) {
                if (abs_offset < LZX_WIM_MAGIC_FILESIZE) {
                        /* "good translation" */
                        rel_offset = abs_offset - input_pos;
                        put_unaligned_u32_le(rel_offset, target);
                }
        } else {
                if (abs_offset >= -input_pos) {
                        /* "compensating translation" */
                        rel_offset = abs_offset + LZX_WIM_MAGIC_FILESIZE;
                        put_unaligned_u32_le(rel_offset, target);
                }
        }
}

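/* Illustrative round trip (numbers invented for this sketch): with
 * input_pos == 1000, a stored rel_offset of 5000 takes the "good translation"
 * path (5000 < 12000000 - 1000) and becomes abs_offset == 6000; undoing at the
 * same position recovers 6000 - 1000 == 5000.  Near the magic file size the
 * "compensating translation" applies instead: rel_offset == 11999500 at
 * input_pos == 1000 becomes abs_offset == 11999500 - 12000000 == -500, and
 * undoing adds the magic file size back.  */
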
/*
 * Do or undo the 'E8' preprocessing used in LZX.  Before compression, the
 * uncompressed data is preprocessed by changing the targets of x86 CALL
 * instructions from relative offsets to absolute offsets.  After decompression,
 * the translation is undone by changing the targets of x86 CALL instructions
 * from absolute offsets to relative offsets.
 *
 * Note that despite its intent, E8 preprocessing can be done on any data even
 * if it is not actually x86 machine code.  In fact, E8 preprocessing appears to
 * always be used in LZX-compressed resources in WIM files; there is no bit to
 * indicate whether it is used or not, unlike in the LZX compressed format as
 * used in cabinet files, where a bit is reserved for that purpose.
 *
 * E8 preprocessing is disabled in the last 6 bytes of the uncompressed data,
 * which really means the 5-byte CALL instruction cannot start in the last 10
 * bytes of the uncompressed data.  This is one of the errors in the LZX
 * documentation.
 *
 * E8 preprocessing does not appear to be disabled after the 32768th chunk of a
 * WIM resource, which apparently is another difference from the LZX compression
 * used in cabinet files.
 *
 * E8 processing is supposed to take the file size as a parameter, as it is used
 * in calculating the translated jump targets.  But in WIM files, this file size
 * is always the same (LZX_WIM_MAGIC_FILESIZE == 12000000).
 */
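/* Concrete example of the byte-level effect (illustrative): at position 100,
 * the sequence E8 10 00 00 00 encodes a CALL with rel_offset 0x10 == 16.
 * Preprocessing rewrites the 4 little-endian target bytes to
 * abs_offset == 16 + 100 == 116 (0x74), giving E8 74 00 00 00.  */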
static void
lzx_e8_filter(u8 *data, u32 size, void (*process_target)(void *, s32))
{
#if !defined(__SSE2__) && !defined(__AVX2__)
        /*
         * A worthwhile optimization is to push the end-of-buffer check into the
         * relatively rare E8 case.  This is possible if we replace the last six
         * bytes of data with E8 bytes; then we are guaranteed to hit an E8 byte
         * before reaching end-of-buffer.  In addition, this scheme guarantees
         * that no translation can begin following an E8 byte in the last 10
         * bytes because a 4-byte offset containing E8 as its high byte is a
         * large negative number that is not valid for translation.  That is
         * exactly what we need.
         */
        u8 *tail;
        u8 saved_bytes[6];
        u8 *p;

        if (size <= 10)
                return;

        tail = &data[size - 6];
        memcpy(saved_bytes, tail, 6);
        memset(tail, 0xE8, 6);
        p = data;
        for (;;) {
                /* Scan for the next E8 byte; the sentinel bytes written above
                 * guarantee this loop terminates before end-of-buffer.  */
                while (*p != 0xE8)
                        p++;
                if (p >= tail)
                        break;
                (*process_target)(p + 1, p - data);
                p += 5;
        }
        memcpy(tail, saved_bytes, 6);
#else
        /* SSE2 or AVX2 optimized version for x86_64  */

        u8 *p = data;
        u64 valid_mask = ~0;

        if (size <= 10)
                return;

#ifdef __AVX2__
#  define ALIGNMENT_REQUIRED 32
#else
#  define ALIGNMENT_REQUIRED 16
#endif

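        /* Bit i of 'valid_mask' corresponds to the byte at p[i], bit 0 being
         * the byte at 'p' itself: a set bit means that byte is allowed to
         * begin a translation.  Bits are cleared for bytes that were
         * overwritten as the 4-byte target of a previous translation, and the
         * mask slides along as 'p' advances.  */
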
        /* Process one byte at a time until the pointer is properly aligned.  */
        while ((uintptr_t)p % ALIGNMENT_REQUIRED != 0) {
                if (p >= data + size - 10)
                        return;
                if (*p == 0xE8 && (valid_mask & 1)) {
                        (*process_target)(p + 1, p - data);
                        valid_mask &= ~0x1F;
                }
                p++;
                valid_mask >>= 1;
                valid_mask |= (u64)1 << 63;
        }

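        /* With 'p' aligned, switch to SIMD scanning if at least 64 bytes
         * remain; the loop below consumes the data 32 bytes at a time.  */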
        if (data + size - p >= 64) {

                /* Vectorized processing  */

                /* Note: we use a "trap" E8 byte to eliminate the need to check
                 * for end-of-buffer in the inner loop.  This byte is carefully
                 * positioned so that it will never be changed by a previous
                 * translation before it is detected.  */

                u8 *trap = p + ((data + size - p) & ~31) - 32 + 4;
                u8 saved_byte = *trap;
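                /* '(data + size - p) & ~31' rounds the remaining length down
                 * to whole 32-byte chunks, so 'trap' sits 4 bytes into the
                 * last full chunk: just past the farthest byte that a
                 * translation starting in an earlier chunk could overwrite.  */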
                *trap = 0xE8;

                for (;;) {
                        u32 e8_mask;
                        u8 *orig_p = p;
        #ifdef __AVX2__
                        const __m256i e8_bytes = _mm256_set1_epi8(0xE8);
                        for (;;) {
                                __m256i bytes = *(const __m256i *)p;
                                __m256i cmpresult = _mm256_cmpeq_epi8(bytes, e8_bytes);
                                e8_mask = _mm256_movemask_epi8(cmpresult);
                                if (e8_mask)
                                        break;
                                p += 32;
                        }
        #else
                        const __m128i e8_bytes = _mm_set1_epi8(0xE8);
                        for (;;) {
                                /* Read the next 32 bytes of data and test them
                                 * for E8 bytes.  */
                                __m128i bytes1 = *(const __m128i *)p;
                                __m128i bytes2 = *(const __m128i *)(p + 16);
                                __m128i cmpresult1 = _mm_cmpeq_epi8(bytes1, e8_bytes);
                                __m128i cmpresult2 = _mm_cmpeq_epi8(bytes2, e8_bytes);
                                u32 mask1 = _mm_movemask_epi8(cmpresult1);
                                u32 mask2 = _mm_movemask_epi8(cmpresult2);
                                /* The masks have a bit set for each E8 byte.
                                 * We stay in this fast inner loop as long as
                                 * there are no E8 bytes.  */
                                if (mask1 | mask2) {
                                        e8_mask = mask1 | (mask2 << 16);
                                        break;
                                }
                                p += 32;
                        }
        #endif
                        /* Did we pass over data with no E8 bytes?  */
                        if (p != orig_p)
                                valid_mask = ~0;

                        /* Are we nearing end-of-buffer?  */
                        if (p == trap - 4)
                                break;

                        /* Process the E8 bytes.  However, the AND with
                         * 'valid_mask' ensures we never process an E8 byte that
                         * was itself part of a translation target.  */
                        while ((e8_mask &= valid_mask)) {
                                unsigned bit = ffs32(e8_mask);
                                (*process_target)(p + bit + 1, p + bit - data);
                                /* The E8 byte and its 4 target bytes cannot
                                 * begin further translations.  */
                                valid_mask &= ~((u64)0x1F << bit);
                        }

                        valid_mask >>= 32;
                        valid_mask |= 0xFFFFFFFF00000000;

                        p += 32;
                }

                *trap = saved_byte;
        }

        /* Approaching the end of the buffer; process one byte at a time.  */
        while (p < data + size - 10) {
                if (*p == 0xE8 && (valid_mask & 1)) {
                        (*process_target)(p + 1, p - data);
                        valid_mask &= ~0x1F;
                }
                p++;
                valid_mask >>= 1;
                valid_mask |= (u64)1 << 63;
        }
#endif /* __SSE2__ || __AVX2__ */
}

void
lzx_do_e8_preprocessing(u8 *data, u32 size)
{
        lzx_e8_filter(data, size, do_translate_target);
}

void
lzx_undo_e8_preprocessing(u8 *data, u32 size)
{
        lzx_e8_filter(data, size, undo_translate_target);
}
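
/*
 * Usage sketch (hypothetical caller, not part of this file): the two entry
 * points are exact inverses over the same buffer, e.g.
 *
 *      lzx_do_e8_preprocessing(buf, size);     // before compression
 *      // ...compress, store, decompress...
 *      lzx_undo_e8_preprocessing(buf, size);   // after decompression
 */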