/*
 * lzx_common.c - Common code for LZX compression and decompression.
 *
 * Copyright (C) 2012, 2013, 2014 Eric Biggers
 *
 * This file is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option) any
 * later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this file; if not, see http://www.gnu.org/licenses/.
 */
28 #include "wimlib/bitops.h"
29 #include "wimlib/endianness.h"
30 #include "wimlib/lzx_common.h"
31 #include "wimlib/unaligned.h"
32 #include "wimlib/util.h"
35 # include <emmintrin.h>
39 # include <immintrin.h>
/* Mapping: offset slot => first match offset that uses that offset slot.  */
const u32 lzx_offset_slot_base[LZX_MAX_OFFSET_SLOTS] = {
	0      , 1      , 2      , 3      , 4      ,	/* 0  --- 4  */
	6      , 8      , 12     , 16     , 24     ,	/* 5  --- 9  */
	32     , 48     , 64     , 96     , 128    ,	/* 10 --- 14 */
	192    , 256    , 384    , 512    , 768    ,	/* 15 --- 19 */
	1024   , 1536   , 2048   , 3072   , 4096   ,	/* 20 --- 24 */
	6144   , 8192   , 12288  , 16384  , 24576  ,	/* 25 --- 29 */
	32768  , 49152  , 65536  , 98304  , 131072 ,	/* 30 --- 34 */
	196608 , 262144 , 393216 , 524288 , 655360 ,	/* 35 --- 39 */
	786432 , 917504 , 1048576, 1179648, 1310720,	/* 40 --- 44 */
	1441792, 1572864, 1703936, 1835008, 1966080,	/* 45 --- 49 */
};
/* Mapping: offset slot => how many extra bits must be read and added to the
 * corresponding offset slot base to decode the match offset.  Each value is
 * the log2 of the gap between consecutive offset slot bases above.  */
const u8 lzx_extra_offset_bits[LZX_MAX_OFFSET_SLOTS] = {
	0 , 0 , 0 , 0 , 1 ,
	1 , 2 , 2 , 3 , 3 ,
	4 , 4 , 5 , 5 , 6 ,
	6 , 7 , 7 , 8 , 8 ,
	9 , 9 , 10, 10, 11,
	11, 12, 12, 13, 13,
	14, 14, 15, 15, 16,
	16, 17, 17, 17, 17,
	17, 17, 17, 17, 17,
	17, 17, 17, 17, 17,
};
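/*
 * Example (illustrative): an (adjusted) offset of 100 falls in offset slot 13,
 * since lzx_offset_slot_base[13] == 96 and lzx_offset_slot_base[14] == 128.
 * It is coded as slot 13 plus lzx_extra_offset_bits[13] == 5 extra bits whose
 * value is 100 - 96 == 4; decoding recovers 96 + 4 == 100.
 */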
/* Round the specified compression block size (not LZX block size) up to the
 * next valid LZX window size, and return its order (log2).  Or, if the block
 * size is 0 or greater than the largest valid LZX window size, return 0.  */
unsigned
lzx_get_window_order(size_t max_block_size)
{
	unsigned order;

	if (max_block_size == 0 || max_block_size > LZX_MAX_WINDOW_SIZE)
		return 0;

	order = fls32(max_block_size);

	if (((u32)1 << order) != max_block_size)
		order++;

	return max(order, LZX_MIN_WINDOW_ORDER);
}
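/*
 * Example (illustrative, assuming fls32() returns the 0-based index of the
 * highest set bit, as the round-up logic above requires): a 300000-byte input
 * has fls32(300000) == 18, and since 300000 is not a power of two the order is
 * bumped to 19, i.e. the input is matched to a 512 KiB LZX window.
 */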
/* Given a valid LZX window order, return the number of symbols that will exist
 * in the main Huffman code.  */
unsigned
lzx_get_num_main_syms(unsigned window_order)
{
	u32 window_size = (u32)1 << window_order;

	/* NOTE: the calculation *should* be as follows:
	 *
	 * u32 max_offset = window_size - LZX_MIN_MATCH_LEN;
	 * u32 max_adjusted_offset = max_offset + LZX_OFFSET_OFFSET;
	 * u32 num_offset_slots = 1 + lzx_get_offset_slot_raw(max_adjusted_offset);
	 *
	 * However, since LZX_MIN_MATCH_LEN == LZX_OFFSET_OFFSET, we would get
	 * max_adjusted_offset == window_size, which would bump the number of
	 * offset slots up by 1 since every valid LZX window size is equal to an
	 * offset slot base value.  The format doesn't do this, and instead
	 * disallows matches with minimum length and maximum offset.  This sets
	 * max_adjusted_offset = window_size - 1, so instead we must calculate:
	 *
	 * num_offset_slots = 1 + lzx_get_offset_slot_raw(window_size - 1);
	 *
	 * ... which is the same as:
	 *
	 * num_offset_slots = lzx_get_offset_slot_raw(window_size);
	 *
	 * ... since every valid window size is equal to an offset slot base
	 * value.
	 */
	unsigned num_offset_slots = lzx_get_offset_slot_raw(window_size);

	/* Now calculate the number of main symbols as LZX_NUM_CHARS literal
	 * symbols, plus 8 symbols per offset slot (since there are 8 possible
	 * length headers, and we need all (offset slot, length header)
	 * combinations).  */
	return LZX_NUM_CHARS + (num_offset_slots << 3);
}
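/*
 * Worked example (illustrative, assuming LZX_NUM_CHARS is 256 as in the LZX
 * format): for window_order == 15, window_size is 32768, which is the base of
 * offset slot 30, so num_offset_slots == 30 and the main code has
 * 256 + 30 * 8 == 496 symbols.
 */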
static void
do_translate_target(void *target, s32 input_pos)
{
	s32 abs_offset, rel_offset;

	rel_offset = get_unaligned_u32_le(target);
	if (rel_offset >= -input_pos && rel_offset < LZX_WIM_MAGIC_FILESIZE) {
		if (rel_offset < LZX_WIM_MAGIC_FILESIZE - input_pos) {
			/* "good translation" */
			abs_offset = rel_offset + input_pos;
		} else {
			/* "compensating translation" */
			abs_offset = rel_offset - LZX_WIM_MAGIC_FILESIZE;
		}
		put_unaligned_u32_le(abs_offset, target);
	}
}
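/*
 * Example (illustrative): with input_pos == 0x1000 and a stored relative
 * target of 0x100, the "good translation" case applies and the field is
 * rewritten as the absolute target 0x1100.  A relative target of
 * LZX_WIM_MAGIC_FILESIZE - 0x800 instead takes the "compensating translation"
 * case and becomes -0x800.
 */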
static void
undo_translate_target(void *target, s32 input_pos)
{
	s32 abs_offset, rel_offset;

	abs_offset = get_unaligned_u32_le(target);
	if (abs_offset >= 0) {
		if (abs_offset < LZX_WIM_MAGIC_FILESIZE) {
			/* "good translation" */
			rel_offset = abs_offset - input_pos;
			put_unaligned_u32_le(rel_offset, target);
		}
	} else {
		if (abs_offset >= -input_pos) {
			/* "compensating translation" */
			rel_offset = abs_offset + LZX_WIM_MAGIC_FILESIZE;
			put_unaligned_u32_le(rel_offset, target);
		}
	}
}
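/*
 * Note (illustrative): this is the exact inverse of do_translate_target().
 * Continuing the example above, an absolute target of 0x1100 at
 * input_pos == 0x1000 is rewritten back to the relative target 0x100, and
 * -0x800 back to LZX_WIM_MAGIC_FILESIZE - 0x800.
 */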
/*
 * Do or undo the 'E8' preprocessing used in LZX.  Before compression, the
 * uncompressed data is preprocessed by changing the targets of x86 CALL
 * instructions from relative offsets to absolute offsets.  After decompression,
 * the translation is undone by changing the targets of x86 CALL instructions
 * from absolute offsets to relative offsets.
 *
 * Note that despite its intent, E8 preprocessing can be done on any data, even
 * if it is not actually x86 machine code.  In fact, E8 preprocessing appears to
 * always be used in LZX-compressed resources in WIM files; there is no bit to
 * indicate whether it is used or not, unlike in the LZX compressed format as
 * used in cabinet files, where a bit is reserved for that purpose.
 *
 * E8 preprocessing is disabled in the last 6 bytes of the uncompressed data,
 * which really means the 5-byte CALL instruction cannot start in the last 10
 * bytes of the uncompressed data.  This is one of the errors in the LZX
 * documentation.
 *
 * E8 preprocessing does not appear to be disabled after the 32768th chunk of a
 * WIM resource, which apparently is another difference from the LZX compression
 * used in cabinet files.
 *
 * E8 processing is supposed to take the file size as a parameter, as it is used
 * in calculating the translated jump targets.  But in WIM files, this file size
 * is always the same (LZX_WIM_MAGIC_FILESIZE == 12000000).
 */
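/*
 * For reference (illustrative): an x86 CALL instruction is the opcode byte
 * 0xE8 followed by a 32-bit little-endian displacement, e.g.
 *
 *	E8 78 56 34 12
 *
 * stores the relative target 0x12345678.  The filter below passes a pointer
 * to the 4 displacement bytes, along with the position of the E8 byte within
 * the buffer, to one of the translation functions above.
 */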
static void
lzx_e8_filter(u8 *data, u32 size, void (*process_target)(void *, s32))
{
#if !defined(__SSE2__) && !defined(__AVX2__)
	/*
	 * A worthwhile optimization is to push the end-of-buffer check into the
	 * relatively rare E8 case.  This is possible if we replace the last six
	 * bytes of data with E8 bytes; then we are guaranteed to hit an E8 byte
	 * before reaching end-of-buffer.  In addition, this scheme guarantees
	 * that no translation can begin following an E8 byte in the last 10
	 * bytes because a 4-byte offset containing E8 as its high byte is a
	 * large negative number that is not valid for translation.  That is
	 * exactly what we need.
	 */
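	/*
	 * Expanding on the above (added for clarity): if the last of the 4
	 * bytes following an E8 byte is itself 0xE8, the little-endian value
	 * read is at least 0xE8000000, i.e. roughly -0x18000000 as a signed
	 * offset.  Since input_pos is bounded by the buffer size (far smaller
	 * than that), the range checks in do_translate_target() and
	 * undo_translate_target() both fail and the bytes are left unchanged,
	 * so the substituted trap bytes never cause real data to be modified.
	 */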
217 tail = &data[size - 6];
218 memcpy(saved_bytes, tail, 6);
219 memset(tail, 0xE8, 6);
226 (*process_target)(p + 1, p - data);
229 memcpy(tail, saved_bytes, 6);
#else
	/* SSE2 or AVX-2 optimized version for x86_64 */

	u8 *p = data;
	u64 valid_mask = ~(u64)0;

	if (size <= 10)
		return;

#ifdef __AVX2__
#  define ALIGNMENT_REQUIRED 32
#else
#  define ALIGNMENT_REQUIRED 16
#endif

	/* Process one byte at a time until the pointer is properly aligned. */
	while ((uintptr_t)p % ALIGNMENT_REQUIRED != 0) {
		if (p >= data + size - 10)
			return;
		if (*p == 0xE8 && (valid_mask & 1)) {
			(*process_target)(p + 1, p - data);
			valid_mask &= ~(u64)0x1F;
		}
		p++;
		valid_mask >>= 1;
		valid_mask |= (u64)1 << 63;
	}
	if (data + size - p >= 64) {

		/* Vectorized processing */

		/* Note: we use a "trap" E8 byte to eliminate the need to check
		 * for end-of-buffer in the inner loop.  This byte is carefully
		 * positioned so that it will never be changed by a previous
		 * translation before it is detected. */

		u8 *trap = p + ((data + size - p) & ~31) - 32 + 4;
		u8 saved_byte = *trap;
		*trap = 0xE8;
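		/*
		 * Placement sketch (added for clarity): '(data + size - p) & ~31'
		 * is the remaining length rounded down to a multiple of 32, so
		 * 'trap' points 4 bytes into the final aligned 32-byte group,
		 * which the loop below never processes (it stops once p reaches
		 * trap - 4).  A translation triggered in the preceding group
		 * can overwrite at most the first 4 bytes of that final group,
		 * so the trap byte itself is never clobbered before it is seen.
		 */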
		for (;;) {
			u32 e8_mask;
			u8 *orig_p = p;

#ifdef __AVX2__
			const __m256i e8_bytes = _mm256_set1_epi8(0xE8);
			for (;;) {
				__m256i bytes = *(const __m256i *)p;
				__m256i cmpresult = _mm256_cmpeq_epi8(bytes, e8_bytes);
				e8_mask = _mm256_movemask_epi8(cmpresult);
				if (e8_mask)
					break;
				p += 32;
			}
#else
			const __m128i e8_bytes = _mm_set1_epi8(0xE8);
			for (;;) {
				/* Read the next 32 bytes of data and test them
				 * for E8 bytes. */
				__m128i bytes1 = *(const __m128i *)p;
				__m128i bytes2 = *(const __m128i *)(p + 16);
				__m128i cmpresult1 = _mm_cmpeq_epi8(bytes1, e8_bytes);
				__m128i cmpresult2 = _mm_cmpeq_epi8(bytes2, e8_bytes);
				u32 mask1 = _mm_movemask_epi8(cmpresult1);
				u32 mask2 = _mm_movemask_epi8(cmpresult2);
				/* The masks have a bit set for each E8 byte.
				 * We stay in this fast inner loop as long as
				 * there are no E8 bytes. */
				if (mask1 | mask2) {
					e8_mask = mask1 | (mask2 << 16);
					break;
				}
				p += 32;
			}
#endif

			/* Did we pass over data with no E8 bytes? */
			if (p != orig_p)
				valid_mask = ~(u64)0;

			/* Are we nearing end-of-buffer? */
			if (p == trap - 4)
				break;

			/* Process the E8 bytes.  However, the AND with
			 * 'valid_mask' ensures we never process an E8 byte that
			 * was itself part of a translation target. */
			while ((e8_mask &= valid_mask)) {
				unsigned bit = ffs32(e8_mask);
				(*process_target)(p + bit + 1, p + bit - data);
				valid_mask &= ~((u64)0x1F << bit);
			}

			valid_mask >>= 32;
			valid_mask |= 0xFFFFFFFF00000000;
			p += 32;
		}

		*trap = saved_byte;
	}

	/* Approaching the end of the buffer; process one byte at a time. */
	while (p < data + size - 10) {
		if (*p == 0xE8 && (valid_mask & 1)) {
			(*process_target)(p + 1, p - data);
			valid_mask &= ~(u64)0x1F;
		}
		p++;
		valid_mask >>= 1;
		valid_mask |= (u64)1 << 63;
	}
#endif /* __SSE2__ || __AVX2__ */
}
void
lzx_do_e8_preprocessing(u8 *data, u32 size)
{
	lzx_e8_filter(data, size, do_translate_target);
}

void
lzx_undo_e8_preprocessing(u8 *data, u32 size)
{
	lzx_e8_filter(data, size, undo_translate_target);
}
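/*
 * Usage note (illustrative): the two entry points above are exact inverses on
 * the same buffer and size, e.g. calling lzx_do_e8_preprocessing(buf, n)
 * followed by lzx_undo_e8_preprocessing(buf, n) leaves 'buf' unchanged.
 */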