4 * Code shared between the compressor and decompressor for the LZMS compression
9 * Copyright (C) 2013, 2014 Eric Biggers
11 * This file is free software; you can redistribute it and/or modify it under
12 * the terms of the GNU Lesser General Public License as published by the Free
13 * Software Foundation; either version 3 of the License, or (at your option) any
16 * This file is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
21 * You should have received a copy of the GNU Lesser General Public License
22 * along with this file; if not, see http://www.gnu.org/licenses/.
29 #include "wimlib/endianness.h"
30 #include "wimlib/lzms.h"
31 #include "wimlib/unaligned.h"
32 #include "wimlib/util.h"
36 /***************************************************************
37 * Constant tables initialized by lzms_compute_slots(): *
38 ***************************************************************/
/* Table: offset slot => offset slot base value (the smallest offset that
 * belongs to the slot).  Filled in at runtime by lzms_compute_slots().
 * The extra entry at the end is a sentinel so that binary search may
 * safely read slot_base_tab[slot + 1]. */
u32 lzms_offset_slot_base[LZMS_MAX_NUM_OFFSET_SYMS + 1];
/* Table: offset slot => number of extra offset bits */
u8 lzms_extra_offset_bits[LZMS_MAX_NUM_OFFSET_SYMS];
/* Table: length slot => length slot base value (also has a trailing
 * sentinel entry, like lzms_offset_slot_base[]) */
u32 lzms_length_slot_base[LZMS_NUM_LEN_SYMS + 1];
/* Table: length slot => number of extra length bits */
u8 lzms_extra_length_bits[LZMS_NUM_LEN_SYMS];
/*
 * Return the slot containing @value (an offset or a length), found by
 * binary search in @slot_base_tab, which maps each slot to the smallest
 * value belonging to it and carries one extra sentinel entry at index
 * @num_slots so that slot_base_tab[slot + 1] is always readable.
 *
 * NOTE(review): the return type, the lower search bound 'l', the search
 * loop, and the else branches are not visible in this chunk of the file.
 */
lzms_get_slot(u32 value, const u32 slot_base_tab[], unsigned num_slots)
/* Upper bound (inclusive) of the binary search range. */
unsigned r = num_slots - 1;
/* Midpoint of the current search range [l, r]. */
unsigned slot = (l + r) / 2;
if (value >= slot_base_tab[slot]) {
/* @value is in this slot iff it is below the next slot's base. */
if (value < slot_base_tab[slot + 1])
/*
 * Fill in a slot base table (and the matching extra-bits table) by
 * expanding a run-length-encoded description of the deltas between
 * adjacent slot bases: run i contains delta_run_lens[i] deltas, each a
 * power of 2 (its exponent is the "order" recorded as the extra-bits
 * count) — presumably the order increases with i; confirm against the
 * non-truncated source.
 *
 * NOTE(review): the 'extra_bits' and 'final' parameters and parts of the
 * loop body ('slot', 'base', 'order' updates) are not visible here.
 */
lzms_decode_delta_rle_slot_bases(u32 slot_bases[],
const u8 delta_run_lens[],
unsigned num_run_lens,
unsigned expected_num_slots)
/* Expand each run of equal-order deltas. */
for (unsigned i = 0; i < num_run_lens; i++) {
unsigned run_len = delta_run_lens[i];
/* Every delta in this run shares the same bit order. */
extra_bits[slot - 1] = order;
slot_bases[slot] = base;
/* Exactly the expected number of slots must have been generated. */
LZMS_ASSERT(slot == expected_num_slots);
/* Store the sentinel entry, and derive the last slot's extra-bits
 * count from the distance between the last base and the sentinel. */
slot_bases[slot] = final;
extra_bits[slot - 1] = bsr32(slot_bases[slot] - slot_bases[slot - 1]);
/* Initialize the global offset and length slot tables. */
lzms_compute_slots(void)
/* If an explicit formula that maps LZMS offset and length slots to slot
 * bases exists, then it could be used here. But until one is found,
 * the following code fills in the slots using the observation that the
 * increase from one slot base to the next is an increasing power of 2.
 * Therefore, run-length encoding of the delta of adjacent entries can
 * be used for the slot bases. */
/* Run lengths for the deltas of the offset slot base table. */
static const u8 offset_slot_delta_run_lens[] = {
9, 0, 9, 7, 10, 15, 15, 20,
20, 30, 33, 40, 42, 45, 60, 73,
/* Run lengths for the deltas of the length slot base table. */
static const u8 length_slot_delta_run_lens[] = {
27, 4, 6, 4, 5, 2, 1, 1,
1, 1, 1, 0, 0, 0, 0, 0,
/* Expand the offset slot table. */
lzms_decode_delta_rle_slot_bases(lzms_offset_slot_base,
lzms_extra_offset_bits,
offset_slot_delta_run_lens,
ARRAY_LEN(offset_slot_delta_run_lens),
LZMS_MAX_NUM_OFFSET_SYMS);
/* Expand the length slot table.
 * NOTE(review): the trailing argument(s) of this call — expected to be
 * LZMS_NUM_LEN_SYMS — are not visible in this chunk of the file. */
lzms_decode_delta_rle_slot_bases(lzms_length_slot_base,
lzms_extra_length_bits,
length_slot_delta_run_lens,
ARRAY_LEN(length_slot_delta_run_lens),
/* Initialize the global offset and length slot tables if not already done.
 * Thread-safe: pthread_once() guarantees lzms_compute_slots() is executed
 * exactly once, even if multiple threads call this concurrently. */
lzms_init_slots(void)
static pthread_once_t once = PTHREAD_ONCE_INIT;
pthread_once(&once, lzms_compute_slots);
/*
 * Perform (or, with @undo, reverse) a single x86 address translation at
 * position @i in @data, but only if the most recent likely-x86 position
 * (*closest_target_usage_p) is within @max_trans_offset bytes.  The
 * translation adds (or subtracts, when undoing) the instruction position
 * @i to the 32-bit little-endian field that follows the @num_op_bytes
 * opcode bytes — i.e. it converts a relative address to an absolute one,
 * or back.  The low 16 bits of the referenced target ('pos') are used to
 * index @last_target_usages; seeing the same target twice within
 * LZMS_X86_MAX_GOOD_TARGET_OFFSET bytes promotes @i to the new
 * closest-target-usage position.
 *
 * NOTE(review): the if/else skeleton selecting the do vs. undo paths, the
 * declaration of 'pos', and the return statement are not visible in this
 * chunk of the file.
 */
lzms_maybe_do_x86_translation(u8 data[restrict], s32 i, s32 num_op_bytes,
s32 * restrict closest_target_usage_p,
s32 last_target_usages[restrict],
s32 max_trans_offset, bool undo)
if (i - *closest_target_usage_p <= max_trans_offset) {
LZMS_DEBUG("Undid x86 translation at position %d "
"(opcode 0x%02x)", i, data[i]);
/* Undo: subtract the position from the stored 32-bit value. */
void *p32 = &data[i + num_op_bytes];
u32 n = le32_to_cpu(load_le32_unaligned(p32));
store_le32_unaligned(cpu_to_le32(n - i), p32);
/* Truncate the target address to its low 16 bits for lookup. */
pos = i + le16_to_cpu(load_le16_unaligned(&data[i + num_op_bytes]));
pos = i + le16_to_cpu(load_le16_unaligned(&data[i + num_op_bytes]));
if (i - *closest_target_usage_p <= max_trans_offset) {
LZMS_DEBUG("Did x86 translation at position %d "
"(opcode 0x%02x)", i, data[i]);
/* Do: add the position to the stored 32-bit value. */
void *p32 = &data[i + num_op_bytes];
u32 n = le32_to_cpu(load_le32_unaligned(p32));
store_le32_unaligned(cpu_to_le32(n + i), p32);
/* Skip past the opcode bytes and the 32-bit operand. */
i += num_op_bytes + sizeof(le32) - 1;
/* A repeated reference to the same (truncated) target within the
 * window marks this as a likely x86 instruction. */
if (i - last_target_usages[pos] <= LZMS_X86_MAX_GOOD_TARGET_OFFSET)
*closest_target_usage_p = i;
last_target_usages[pos] = i;
/*
 * Examine the bytes at @p and decide whether they begin an x86
 * instruction whose operand is a candidate for address translation.  On
 * recognition, *max_offset_ret is set to the maximum allowed distance
 * from the last likely-x86 position (full LZMS_X86_MAX_TRANSLATION_OFFSET
 * for most opcodes, half for at least one case below) — presumably it is
 * left 0 for unrecognized bytes, and the return value is the opcode
 * length in bytes; confirm against the non-truncated source.
 *
 * NOTE(review): the switch/if skeleton on p[0] and all return statements
 * are not visible in this chunk of the file.
 */
lzms_may_x86_translate(const u8 p[restrict], s32 *restrict max_offset_ret)
/* Switch on first byte of the opcode, assuming it is really an x86
 * instruction. */
*max_offset_ret = LZMS_X86_MAX_TRANSLATION_OFFSET;
/* ModRM byte selects a RIP-relative form. */
if (p[2] == 0x5 || p[2] == 0xd) {
/* Load relative (x86_64) */
} else if (p[1] == 0x8d) {
if ((p[2] & 0x7) == 0x5) {
/* Load effective address relative (x86_64) */
if ((p[2] & 0x7) == 0x5) {
/* Load effective address relative (x86_64) */
/* This case only permits half the usual translation distance. */
*max_offset_ret = LZMS_X86_MAX_TRANSLATION_OFFSET / 2;
if (p[1] == 0x83 && p[2] == 0x05) {
/* Lock add relative */
/*
 * Translate relative addresses embedded in x86 instructions into absolute
 * addresses (@undo == %false), or undo this translation (@undo == %true).
 *
 * Absolute addresses are usually more compressible by LZ factorization.
 *
 * @last_target_usages must be a temporary array of length >= 65536.
 */
lzms_x86_filter(u8 data[restrict], s32 size,
s32 last_target_usages[restrict], bool undo)
/*
 * Note: this filter runs unconditionally and uses a custom algorithm to
 * detect data regions that probably contain x86 code.
 *
 * 'closest_target_usage' tracks the most recent position that has a
 * good chance of being an x86 instruction. When the filter detects a
 * likely x86 instruction, it updates this variable and considers the
 * next 1023 bytes of data as valid for x86 translations.
 *
 * If part of the data does not, in fact, contain x86 machine code, then
 * 'closest_target_usage' will, very likely, eventually fall more than
 * 1023 bytes behind the current position. This results in x86
 * translations being disabled until the next likely x86 instruction is
 * detected.
 *
 * Translations on relative call (e8 opcode) instructions are slightly
 * more restricted. They require that the most recent likely x86
 * instruction was in the last 511 bytes, rather than the last 1023
 * bytes.
 *
 * To identify "likely x86 instructions", the algorithm attempts to
 * track the position of the most recent potential relative-addressing
 * instruction that referenced each possible memory address. If it
 * finds two references to the same memory address within a 65535 byte
 * window, the second reference is flagged as a likely x86 instruction.
 * Since the instructions considered for translation necessarily use
 * relative addressing, the algorithm does a tentative translation into
 * absolute addresses. In addition, so that memory addresses can be
 * looked up in an array of reasonable size (in this code,
 * 'last_target_usages'), only the low-order 2 bytes of each address are
 * considered significant.
 */
/* Start "out of range": no likely x86 instruction seen yet. */
s32 closest_target_usage = -LZMS_X86_MAX_TRANSLATION_OFFSET - 1;
/* Likewise, no target has been referenced yet. */
for (s32 i = 0; i < 65536; i++)
last_target_usages[i] = -LZMS_X86_MAX_GOOD_TARGET_OFFSET - 1;
/* Stop 16 bytes short of the end so opcode+operand reads stay in
 * bounds.  NOTE(review): the loop's 'i' advance for the unrecognized
 * case and the declaration of 'n' are not visible in this chunk. */
for (s32 i = 1; i < size - 16; ) {
s32 max_trans_offset;
n = lzms_may_x86_translate(data + i, &max_trans_offset);
if (max_trans_offset) {
/* Recognized opcode. */
i = lzms_maybe_do_x86_translation(data, i, n,
&closest_target_usage,
/* Not a recognized opcode. */
/*
 * Reset the LZ-match LRU queue to its initial state: the recent-offsets
 * array holds the offsets 1, 2, ..., LZMS_NUM_RECENT_OFFSETS + 1.
 *
 * NOTE(review): the line resetting lz->prev_offset (if present in the
 * full source) is not visible in this chunk.
 */
lzms_init_lz_lru_queues(struct lzms_lz_lru_queues *lz)
/* Recent offsets for LZ matches */
for (u32 i = 0; i < LZMS_NUM_RECENT_OFFSETS + 1; i++)
lz->recent_offsets[i] = i + 1;
/* No offset is pending insertion into the queue yet. */
lz->upcoming_offset = 0;
/*
 * Reset the delta-match LRU queues to their initial state: offsets
 * 1, 2, ..., LZMS_NUM_RECENT_OFFSETS + 1 with all powers 0, and no
 * previous or upcoming offset/power pending.
 */
lzms_init_delta_lru_queues(struct lzms_delta_lru_queues *delta)
/* Recent offsets and powers for delta matches */
for (u32 i = 0; i < LZMS_NUM_RECENT_OFFSETS + 1; i++) {
delta->recent_offsets[i] = i + 1;
delta->recent_powers[i] = 0;
delta->prev_offset = 0;
delta->prev_power = 0;
delta->upcoming_offset = 0;
delta->upcoming_power = 0;
/* Reset both the LZ-match and delta-match LRU queues. */
lzms_init_lru_queues(struct lzms_lru_queues *lru)
lzms_init_lz_lru_queues(&lru->lz);
lzms_init_delta_lru_queues(&lru->delta);
/*
 * Advance the LZ-match LRU queue by one step: if there is a previous
 * offset (nonzero), shift the recent-offsets array back one position and
 * insert it at the front; then the upcoming offset becomes the new
 * previous offset.
 *
 * NOTE(review): the reset of lz->upcoming_offset (if present in the full
 * source) is not visible in this chunk.
 */
lzms_update_lz_lru_queue(struct lzms_lz_lru_queues *lz)
if (lz->prev_offset != 0) {
/* Shift the queue back to make room at index 0. */
for (int i = LZMS_NUM_RECENT_OFFSETS - 1; i >= 0; i--)
lz->recent_offsets[i + 1] = lz->recent_offsets[i];
lz->recent_offsets[0] = lz->prev_offset;
lz->prev_offset = lz->upcoming_offset;
/*
 * Advance the delta-match LRU queues by one step, keeping the offset and
 * power arrays in lockstep: if there is a previous offset (nonzero),
 * shift both arrays back one position and insert the previous
 * offset/power pair at the front; then the upcoming pair becomes the new
 * previous pair.
 */
lzms_update_delta_lru_queues(struct lzms_delta_lru_queues *delta)
if (delta->prev_offset != 0) {
/* Shift both queues back to make room at index 0. */
for (int i = LZMS_NUM_RECENT_OFFSETS - 1; i >= 0; i--) {
delta->recent_offsets[i + 1] = delta->recent_offsets[i];
delta->recent_powers[i + 1] = delta->recent_powers[i];
delta->recent_offsets[0] = delta->prev_offset;
delta->recent_powers[0] = delta->prev_power;
delta->prev_offset = delta->upcoming_offset;
delta->prev_power = delta->upcoming_power;
/* Advance both the LZ-match and delta-match LRU queues by one step. */
lzms_update_lru_queues(struct lzms_lru_queues *lru)
lzms_update_lz_lru_queue(&lru->lz);
lzms_update_delta_lru_queues(&lru->delta);