/*
 * lzms-common.c
 *
 * Code shared between the compressor and decompressor for the LZMS compression
 * format.
 */

/*
 * Copyright (C) 2013, 2014 Eric Biggers
 *
 * This file is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option) any
 * later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this file; if not, see http://www.gnu.org/licenses/.
 */
24
#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#include "wimlib/endianness.h"
#include "wimlib/lzms.h"
#include "wimlib/unaligned.h"
#include "wimlib/util.h"

#include <pthread.h>
35
/***************************************************************
 * Constant tables initialized by lzms_compute_slots():        *
 ***************************************************************/

/* Table: offset slot => offset slot base value.  The extra (+ 1) entry is an
 * upper-bound sentinel read by lzms_get_slot()'s range test.  */
u32 lzms_offset_slot_base[LZMS_MAX_NUM_OFFSET_SYMS + 1];

/* Table: offset slot => number of extra offset bits  */
u8 lzms_extra_offset_bits[LZMS_MAX_NUM_OFFSET_SYMS];

/* Table: length slot => length slot base value.  Like the offset table, this
 * has one extra sentinel entry at the end.  */
u32 lzms_length_slot_base[LZMS_NUM_LEN_SYMS + 1];

/* Table: length slot => number of extra length bits  */
u8 lzms_extra_length_bits[LZMS_NUM_LEN_SYMS];
51
52 unsigned
53 lzms_get_slot(u32 value, const u32 slot_base_tab[], unsigned num_slots)
54 {
55         unsigned l = 0;
56         unsigned r = num_slots - 1;
57         for (;;) {
58                 LZMS_ASSERT(r >= l);
59                 unsigned slot = (l + r) / 2;
60                 if (value >= slot_base_tab[slot]) {
61                         if (value < slot_base_tab[slot + 1])
62                                 return slot;
63                         else
64                                 l = slot + 1;
65                 } else {
66                         r = slot - 1;
67                 }
68         }
69 }
70
71 static void
72 lzms_decode_delta_rle_slot_bases(u32 slot_bases[],
73                                  u8 extra_bits[],
74                                  const u8 delta_run_lens[],
75                                  unsigned num_run_lens,
76                                  u32 final,
77                                  unsigned expected_num_slots)
78 {
79         unsigned order = 0;
80         u32 delta = 1;
81         u32 base = 0;
82         unsigned slot = 0;
83         for (unsigned i = 0; i < num_run_lens; i++) {
84                 unsigned run_len = delta_run_lens[i];
85                 while (run_len--) {
86                         base += delta;
87                         if (slot > 0)
88                                 extra_bits[slot - 1] = order;
89                         slot_bases[slot] = base;
90                         slot++;
91                 }
92                 delta <<= 1;
93                 order++;
94         }
95         LZMS_ASSERT(slot == expected_num_slots);
96
97         slot_bases[slot] = final;
98         extra_bits[slot - 1] = bsr32(slot_bases[slot] - slot_bases[slot - 1]);
99 }
100
101 /* Initialize the global offset and length slot tables.  */
102 static void
103 lzms_compute_slots(void)
104 {
105         /* If an explicit formula that maps LZMS offset and length slots to slot
106          * bases exists, then it could be used here.  But until one is found,
107          * the following code fills in the slots using the observation that the
108          * increase from one slot base to the next is an increasing power of 2.
109          * Therefore, run-length encoding of the delta of adjacent entries can
110          * be used.  */
111         static const u8 offset_slot_delta_run_lens[] = {
112                 9,   0,   9,   7,   10,  15,  15,  20,
113                 20,  30,  33,  40,  42,  45,  60,  73,
114                 80,  85,  95,  105, 6,
115         };
116
117         static const u8 length_slot_delta_run_lens[] = {
118                 27,  4,   6,   4,   5,   2,   1,   1,
119                 1,   1,   1,   0,   0,   0,   0,   0,
120                 1,
121         };
122
123         /* Offset slots  */
124         lzms_decode_delta_rle_slot_bases(lzms_offset_slot_base,
125                                          lzms_extra_offset_bits,
126                                          offset_slot_delta_run_lens,
127                                          ARRAY_LEN(offset_slot_delta_run_lens),
128                                          0x7fffffff,
129                                          LZMS_MAX_NUM_OFFSET_SYMS);
130
131         /* Length slots  */
132         lzms_decode_delta_rle_slot_bases(lzms_length_slot_base,
133                                          lzms_extra_length_bits,
134                                          length_slot_delta_run_lens,
135                                          ARRAY_LEN(length_slot_delta_run_lens),
136                                          0x400108ab,
137                                          LZMS_NUM_LEN_SYMS);
138 }
139
140 /* Initialize the global offset and length slot tables if not already done.  */
141 void
142 lzms_init_slots(void)
143 {
144         static pthread_once_t once = PTHREAD_ONCE_INIT;
145
146         pthread_once(&once, lzms_compute_slots);
147 }
148
/*
 * Possibly translate (or untranslate, if @undo) the 32-bit operand of a
 * recognized instruction at data[i], whose opcode occupies @num_op_bytes
 * bytes.  The translation is performed only when the most recent "likely x86
 * instruction" position (*closest_target_usage_p) is within
 * @max_trans_offset bytes.  Returns the position at which scanning should
 * resume.
 */
static s32
lzms_maybe_do_x86_translation(u8 data[restrict], s32 i, s32 num_op_bytes,
			      s32 * restrict closest_target_usage_p,
			      s32 last_target_usages[restrict],
			      s32 max_trans_offset, bool undo)
{
	u16 pos;

	if (undo) {
		/* Undo direction: convert the absolute target back into a
		 * relative one by subtracting the current position.  */
		if (i - *closest_target_usage_p <= max_trans_offset) {
			LZMS_DEBUG("Undid x86 translation at position %d "
				   "(opcode 0x%02x)", i, data[i]);
			void *p32 = &data[i + num_op_bytes];
			u32 n = le32_to_cpu(load_le32_unaligned(p32));
			store_le32_unaligned(cpu_to_le32(n - i), p32);
		}
		/* Read after the (possible) translation above, so 'pos' is
		 * derived from the *relative* form of the target — matching
		 * the !undo path, which reads it before translating.  Only
		 * the low 16 bits are kept ('pos' is u16).  */
		pos = i + le16_to_cpu(load_le16_unaligned(&data[i + num_op_bytes]));
	} else {
		/* Forward direction: capture the relative target first, then
		 * translate it into an absolute one by adding the position.  */
		pos = i + le16_to_cpu(load_le16_unaligned(&data[i + num_op_bytes]));

		if (i - *closest_target_usage_p <= max_trans_offset) {
			LZMS_DEBUG("Did x86 translation at position %d "
				   "(opcode 0x%02x)", i, data[i]);
			void *p32 = &data[i + num_op_bytes];
			u32 n = le32_to_cpu(load_le32_unaligned(p32));
			store_le32_unaligned(cpu_to_le32(n + i), p32);
		}
	}

	/* Skip past the opcode bytes and the 32-bit operand; the "- 1" here
	 * pairs with the "+ 1" in the return statement below.  */
	i += num_op_bytes + sizeof(le32) - 1;

	/* If another instruction referenced (the low 16 bits of) the same
	 * target recently enough, flag this position as a likely x86
	 * instruction, enabling translations near it.  */
	if (i - last_target_usages[pos] <= LZMS_X86_MAX_GOOD_TARGET_OFFSET)
		*closest_target_usage_p = i;

	last_target_usages[pos] = i;

	return i + 1;
}
187
188 static inline s32
189 lzms_may_x86_translate(const u8 p[restrict], s32 *restrict max_offset_ret)
190 {
191         /* Switch on first byte of the opcode, assuming it is really an x86
192          * instruction.  */
193         *max_offset_ret = LZMS_X86_MAX_TRANSLATION_OFFSET;
194         switch (p[0]) {
195         case 0x48:
196                 if (p[1] == 0x8b) {
197                         if (p[2] == 0x5 || p[2] == 0xd) {
198                                 /* Load relative (x86_64)  */
199                                 return 3;
200                         }
201                 } else if (p[1] == 0x8d) {
202                         if ((p[2] & 0x7) == 0x5) {
203                                 /* Load effective address relative (x86_64)  */
204                                 return 3;
205                         }
206                 }
207                 break;
208
209         case 0x4c:
210                 if (p[1] == 0x8d) {
211                         if ((p[2] & 0x7) == 0x5) {
212                                 /* Load effective address relative (x86_64)  */
213                                 return 3;
214                         }
215                 }
216                 break;
217
218         case 0xe8:
219                 /* Call relative  */
220                 *max_offset_ret = LZMS_X86_MAX_TRANSLATION_OFFSET / 2;
221                 return 1;
222
223         case 0xe9:
224                 /* Jump relative  */
225                 *max_offset_ret = 0;
226                 return 5;
227
228         case 0xf0:
229                 if (p[1] == 0x83 && p[2] == 0x05) {
230                         /* Lock add relative  */
231                         return 3;
232                 }
233                 break;
234
235         case 0xff:
236                 if (p[1] == 0x15) {
237                         /* Call indirect  */
238                         return 2;
239                 }
240                 break;
241         }
242         *max_offset_ret = 0;
243         return 1;
244 }
245
246 /*
247  * Translate relative addresses embedded in x86 instructions into absolute
248  * addresses (@undo == %false), or undo this translation (@undo == %true).
249  *
250  * Absolute addresses are usually more compressible by LZ factorization.
251  *
252  * @last_target_usages must be a temporary array of length >= 65536.
253  */
254 void
255 lzms_x86_filter(u8 data[restrict], s32 size,
256                 s32 last_target_usages[restrict], bool undo)
257 {
258         /*
259          * Note: this filter runs unconditionally and uses a custom algorithm to
260          * detect data regions that probably contain x86 code.
261          *
262          * 'closest_target_usage' tracks the most recent position that has a
263          * good chance of being an x86 instruction.  When the filter detects a
264          * likely x86 instruction, it updates this variable and considers the
265          * next 1023 bytes of data as valid for x86 translations.
266          *
267          * If part of the data does not, in fact, contain x86 machine code, then
268          * 'closest_target_usage' will, very likely, eventually fall more than
269          * 1023 bytes behind the current position.  This results in x86
270          * translations being disabled until the next likely x86 instruction is
271          * detected.
272          *
273          * Translations on relative call (e8 opcode) instructions are slightly
274          * more restricted.  They require that the most recent likely x86
275          * instruction was in the last 511 bytes, rather than the last 1023
276          * bytes.
277          *
278          * To identify "likely x86 instructions", the algorithm attempts to
279          * track the position of the most recent potential relative-addressing
280          * instruction that referenced each possible memory address.  If it
281          * finds two references to the same memory address within a 65535 byte
282          * window, the second reference is flagged as a likely x86 instruction.
283          * Since the instructions considered for translation necessarily use
284          * relative addressing, the algorithm does a tentative translation into
285          * absolute addresses.  In addition, so that memory addresses can be
286          * looked up in an array of reasonable size (in this code,
287          * 'last_target_usages'), only the low-order 2 bytes of each address are
288          * considered significant.
289          */
290
291         s32 closest_target_usage = -LZMS_X86_MAX_TRANSLATION_OFFSET - 1;
292
293         for (s32 i = 0; i < 65536; i++)
294                 last_target_usages[i] = -LZMS_X86_MAX_GOOD_TARGET_OFFSET - 1;
295
296         for (s32 i = 1; i < size - 16; ) {
297                 s32 max_trans_offset;
298                 s32 n;
299
300                 n = lzms_may_x86_translate(data + i, &max_trans_offset);
301
302                 if (max_trans_offset) {
303                         /* Recognized opcode.  */
304                         i = lzms_maybe_do_x86_translation(data, i, n,
305                                                           &closest_target_usage,
306                                                           last_target_usages,
307                                                           max_trans_offset,
308                                                           undo);
309                 } else {
310                         /* Not a recognized opcode.  */
311                         i += n;
312                 }
313         }
314 }
315
316 void
317 lzms_init_lz_lru_queues(struct lzms_lz_lru_queues *lz)
318 {
319         /* Recent offsets for LZ matches  */
320         for (u32 i = 0; i < LZMS_NUM_RECENT_OFFSETS + 1; i++)
321                 lz->recent_offsets[i] = i + 1;
322
323         lz->prev_offset = 0;
324         lz->upcoming_offset = 0;
325 }
326
327 void
328 lzms_init_delta_lru_queues(struct lzms_delta_lru_queues *delta)
329 {
330         /* Recent offsets and powers for LZ matches  */
331         for (u32 i = 0; i < LZMS_NUM_RECENT_OFFSETS + 1; i++) {
332                 delta->recent_offsets[i] = i + 1;
333                 delta->recent_powers[i] = 0;
334         }
335         delta->prev_offset = 0;
336         delta->prev_power = 0;
337         delta->upcoming_offset = 0;
338         delta->upcoming_power = 0;
339 }
340
341
342 void
343 lzms_init_lru_queues(struct lzms_lru_queues *lru)
344 {
345         lzms_init_lz_lru_queues(&lru->lz);
346         lzms_init_delta_lru_queues(&lru->delta);
347 }
348
349 void
350 lzms_update_lz_lru_queue(struct lzms_lz_lru_queues *lz)
351 {
352         if (lz->prev_offset != 0) {
353                 for (int i = LZMS_NUM_RECENT_OFFSETS - 1; i >= 0; i--)
354                         lz->recent_offsets[i + 1] = lz->recent_offsets[i];
355                 lz->recent_offsets[0] = lz->prev_offset;
356         }
357         lz->prev_offset = lz->upcoming_offset;
358 }
359
360 void
361 lzms_update_delta_lru_queues(struct lzms_delta_lru_queues *delta)
362 {
363         if (delta->prev_offset != 0) {
364                 for (int i = LZMS_NUM_RECENT_OFFSETS - 1; i >= 0; i--) {
365                         delta->recent_offsets[i + 1] = delta->recent_offsets[i];
366                         delta->recent_powers[i + 1] = delta->recent_powers[i];
367                 }
368                 delta->recent_offsets[0] = delta->prev_offset;
369                 delta->recent_powers[0] = delta->prev_power;
370         }
371
372         delta->prev_offset = delta->upcoming_offset;
373         delta->prev_power = delta->upcoming_power;
374 }
375
376 void
377 lzms_update_lru_queues(struct lzms_lru_queues *lru)
378 {
379         lzms_update_lz_lru_queue(&lru->lz);
380         lzms_update_delta_lru_queues(&lru->delta);
381 }