/*
- * lzms-common.c - Common code for LZMS compression and decompression
+ * lzms_common.c - Common code for LZMS compression and decompression
*/
/*
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
-#include "wimlib/endianness.h"
#include "wimlib/lzms_common.h"
#include "wimlib/unaligned.h"
0x0000009b, 0x000000ab, 0x000000cb, 0x000000eb,
0x0000012b, 0x000001ab, 0x000002ab, 0x000004ab,
0x000008ab, 0x000108ab, 0x400108ab,
- /* The last entry is extra; it is equal to LZMS_MAX_MATCH_LEN + 1 and is
- * here to aid binary search. */
+ /* The last entry is extra; it is equal to LZMS_MAX_MATCH_LENGTH + 1 and
+ * is here to aid binary search. */
};
/* Table: length slot => number of extra length bits */
}
void
-lzms_init_symbol_frequencies(u32 freqs[], size_t num_syms)
+lzms_init_symbol_frequencies(u32 freqs[], unsigned num_syms)
+{
+ for (unsigned sym = 0; sym < num_syms; sym++)
+ freqs[sym] = 1;
+}
+
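+/* Scale down all symbol frequencies: halve each one, then add 1 so that no
+ * frequency becomes zero. */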
+void
+lzms_dilute_symbol_frequencies(u32 freqs[], unsigned num_syms)
+{
+ for (unsigned sym = 0; sym < num_syms; sym++)
+ freqs[sym] = (freqs[sym] >> 1) + 1;
+}
+
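+/* Return a pointer to the next byte that might begin one of the x86
+ * instructions recognized by the filter.  The caller stores a 0xE8 byte near
+ * the end of the buffer, so this scan always terminates. */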
+static inline u8 *
+find_next_opcode(u8 *p)
{
- for (size_t i = 0; i < num_syms; i++)
- freqs[i] = 1;
+ /*
+ * The following table is used to accelerate the common case where the
+ * byte has nothing to do with x86 translation and must simply be
+ * skipped. This was faster than the following alternatives:
+ * - Jump table with 256 entries
+ * - Switch statement with default
+ */
+ static const u8 is_potential_opcode[256] = {
+ [0x48] = 1, [0x4C] = 1, [0xE8] = 1,
+ [0xE9] = 1, [0xF0] = 1, [0xFF] = 1,
+ };
+
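+	/* Unrolled by 4: test four bytes per loop iteration. */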
+ for (;;) {
+ if (is_potential_opcode[*p])
+ break;
+ p++;
+ if (is_potential_opcode[*p])
+ break;
+ p++;
+ if (is_potential_opcode[*p])
+ break;
+ p++;
+ if (is_potential_opcode[*p])
+ break;
+ p++;
+ }
+ return p;
+}
+
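+/* Check whether the potential opcode at 'p' really begins a translatable x86
+ * instruction; if so, translate (or untranslate, when 'undo' is set) the
+ * 32-bit displacement that follows the opcode bytes.  Returns the position at
+ * which scanning should resume. */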
+static inline u8 *
+translate_if_needed(u8 *data, u8 *p, s32 *last_x86_pos,
+ s32 last_target_usages[], bool undo)
+{
+ s32 max_trans_offset;
+ s32 opcode_nbytes;
+ u16 target16;
+ s32 i;
+
+ max_trans_offset = LZMS_X86_MAX_TRANSLATION_OFFSET;
+
+	if ((*p & 0xFE) == 0xE8) {	/* 0xE8 or 0xE9? */
+ if (*p & 0x01) {
+ /* 0xE9: Jump relative */
+ p += 4;
+ } else {
+ /* 0xE8: Call relative. Note: 'max_trans_offset' must
+ * be halved for this instruction. This means that we
+ * must be more confident that we are in a region of x86
+ * machine code before we will do a translation for this
+ * particular instruction. */
+ opcode_nbytes = 1;
+ max_trans_offset /= 2;
+ goto have_opcode;
+ }
+	} else if ((*p & 0xFB) == 0x48) {	/* 0x48 or 0x4C? */
+ if (*p & 0x04) {
+ /* 0x4C */
+ if (*(p + 1) == 0x8D) {
+ if ((*(p + 2) & 0x7) == 0x5) {
+ /* Load effective address relative (x86_64) */
+ opcode_nbytes = 3;
+ goto have_opcode;
+ }
+ }
+ } else {
+ /* 0x48 */
+ if (*(p + 1) == 0x8B) {
+ if (*(p + 2) == 0x5 || *(p + 2) == 0xD) {
+ /* Load relative (x86_64) */
+ opcode_nbytes = 3;
+ goto have_opcode;
+ }
+ } else if (*(p + 1) == 0x8D) {
+ if ((*(p + 2) & 0x7) == 0x5) {
+ /* Load effective address relative (x86_64) */
+ opcode_nbytes = 3;
+ goto have_opcode;
+ }
+ }
+ }
+ } else {
+ if (*p & 0x0F) {
+ /* 0xFF */
+ if (*(p + 1) == 0x15) {
+ /* Call indirect */
+ opcode_nbytes = 2;
+ goto have_opcode;
+ }
+ } else {
+ /* 0xF0 */
+ if (*(p + 1) == 0x83 && *(p + 2) == 0x05) {
+ /* Lock add relative */
+ opcode_nbytes = 3;
+ goto have_opcode;
+ }
+ }
+ }
+
+ return p + 1;
+
+have_opcode:
+ i = p - data;
+ p += opcode_nbytes;
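+	/* 'i' is the offset of the opcode and 'p' now points to its 32-bit
+	 * displacement.  The translation adds the instruction's position to
+	 * the displacement when filtering and subtracts it when undoing, but
+	 * only if the instruction lies within 'max_trans_offset' bytes of the
+	 * last position identified as x86 code. */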
+ if (undo) {
+ if (i - *last_x86_pos <= max_trans_offset) {
+ u32 n = get_unaligned_u32_le(p);
+ put_unaligned_u32_le(n - i, p);
+ }
+ target16 = i + get_unaligned_u16_le(p);
+ } else {
+ target16 = i + get_unaligned_u16_le(p);
+ if (i - *last_x86_pos <= max_trans_offset) {
+ u32 n = get_unaligned_u32_le(p);
+ put_unaligned_u32_le(n + i, p);
+ }
+ }
+
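+	/* Advance 'i' to the last byte of the 32-bit field.  If the same
+	 * 16-bit target was used recently, treat the surrounding region as
+	 * x86 code by updating 'last_x86_pos', then record this use of the
+	 * target. */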
+ i += opcode_nbytes + sizeof(le32) - 1;
+
+ if (i - last_target_usages[target16] <= LZMS_X86_ID_WINDOW_SIZE)
+ *last_x86_pos = i;
+
+ last_target_usages[target16] = i;
+
+ return p + sizeof(le32);
}
/*
* low-order 2 bytes of each address are considered significant.
*/
- s32 i;
- s32 tail_idx;
+ u8 *p;
+ u8 *tail_ptr;
u8 saved_byte;
s32 last_x86_pos;
if (size <= 17)
return;
- for (i = 0; i < 65536; i++)
+ for (s32 i = 0; i < 65536; i++)
last_target_usages[i] = -(s32)LZMS_X86_ID_WINDOW_SIZE - 1;
/*
* data[(size - 16) + 7] and have no effect on the result, as long
* as we restore those bytes later.
*/
- tail_idx = size - 16;
- saved_byte = data[tail_idx + 8];
- data[tail_idx + 8] = 0xE8;
+ tail_ptr = &data[size - 16];
+ saved_byte = *(tail_ptr + 8);
+ *(tail_ptr + 8) = 0xE8;
last_x86_pos = -LZMS_X86_MAX_TRANSLATION_OFFSET - 1;
/* Note: the very first byte must be ignored completely! */
- i = 0;
+ p = data + 1;
for (;;) {
- s32 max_trans_offset;
- s32 opcode_nbytes;
- u16 target16;
-
- /*
- * The following table is used to accelerate the common case
- * where the byte has nothing to do with x86 translation and
- * must simply be skipped. This is the fastest (at least on
- * x86_64) of the implementations I tested. The other
- * implementations I tested were:
- * - Jump table with 256 entries
- * - Switch statement with default
- */
- static const u8 is_potential_opcode[256] = {
- [0x48] = 1, [0x4C] = 1, [0xE8] = 1,
- [0xE9] = 1, [0xF0] = 1, [0xFF] = 1,
- };
-
- for (;;) {
- if (is_potential_opcode[data[++i]])
- break;
- if (is_potential_opcode[data[++i]])
- break;
- if (is_potential_opcode[data[++i]])
- break;
- if (is_potential_opcode[data[++i]])
- break;
- }
-
- if (i >= tail_idx)
- break;
+ p = find_next_opcode(p);
- max_trans_offset = LZMS_X86_MAX_TRANSLATION_OFFSET;
- switch (data[i]) {
- case 0x48:
- if (data[i + 1] == 0x8B) {
- if (data[i + 2] == 0x5 || data[i + 2] == 0xD) {
- /* Load relative (x86_64) */
- opcode_nbytes = 3;
- goto have_opcode;
- }
- } else if (data[i + 1] == 0x8D) {
- if ((data[i + 2] & 0x7) == 0x5) {
- /* Load effective address relative (x86_64) */
- opcode_nbytes = 3;
- goto have_opcode;
- }
- }
+ if (p >= tail_ptr)
break;
- case 0x4C:
- if (data[i + 1] == 0x8D) {
- if ((data[i + 2] & 0x7) == 0x5) {
- /* Load effective address relative (x86_64) */
- opcode_nbytes = 3;
- goto have_opcode;
- }
- }
- break;
- case 0xE8:
- /* Call relative. Note: 'max_trans_offset' must be
- * halved for this instruction. This means that we must
- * be more confident that we are in a region of x86
- * machine code before we will do a translation for this
- * particular instruction. */
- opcode_nbytes = 1;
- max_trans_offset /= 2;
- goto have_opcode;
- case 0xE9:
- /* Jump relative */
- i += 4;
- break;
- case 0xF0:
- if (data[i + 1] == 0x83 && data[i + 2] == 0x05) {
- /* Lock add relative */
- opcode_nbytes = 3;
- goto have_opcode;
- }
- break;
- case 0xFF:
- if (data[i + 1] == 0x15) {
- /* Call indirect */
- opcode_nbytes = 2;
- goto have_opcode;
- }
- break;
- }
-
- continue;
-
- have_opcode:
- if (undo) {
- if (i - last_x86_pos <= max_trans_offset) {
- LZMS_DEBUG("Undid x86 translation at position %d "
- "(opcode 0x%02x)", i, data[i]);
- void *p32 = &data[i + opcode_nbytes];
- u32 n = get_unaligned_u32_le(p32);
- put_unaligned_u32_le(n - i, p32);
- }
- target16 = i + get_unaligned_u16_le(&data[i + opcode_nbytes]);
- } else {
- target16 = i + get_unaligned_u16_le(&data[i + opcode_nbytes]);
- if (i - last_x86_pos <= max_trans_offset) {
- LZMS_DEBUG("Did x86 translation at position %d "
- "(opcode 0x%02x)", i, data[i]);
- void *p32 = &data[i + opcode_nbytes];
- u32 n = get_unaligned_u32_le(p32);
- put_unaligned_u32_le(n + i, p32);
- }
- }
-
- i += opcode_nbytes + sizeof(le32) - 1;
-
- if (i - last_target_usages[target16] <= LZMS_X86_ID_WINDOW_SIZE)
- last_x86_pos = i;
-
- last_target_usages[target16] = i;
- continue;
+		p = translate_if_needed(data, p, &last_x86_pos,
+					last_target_usages, undo);
}
- data[tail_idx + 8] = saved_byte;
+ *(tail_ptr + 8) = saved_byte;
}