diff --git a/src/lzms_common.c b/src/lzms_common.c
index dfe022b06e765d229b720a98eca062134c9c8f48..50176847f8b042cd20b1600342c5ee0fad0805da 100644
@@ -3,7 +3,7 @@
  */
 
 /*
- * Copyright (C) 2013, 2014 Eric Biggers
+ * Copyright (C) 2013-2016 Eric Biggers
  *
  * This file is free software; you can redistribute it and/or modify it under
  * the terms of the GNU Lesser General Public License as published by the Free
  * details.
  *
  * You should have received a copy of the GNU Lesser General Public License
- * along with this file; if not, see http://www.gnu.org/licenses/.
+ * along with this file; if not, see https://www.gnu.org/licenses/.
  */
 
 #ifdef HAVE_CONFIG_H
 #  include "config.h"
 #endif
 
+#include "wimlib/cpu_features.h"
 #include "wimlib/lzms_common.h"
 #include "wimlib/unaligned.h"
 
+#ifdef __x86_64__
+#  include <emmintrin.h>
+#endif
+
 /* Table: offset slot => offset slot base value  */
 const u32 lzms_offset_slot_base[LZMS_MAX_NUM_OFFSET_SYMS + 1] = {
        0x00000001, 0x00000002, 0x00000003, 0x00000004,
@@ -346,9 +351,13 @@ lzms_get_num_offset_slots(size_t uncompressed_size)
 }
 
 void
-lzms_init_probability_entries(struct lzms_probability_entry *entries, size_t count)
+lzms_init_probabilities(struct lzms_probabilites *probs)
 {
-       for (size_t i = 0; i < count; i++) {
+       struct lzms_probability_entry *entries =
+               (struct lzms_probability_entry *)probs;
+       size_t num_entries = sizeof(struct lzms_probabilites) /
+                            sizeof(struct lzms_probability_entry);
+       for (size_t i = 0; i < num_entries; i++) {
                entries[i].num_recent_zero_bits = LZMS_INITIAL_PROBABILITY;
                entries[i].recent_bits = LZMS_INITIAL_RECENT_BITS;
        }
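
The flattening cast above is only safe if struct lzms_probabilites really is laid out as a contiguous array of lzms_probability_entry, with no other members and no padding. A compile-time check along the following lines (a sketch, not part of this commit; it assumes the struct definitions from "wimlib/lzms_common.h" are in scope) would document that assumption:

    #include "wimlib/lzms_common.h"

    /* The division in lzms_init_probabilities silently truncates; this
     * fails the build instead if the struct is not a whole number of
     * probability entries. */
    _Static_assert(sizeof(struct lzms_probabilites) %
                   sizeof(struct lzms_probability_entry) == 0,
                   "lzms_probabilites must flatten to an array of entries");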
@@ -368,6 +377,171 @@ lzms_dilute_symbol_frequencies(u32 freqs[], unsigned num_syms)
                freqs[sym] = (freqs[sym] >> 1) + 1;
 }
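
As a worked example of the dilution step just above: frequencies {6, 3, 1} become {4, 2, 1}. Each count is halved and offset by one, so the statistics age toward uniform while every symbol keeps a nonzero frequency.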
 
+
+#ifdef __x86_64__
+static forceinline u8 *
+find_next_opcode_sse4_2(u8 *p)
+{
+       const __v16qi potential_opcodes = (__v16qi) {0x48, 0x4C, 0xE8, 0xE9, 0xF0, 0xFF};
+       __asm__(
+               "  pcmpestri $0x0, (%[p]), %[potential_opcodes]      \n"
+               "  jc 2f                                             \n"
+               "1:                                                  \n"
+               "  add $0x10, %[p]                                   \n"
+               "  pcmpestri $0x0, (%[p]), %[potential_opcodes]      \n"
+               "  jnc 1b                                            \n"
+               "2:                                                  \n"
+       #ifdef __ILP32__ /* x32 ABI (x86_64 with 32-bit pointers) */
+               "  add %%ecx, %[p]                                   \n"
+       #else
+               "  add %%rcx, %[p]                                   \n"
+       #endif
+               : [p] "+r" (p)
+               : [potential_opcodes] "x" (potential_opcodes), "a" (6), "d" (16)
+               : "rcx", "cc"
+              );
+
+       return p;
+}
+#endif /* __x86_64__ */
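
For readers who prefer intrinsics to inline assembly, the routine above is roughly equivalent to the following sketch (not part of this commit; find_next_opcode_sse4_2_intrin is a hypothetical name). Immediate 0x0 to pcmpestri corresponds to _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY: find the first of 16 input bytes equal to any of the 6 opcode bytes. Like the asm, it never checks a buffer length, so it relies on the sentinel opcode byte that the caller plants near the end of the data:

    #include <nmmintrin.h>  /* SSE4.2: _mm_cmpestri */
    #include <stdint.h>

    static inline uint8_t *
    find_next_opcode_sse4_2_intrin(uint8_t *p)
    {
            const __m128i opcodes =
                    _mm_setr_epi8(0x48, 0x4C, (char)0xE8, (char)0xE9,
                                  (char)0xF0, (char)0xFF, 0, 0, 0, 0, 0,
                                  0, 0, 0, 0, 0);
            int idx;

            /* _mm_cmpestri returns 16 when none of the 16 loaded bytes
             * matches; the sentinel guarantees this loop terminates. */
            while ((idx = _mm_cmpestri(opcodes, 6,
                                       _mm_loadu_si128((__m128i *)p), 16,
                                       _SIDD_UBYTE_OPS |
                                       _SIDD_CMP_EQUAL_ANY)) == 16)
                    p += 16;
            return p + idx;
    }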
+
+static forceinline u8 *
+find_next_opcode_default(u8 *p)
+{
+       /*
+        * The following table is used to accelerate the common case where the
+        * byte has nothing to do with x86 translation and must simply be
+        * skipped.  This was faster than the following alternatives:
+        *      - Jump table with 256 entries
+        *      - Switch statement with default
+        */
+       static const u8 is_potential_opcode[256] = {
+               [0x48] = 1, [0x4C] = 1, [0xE8] = 1,
+               [0xE9] = 1, [0xF0] = 1, [0xFF] = 1,
+       };
+
+       for (;;) {
+               if (is_potential_opcode[*p])
+                       break;
+               p++;
+               if (is_potential_opcode[*p])
+                       break;
+               p++;
+               if (is_potential_opcode[*p])
+                       break;
+               p++;
+               if (is_potential_opcode[*p])
+                       break;
+               p++;
+       }
+       return p;
+}
+
+static forceinline u8 *
+translate_if_needed(u8 *data, u8 *p, s32 *last_x86_pos,
+                   s32 last_target_usages[], bool undo)
+{
+       s32 max_trans_offset;
+       s32 opcode_nbytes;
+       u16 target16;
+       s32 i;
+
+       max_trans_offset = LZMS_X86_MAX_TRANSLATION_OFFSET;
+
+       /*
+        * p[0] has one of the following values:
+        *      0x48 0x4C 0xE8 0xE9 0xF0 0xFF
+        */
+
+       if (p[0] >= 0xF0) {
+               if (p[0] & 0x0F) {
+                       /* 0xFF (instruction group)  */
+                       if (p[1] == 0x15) {
+                               /* Call indirect relative  */
+                               opcode_nbytes = 2;
+                               goto have_opcode;
+                       }
+               } else {
+                       /* 0xF0 (lock prefix)  */
+                       if (p[1] == 0x83 && p[2] == 0x05) {
+                               /* Lock add relative  */
+                               opcode_nbytes = 3;
+                               goto have_opcode;
+                       }
+               }
+       } else if (p[0] <= 0x4C) {
+
+               /* 0x48 or 0x4C.  In 64-bit code this is a REX prefix byte with
+                * W=1, R=[01], X=0, and B=0, and it will be followed by the
+                * actual opcode, then additional bytes depending on the opcode.
+                * We are most interested in several common instructions that
+                * access data relative to the instruction pointer.  These use a
+                * 1-byte opcode, followed by a ModR/M byte, followed by a
+                * 4-byte displacement.  */
+
+               /* Test: does the ModR/M byte indicate RIP-relative addressing?
+                * Note: there seems to be a mistake in the format here; the
+                * mask really should be 0xC7 instead of 0x07 so that both the
+                * MOD and R/M fields of ModR/M are tested, not just R/M.  */
+               if ((p[2] & 0x07) == 0x05) {
+                       /* Check for the LEA (load effective address) or MOV
+                        * (move) opcodes.  For MOV there are additional
+                        * restrictions, although it seems they are only helpful
+                        * due to the overly lax ModR/M test.  */
+                       if (p[1] == 0x8D ||
+                           (p[1] == 0x8B && !(p[0] & 0x04) && !(p[2] & 0xF0)))
+                       {
+                               opcode_nbytes = 3;
+                               goto have_opcode;
+                       }
+               }
+       } else {
+               if (p[0] & 0x01) {
+                       /* 0xE9: Jump relative.  Theoretically this would be
+                        * useful to translate, but in fact it's explicitly
+                        * excluded.  Most likely it creates too many false
+                        * positives for the detection algorithm.  */
+                       p += 4;
+               } else {
+                       /* 0xE8: Call relative.  This is a common case, so it
+                        * uses a reduced max_trans_offset.  In other words, we
+                        * have to be more confident that the data actually is
+                        * x86 machine code before we'll do the translation.  */
+                       opcode_nbytes = 1;
+                       max_trans_offset >>= 1;
+                       goto have_opcode;
+               }
+       }
+
+       return p + 1;
+
+have_opcode:
+       i = p - data;
+       p += opcode_nbytes;
+       if (undo) {
+               if (i - *last_x86_pos <= max_trans_offset) {
+                       u32 n = get_unaligned_le32(p);
+                       put_unaligned_le32(n - i, p);
+               }
+               target16 = i + get_unaligned_le16(p);
+       } else {
+               target16 = i + get_unaligned_le16(p);
+               if (i - *last_x86_pos <= max_trans_offset) {
+                       u32 n = get_unaligned_le32(p);
+                       put_unaligned_le32(n + i, p);
+               }
+       }
+
+       i += opcode_nbytes + sizeof(le32) - 1;
+
+       if (i - last_target_usages[target16] <= LZMS_X86_ID_WINDOW_SIZE)
+               *last_x86_pos = i;
+
+       last_target_usages[target16] = i;
+
+       return p + sizeof(le32);
+}
+
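
Note how the forward and undo paths in translate_if_needed differ only in the sign of the 32-bit adjustment and in whether target16 is sampled before or after the rewrite; that symmetry is what makes the filter exactly invertible. Below is a standalone round-trip demonstration for the 0xE8 (call relative) case, a sketch with explicit little-endian helpers standing in for get/put_unaligned_le32:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t get_le32(const uint8_t *p)
    {
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    static void put_le32(uint32_t v, uint8_t *p)
    {
            p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
    }

    int main(void)
    {
            uint8_t buf[5] = {0xE8, 0x10, 0x00, 0x00, 0x00}; /* call, rel32 = 0x10 */
            int32_t i = 0x1000; /* position of the opcode within the data */

            /* Forward filter: rel32 += i, turning the relative target into
             * a position-independent value that compresses better. */
            put_le32(get_le32(&buf[1]) + (uint32_t)i, &buf[1]);
            assert(get_le32(&buf[1]) == 0x1010);

            /* Undo filter: rel32 -= i, restoring the original bytes. */
            put_le32(get_le32(&buf[1]) - (uint32_t)i, &buf[1]);
            assert(get_le32(&buf[1]) == 0x10);
            return 0;
    }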
 /*
  * Translate relative addresses embedded in x86 instructions into absolute
  * addresses (@undo == %false), or undo this translation (@undo == %true).
@@ -409,15 +583,14 @@ lzms_x86_filter(u8 data[restrict], s32 size,
         * low-order 2 bytes of each address are considered significant.
         */
 
-       s32 i;
-       s32 tail_idx;
-       u8 saved_byte;
-       s32 last_x86_pos;
+       u8 *p;
+       u8 *tail_ptr;
+       s32 last_x86_pos = -LZMS_X86_MAX_TRANSLATION_OFFSET - 1;
 
        if (size <= 17)
                return;
 
-       for (i = 0; i < 65536; i++)
+       for (s32 i = 0; i < 65536; i++)
                last_target_usages[i] = -(s32)LZMS_X86_ID_WINDOW_SIZE - 1;
 
        /*
@@ -435,129 +608,36 @@ lzms_x86_filter(u8 data[restrict], s32 size,
         *     data[(size - 16) + 7] and have no effect on the result, as long
         *     as we restore those bytes later.
         */
-       tail_idx = size - 16;
-       saved_byte = data[tail_idx + 8];
-       data[tail_idx + 8] = 0xE8;
-       last_x86_pos = -LZMS_X86_MAX_TRANSLATION_OFFSET - 1;
 
        /* Note: the very first byte must be ignored completely!  */
-       i = 0;
-       for (;;) {
-               s32 max_trans_offset;
-               s32 opcode_nbytes;
-               u16 target16;
-
-               /*
-                * The following table is used to accelerate the common case
-                * where the byte has nothing to do with x86 translation and
-                * must simply be skipped.  This is the fastest (at least on
-                * x86_64) of the implementations I tested.  The other
-                * implementations I tested were:
-                *      - Jump table with 256 entries
-                *      - Switch statement with default
-                */
-               static const u8 is_potential_opcode[256] = {
-                       [0x48] = 1, [0x4C] = 1, [0xE8] = 1,
-                       [0xE9] = 1, [0xF0] = 1, [0xFF] = 1,
-               };
+       p = data + 1;
+       tail_ptr = &data[size - 16];
 
+#ifdef __x86_64__
+       if (cpu_features & X86_CPU_FEATURE_SSE4_2) {
+               u8 saved_byte = *tail_ptr;
+               *tail_ptr = 0xE8;
                for (;;) {
-                       if (is_potential_opcode[data[++i]])
-                               break;
-                       if (is_potential_opcode[data[++i]])
+                       u8 *new_p = find_next_opcode_sse4_2(p);
+                       if (new_p >= tail_ptr - 8)
                                break;
-                       if (is_potential_opcode[data[++i]])
-                               break;
-                       if (is_potential_opcode[data[++i]])
-                               break;
-               }
-
-               if (i >= tail_idx)
-                       break;
-
-               max_trans_offset = LZMS_X86_MAX_TRANSLATION_OFFSET;
-               switch (data[i]) {
-               case 0x48:
-                       if (data[i + 1] == 0x8B) {
-                               if (data[i + 2] == 0x5 || data[i + 2] == 0xD) {
-                                       /* Load relative (x86_64)  */
-                                       opcode_nbytes = 3;
-                                       goto have_opcode;
-                               }
-                       } else if (data[i + 1] == 0x8D) {
-                               if ((data[i + 2] & 0x7) == 0x5) {
-                                       /* Load effective address relative (x86_64)  */
-                                       opcode_nbytes = 3;
-                                       goto have_opcode;
-                               }
-                       }
-                       break;
-               case 0x4C:
-                       if (data[i + 1] == 0x8D) {
-                               if ((data[i + 2] & 0x7) == 0x5) {
-                                       /* Load effective address relative (x86_64)  */
-                                       opcode_nbytes = 3;
-                                       goto have_opcode;
-                               }
-                       }
-                       break;
-               case 0xE8:
-                       /* Call relative.  Note: 'max_trans_offset' must be
-                        * halved for this instruction.  This means that we must
-                        * be more confident that we are in a region of x86
-                        * machine code before we will do a translation for this
-                        * particular instruction.  */
-                       opcode_nbytes = 1;
-                       max_trans_offset /= 2;
-                       goto have_opcode;
-               case 0xE9:
-                       /* Jump relative  */
-                       i += 4;
-                       break;
-               case 0xF0:
-                       if (data[i + 1] == 0x83 && data[i + 2] == 0x05) {
-                               /* Lock add relative  */
-                               opcode_nbytes = 3;
-                               goto have_opcode;
-                       }
-                       break;
-               case 0xFF:
-                       if (data[i + 1] == 0x15) {
-                               /* Call indirect  */
-                               opcode_nbytes = 2;
-                               goto have_opcode;
-                       }
-                       break;
+                       p = new_p;
+                       p = translate_if_needed(data, p, &last_x86_pos,
+                                               last_target_usages, undo);
                }
-
-               continue;
-
-       have_opcode:
-               if (undo) {
-                       if (i - last_x86_pos <= max_trans_offset) {
-                               void *p32 = &data[i + opcode_nbytes];
-                               u32 n = get_unaligned_u32_le(p32);
-                               put_unaligned_u32_le(n - i, p32);
-                       }
-                       target16 = i + get_unaligned_u16_le(&data[i + opcode_nbytes]);
-               } else {
-                       target16 = i + get_unaligned_u16_le(&data[i + opcode_nbytes]);
-                       if (i - last_x86_pos <= max_trans_offset) {
-                               void *p32 = &data[i + opcode_nbytes];
-                               u32 n = get_unaligned_u32_le(p32);
-                               put_unaligned_u32_le(n + i, p32);
-                       }
+               *tail_ptr = saved_byte;
+       }
+#endif
+       {
+               u8 saved_byte = *(tail_ptr + 8);
+               *(tail_ptr + 8) = 0xE8;
+               for (;;) {
+                       p = find_next_opcode_default(p);
+                       if (p >= tail_ptr)
+                               break;
+                       p = translate_if_needed(data, p, &last_x86_pos,
+                                               last_target_usages, undo);
                }
-
-               i += opcode_nbytes + sizeof(le32) - 1;
-
-               if (i - last_target_usages[target16] <= LZMS_X86_ID_WINDOW_SIZE)
-                       last_x86_pos = i;
-
-               last_target_usages[target16] = i;
-
-               continue;
+               *(tail_ptr + 8) = saved_byte;
        }
-
-       data[tail_idx + 8] = saved_byte;
 }
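
Since the same routine performs both directions, a caller pairs the two invocations around compression and decompression. A usage sketch (hypothetical wrapper names, assuming the declaration from "wimlib/lzms_common.h"; the 65536-entry last_target_usages array is scratch space that the function fully reinitializes on each call):

    #include "wimlib/lzms_common.h"

    /* Before LZMS compression: make x86 call/jump targets position-
     * independent so repeated targets become repeated byte patterns. */
    static void
    x86_preprocess(u8 *data, s32 size, s32 last_target_usages[65536])
    {
            lzms_x86_filter(data, size, last_target_usages, false);
    }

    /* After LZMS decompression: invert the transformation to recover
     * the original machine code. */
    static void
    x86_postprocess(u8 *data, s32 size, s32 last_target_usages[65536])
    {
            lzms_x86_filter(data, size, last_target_usages, true);
    }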