diff --git a/src/lzms_common.c b/src/lzms_common.c
index 333beb5f9aad97626795412fa6d7808511f91a72..50176847f8b042cd20b1600342c5ee0fad0805da 100644
--- a/src/lzms_common.c
+++ b/src/lzms_common.c
@@ -3,7 +3,7 @@
  */
 
 /*
- * Copyright (C) 2013, 2014 Eric Biggers
+ * Copyright (C) 2013-2016 Eric Biggers
  *
  * This file is free software; you can redistribute it and/or modify it under
  * the terms of the GNU Lesser General Public License as published by the Free
  * details.
  *
  * You should have received a copy of the GNU Lesser General Public License
- * along with this file; if not, see http://www.gnu.org/licenses/.
+ * along with this file; if not, see https://www.gnu.org/licenses/.
  */
 
 #ifdef HAVE_CONFIG_H
 #  include "config.h"
 #endif
 
+#include "wimlib/cpu_features.h"
 #include "wimlib/lzms_common.h"
 #include "wimlib/unaligned.h"
-#include "wimlib/x86_cpu_features.h"
 
 #ifdef __x86_64__
 #  include <emmintrin.h>
@@ -379,7 +379,7 @@ lzms_dilute_symbol_frequencies(u32 freqs[], unsigned num_syms)
 
 
 #ifdef __x86_64__
-static inline u8 *
+static forceinline u8 *
 find_next_opcode_sse4_2(u8 *p)
 {
        const __v16qi potential_opcodes = (__v16qi) {0x48, 0x4C, 0xE8, 0xE9, 0xF0, 0xFF};
@@ -391,7 +391,11 @@ find_next_opcode_sse4_2(u8 *p)
                "  pcmpestri $0x0, (%[p]), %[potential_opcodes]      \n"
                "  jnc 1b                                            \n"
                "2:                                                  \n"
+       #ifdef __ILP32__ /* x32 ABI (x86_64 with 32-bit pointers) */
+               "  add %%ecx, %[p]                                   \n"
+       #else
                "  add %%rcx, %[p]                                   \n"
+       #endif
                : [p] "+r" (p)
                : [potential_opcodes] "x" (potential_opcodes), "a" (6), "d" (16)
                : "rcx", "cc"
@@ -401,7 +405,7 @@ find_next_opcode_sse4_2(u8 *p)
 }
 #endif /* __x86_64__ */
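For readers less familiar with pcmpestri, the assembly above performs an SSE4.2 "equal any" byte search: each 16-byte window of the buffer is compared against the six candidate opcode bytes, and the index of the first hit is left in ECX, which is then added to the pointer (hence the x32 fix above: under the ILP32 x86_64 ABI the pointer register is 32 bits wide, so the add must use %ecx rather than %rcx). A rough intrinsics-based equivalent is sketched below; it is illustrative only, not the code this file ships, and like the real scanner it assumes the caller has planted a matching sentinel byte (lzms_x86_filter() writes 0xE8 near the end of the buffer) so the loop needs no bounds check.

#include <immintrin.h>	/* x86 intrinsics umbrella header; build with -msse4.2 */

/* Sketch of the "equal any" scan: return a pointer to the next byte at or
 * after 'p' that equals one of the six candidate opcode bytes.  Relies on a
 * sentinel match existing before the end of the buffer. */
static unsigned char *
find_next_opcode_sse4_2_sketch(unsigned char *p)
{
	const __m128i needles = _mm_setr_epi8(0x48, 0x4C, (char)0xE8, (char)0xE9,
					      (char)0xF0, (char)0xFF, 0, 0, 0, 0,
					      0, 0, 0, 0, 0, 0);
	for (;;) {
		__m128i window = _mm_loadu_si128((const __m128i *)p);
		/* Index of the first byte of 'window' that matches any of the
		 * first 6 bytes of 'needles'; 16 if none matched. */
		int idx = _mm_cmpestri(needles, 6, window, 16,
				       _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
				       _SIDD_LEAST_SIGNIFICANT);
		if (idx < 16)
			return p + idx;
		p += 16;
	}
}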
 
-static inline u8 *
+static forceinline u8 *
 find_next_opcode_default(u8 *p)
 {
        /*
@@ -433,7 +437,7 @@ find_next_opcode_default(u8 *p)
        return p;
 }
 
-static inline u8 *
+static forceinline u8 *
 translate_if_needed(u8 *data, u8 *p, s32 *last_x86_pos,
                    s32 last_target_usages[], bool undo)
 {
@@ -446,26 +450,26 @@ translate_if_needed(u8 *data, u8 *p, s32 *last_x86_pos,
 
        /*
         * p[0] has one of the following values:
-        *      0xE8 0xE9 0x48 0x4C 0xF0 0xFF
+        *      0x48 0x4C 0xE8 0xE9 0xF0 0xFF
         */
 
-       if ((p[0] & 0xFE) == 0xE8) {
-               if (p[0] & 0x01) {
-                       /* 0xE9: Jump relative.  Theoretically this would be
-                        * useful to translate, but in fact it's explicitly
-                        * excluded.  Most likely it creates too many false
-                        * positives for the detection algorithm.  */
-                       p += 4;
+       if (p[0] >= 0xF0) {
+               if (p[0] & 0x0F) {
+                       /* 0xFF (instruction group)  */
+                       if (p[1] == 0x15) {
+                               /* Call indirect relative  */
+                               opcode_nbytes = 2;
+                               goto have_opcode;
+                       }
                } else {
-                       /* 0xE8: Call relative.  This is a common case, so it
-                        * uses a reduced max_trans_offset.  In other words, we
-                        * have to be more confident that the data actually is
-                        * x86 machine code before we'll do the translation.  */
-                       opcode_nbytes = 1;
-                       max_trans_offset >>= 1;
-                       goto have_opcode;
+                       /* 0xF0 (lock prefix)  */
+                       if (p[1] == 0x83 && p[2] == 0x05) {
+                               /* Lock add relative  */
+                               opcode_nbytes = 3;
+                               goto have_opcode;
+                       }
                }
-       } else if ((p[0] & 0xFB) == 0x48) {
+       } else if (p[0] <= 0x4C) {
 
                /* 0x48 or 0x4C.  In 64-bit code this is a REX prefix byte with
                 * W=1, R=[01], X=0, and B=0, and it will be followed by the
@@ -492,20 +496,20 @@ translate_if_needed(u8 *data, u8 *p, s32 *last_x86_pos,
                        }
                }
        } else {
-               if (p[0] & 0x0F) {
-                       /* 0xFF (instruction group)  */
-                       if (p[1] == 0x15) {
-                               /* Call indirect relative  */
-                               opcode_nbytes = 2;
-                               goto have_opcode;
-                       }
+               if (p[0] & 0x01) {
+                       /* 0xE9: Jump relative.  Theoretically this would be
+                        * useful to translate, but in fact it's explicitly
+                        * excluded.  Most likely it creates too many false
+                        * positives for the detection algorithm.  */
+                       p += 4;
                } else {
-                       /* 0xF0 (lock prefix)  */
-                       if (p[1] == 0x83 && p[2] == 0x05) {
-                               /* Lock add relative  */
-                               opcode_nbytes = 3;
-                               goto have_opcode;
-                       }
+                       /* 0xE8: Call relative.  This is a common case, so it
+                        * uses a reduced max_trans_offset.  In other words, we
+                        * have to be more confident that the data actually is
+                        * x86 machine code before we'll do the translation.  */
+                       opcode_nbytes = 1;
+                       max_trans_offset >>= 1;
+                       goto have_opcode;
                }
        }
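The reordered branches above are a dispatch over the six byte values that find_next_opcode_*() can return: the range tests p[0] >= 0xF0 and p[0] <= 0x4C work because the candidates fall into three disjoint value ranges (0x48/0x4C, 0xE8/0xE9, 0xF0/0xFF). Written out as a standalone classifier, the cases look roughly like the sketch below; this is illustrative only, and the 0x48/0x4C (REX.W prefix) case is simplified, since the real code goes on to inspect the following opcode and ModR/M bytes.

/* Sketch: classify a candidate translation site.  Returns the number of
 * opcode/prefix bytes that precede the 32-bit displacement, or -1 if the
 * bytes at 'p' are not translated. */
static int
classify_opcode_sketch(const unsigned char *p)
{
	switch (p[0]) {
	case 0xE8:	/* call rel32: translated, with a halved
			 * max_trans_offset to reduce false positives */
		return 1;
	case 0xE9:	/* jmp rel32: recognized but explicitly excluded */
		return -1;
	case 0xFF:	/* opcode group: only "FF 15" (call [rip+rel32]) */
		return (p[1] == 0x15) ? 2 : -1;
	case 0xF0:	/* lock prefix: only "F0 83 05" (lock add [rip+rel32], imm8) */
		return (p[1] == 0x83 && p[2] == 0x05) ? 3 : -1;
	case 0x48:
	case 0x4C:	/* REX.W prefix: further checks on the following
			 * bytes decide (simplified away here) */
		return -1;
	default:
		return -1;
	}
}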
 
@@ -516,15 +520,15 @@ have_opcode:
        p += opcode_nbytes;
        if (undo) {
                if (i - *last_x86_pos <= max_trans_offset) {
-                       u32 n = get_unaligned_u32_le(p);
-                       put_unaligned_u32_le(n - i, p);
+                       u32 n = get_unaligned_le32(p);
+                       put_unaligned_le32(n - i, p);
                }
-               target16 = i + get_unaligned_u16_le(p);
+               target16 = i + get_unaligned_le16(p);
        } else {
-               target16 = i + get_unaligned_u16_le(p);
+               target16 = i + get_unaligned_le16(p);
                if (i - *last_x86_pos <= max_trans_offset) {
-                       u32 n = get_unaligned_u32_le(p);
-                       put_unaligned_u32_le(n + i, p);
+                       u32 n = get_unaligned_le32(p);
+                       put_unaligned_le32(n + i, p);
                }
        }
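get_unaligned_le32()/put_unaligned_le32() (renamed from the *_u32_le spellings) are wimlib's portable unaligned little-endian accessors. The translation itself is just an add or subtract of the opcode's position i: on the forward pass a relative displacement becomes a (mostly) position-independent value, so repeated references to the same target produce identical byte patterns that compress better, and the undo pass reverses it exactly. A minimal standalone illustration, using hypothetical memcpy-based helpers and assuming a little-endian host for brevity:

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-ins for wimlib's unaligned LE accessors
 * (little-endian host assumed for brevity). */
static uint32_t load32(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v; }
static void store32(uint32_t v, uint8_t *p) { memcpy(p, &v, 4); }

/* Forward filter: the displacement at 'p' belongs to an opcode at position
 * 'i'; rel32 + i yields a value that is the same for every reference to a
 * given target. */
static void translate_rel32(uint8_t *p, int32_t i)
{
	store32(load32(p) + (uint32_t)i, p);
}

/* Undo filter: restore the original rel32 displacement. */
static void untranslate_rel32(uint8_t *p, int32_t i)
{
	store32(load32(p) - (uint32_t)i, p);
}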
 
@@ -610,7 +614,7 @@ lzms_x86_filter(u8 data[restrict], s32 size,
        tail_ptr = &data[size - 16];
 
 #ifdef __x86_64__
-       if (x86_have_cpu_feature(X86_CPU_FEATURE_SSE4_2)) {
+       if (cpu_features & X86_CPU_FEATURE_SSE4_2) {
                u8 saved_byte = *tail_ptr;
                *tail_ptr = 0xE8;
                for (;;) {
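The runtime dispatch now tests wimlib's cached cpu_features bitmask directly instead of calling x86_have_cpu_feature(). Also visible above is the sentinel trick that makes the bounds-check-free SSE4.2 scanner safe: the byte at data[size - 16] is saved, overwritten with 0xE8 so the scan is guaranteed to stop, and restored afterwards. Reduced to a sketch (illustrative only, reusing the find_next_opcode_sse4_2_sketch() helper from the earlier sketch; the real loop's exit condition and per-candidate handling differ):

static void
scan_with_sentinel_sketch(unsigned char *data, long size)
{
	unsigned char *tail_ptr = &data[size - 16];
	unsigned char saved_byte = *tail_ptr;
	unsigned char *p = data;

	*tail_ptr = 0xE8;	/* plant a guaranteed match for the scanner */
	while ((p = find_next_opcode_sse4_2_sketch(p)) < tail_ptr) {
		/* ...inspect and possibly translate the candidate at p... */
		p++;		/* ensure forward progress */
	}
	*tail_ptr = saved_byte;	/* undo the sentinel */
}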