X-Git-Url: https://wimlib.net/git/?a=blobdiff_plain;f=src%2Flzms_common.c;h=fb13a4358c5469a79d0c4c5e101c03ee2f9e59db;hb=01ce2d43d6ba9721bf46c3e132c4be394ef3f0f9;hp=333beb5f9aad97626795412fa6d7808511f91a72;hpb=eb50d779c493f012b0622971643ee8f348bc8b5d;p=wimlib diff --git a/src/lzms_common.c b/src/lzms_common.c index 333beb5f..fb13a435 100644 --- a/src/lzms_common.c +++ b/src/lzms_common.c @@ -3,7 +3,7 @@ */ /* - * Copyright (C) 2013, 2014 Eric Biggers + * Copyright (C) 2013-2016 Eric Biggers * * This file is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free @@ -379,7 +379,7 @@ lzms_dilute_symbol_frequencies(u32 freqs[], unsigned num_syms) #ifdef __x86_64__ -static inline u8 * +static forceinline u8 * find_next_opcode_sse4_2(u8 *p) { const __v16qi potential_opcodes = (__v16qi) {0x48, 0x4C, 0xE8, 0xE9, 0xF0, 0xFF}; @@ -391,7 +391,11 @@ find_next_opcode_sse4_2(u8 *p) " pcmpestri $0x0, (%[p]), %[potential_opcodes] \n" " jnc 1b \n" "2: \n" + #ifdef __ILP32__ /* x32 ABI (x86_64 with 32-bit pointers) */ + " add %%ecx, %[p] \n" + #else " add %%rcx, %[p] \n" + #endif : [p] "+r" (p) : [potential_opcodes] "x" (potential_opcodes), "a" (6), "d" (16) : "rcx", "cc" @@ -401,7 +405,7 @@ find_next_opcode_sse4_2(u8 *p) } #endif /* __x86_64__ */ -static inline u8 * +static forceinline u8 * find_next_opcode_default(u8 *p) { /* @@ -433,7 +437,7 @@ find_next_opcode_default(u8 *p) return p; } -static inline u8 * +static forceinline u8 * translate_if_needed(u8 *data, u8 *p, s32 *last_x86_pos, s32 last_target_usages[], bool undo) { @@ -446,26 +450,26 @@ translate_if_needed(u8 *data, u8 *p, s32 *last_x86_pos, /* * p[0] has one of the following values: - * 0xE8 0xE9 0x48 0x4C 0xF0 0xFF + * 0x48 0x4C 0xE8 0xE9 0xF0 0xFF */ - if ((p[0] & 0xFE) == 0xE8) { - if (p[0] & 0x01) { - /* 0xE9: Jump relative. Theoretically this would be - * useful to translate, but in fact it's explicitly - * excluded. 
Most likely it creates too many false
-			 * positives for the detection algorithm. */
-			p += 4;
+	if (p[0] >= 0xF0) {
+		if (p[0] & 0x0F) {
+			/* 0xFF (instruction group) */
+			if (p[1] == 0x15) {
+				/* Call indirect relative */
+				opcode_nbytes = 2;
+				goto have_opcode;
+			}
 		} else {
-			/* 0xE8: Call relative.  This is a common case, so it
-			 * uses a reduced max_trans_offset.  In other words, we
-			 * have to be more confident that the data actually is
-			 * x86 machine code before we'll do the translation. */
-			opcode_nbytes = 1;
-			max_trans_offset >>= 1;
-			goto have_opcode;
+			/* 0xF0 (lock prefix) */
+			if (p[1] == 0x83 && p[2] == 0x05) {
+				/* Lock add relative */
+				opcode_nbytes = 3;
+				goto have_opcode;
+			}
 		}
-	} else if ((p[0] & 0xFB) == 0x48) {
+	} else if (p[0] <= 0x4C) {
 
 		/* 0x48 or 0x4C.  In 64-bit code this is a REX prefix byte with
 		 * W=1, R=[01], X=0, and B=0, and it will be followed by the
@@ -492,20 +496,20 @@ translate_if_needed(u8 *data, u8 *p, s32 *last_x86_pos,
 			}
 		}
 	} else {
-		if (p[0] & 0x0F) {
-			/* 0xFF (instruction group) */
-			if (p[1] == 0x15) {
-				/* Call indirect relative */
-				opcode_nbytes = 2;
-				goto have_opcode;
-			}
+		if (p[0] & 0x01) {
+			/* 0xE9: Jump relative.  Theoretically this would be
+			 * useful to translate, but in fact it's explicitly
+			 * excluded.  Most likely it creates too many false
+			 * positives for the detection algorithm. */
+			p += 4;
 		} else {
-			/* 0xF0 (lock prefix) */
-			if (p[1] == 0x83 && p[2] == 0x05) {
-				/* Lock add relative */
-				opcode_nbytes = 3;
-				goto have_opcode;
-			}
+			/* 0xE8: Call relative.  This is a common case, so it
+			 * uses a reduced max_trans_offset.  In other words, we
+			 * have to be more confident that the data actually is
+			 * x86 machine code before we'll do the translation. */
+			opcode_nbytes = 1;
+			max_trans_offset >>= 1;
+			goto have_opcode;
 		}
 	}
 
@@ -516,15 +520,15 @@ have_opcode:
 	p += opcode_nbytes;
 	if (undo) {
 		if (i - *last_x86_pos <= max_trans_offset) {
-			u32 n = get_unaligned_u32_le(p);
-			put_unaligned_u32_le(n - i, p);
+			u32 n = get_unaligned_le32(p);
+			put_unaligned_le32(n - i, p);
 		}
-		target16 = i + get_unaligned_u16_le(p);
+		target16 = i + get_unaligned_le16(p);
 	} else {
-		target16 = i + get_unaligned_u16_le(p);
+		target16 = i + get_unaligned_le16(p);
 		if (i - *last_x86_pos <= max_trans_offset) {
-			u32 n = get_unaligned_u32_le(p);
-			put_unaligned_u32_le(n + i, p);
+			u32 n = get_unaligned_le32(p);
+			put_unaligned_le32(n + i, p);
 		}
 	}