X-Git-Url: https://wimlib.net/git/?a=blobdiff_plain;f=src%2Flzms-common.c;h=e274c6ba6bc2bc54e1f20fceddeabdc8bbdc89de;hb=19383e489030b926c9816699a13b81b36bb766b1;hp=03b70736140ba5dc91fbaa26e89465d6315602ca;hpb=637f86d129be63bb9dc431e8e2ead370330ca5ce;p=wimlib

diff --git a/src/lzms-common.c b/src/lzms-common.c
index 03b70736..e274c6ba 100644
--- a/src/lzms-common.c
+++ b/src/lzms-common.c
@@ -29,53 +29,85 @@
 #endif
 
 #include "wimlib/endianness.h"
-#include "wimlib/error.h"
 #include "wimlib/lzms.h"
 #include "wimlib/util.h"
 
 #include <pthread.h>
 
-/* A table that maps position slots to their base values. These are constants
- * computed at runtime by lzms_compute_slot_bases(). */
+/***************************************************************
+ * Constant tables initialized by lzms_compute_slots():        *
+ ***************************************************************/
+
+/* Table: position slot => position slot base value */
 u32 lzms_position_slot_base[LZMS_MAX_NUM_OFFSET_SYMS + 1];
 
-/* A table that maps length slots to their base values. These are constants
- * computed at runtime by lzms_compute_slot_bases(). */
+/* Table: position slot => number of extra position bits */
+u8 lzms_extra_position_bits[LZMS_MAX_NUM_OFFSET_SYMS];
+
+/* Table: log2(position) => [lower bound, upper bound] on position slot */
+u16 lzms_order_to_position_slot_bounds[30][2];
+
+/* Table: length slot => length slot base value */
 u32 lzms_length_slot_base[LZMS_NUM_LEN_SYMS + 1];
 
-/* Return the slot for the specified value. */
-unsigned
-lzms_get_slot(u32 value, const u32 slot_base_tab[], unsigned num_slots)
-{
-        unsigned slot = 0;
+/* Table: length slot => number of extra length bits */
+u8 lzms_extra_length_bits[LZMS_NUM_LEN_SYMS];
 
-        while (slot_base_tab[slot + 1] <= value)
-                slot++;
+/* Table: length (< LZMS_NUM_FAST_LENGTHS only) => length slot */
+u8 lzms_length_slot_fast[LZMS_NUM_FAST_LENGTHS];
 
-        return slot;
+u32
+lzms_get_slot(u32 value, const u32 slot_base_tab[], unsigned num_slots)
+{
+        u32 l = 0;
+        u32 r = num_slots - 1;
+        for (;;) {
+                LZMS_ASSERT(r >= l);
+                u32 slot = (l + r) / 2;
+                if (value >= slot_base_tab[slot]) {
+                        if (value < slot_base_tab[slot + 1])
+                                return slot;
+                        else
+                                l = slot + 1;
+                } else {
+                        r = slot - 1;
+                }
+        }
 }
 
-
 static void
 lzms_decode_delta_rle_slot_bases(u32 slot_bases[],
-                                 const u8 delta_run_lens[], size_t num_run_lens)
+                                 u8 extra_bits[],
+                                 const u8 delta_run_lens[],
+                                 u32 num_run_lens,
+                                 u32 final,
+                                 u32 expected_num_slots)
 {
+        u32 order = 0;
         u32 delta = 1;
         u32 base = 0;
-        size_t slot = 0;
-        for (size_t i = 0; i < num_run_lens; i++) {
+        u32 slot = 0;
+        for (u32 i = 0; i < num_run_lens; i++) {
                 u8 run_len = delta_run_lens[i];
                 while (run_len--) {
                         base += delta;
-                        slot_bases[slot++] = base;
+                        if (slot > 0)
+                                extra_bits[slot - 1] = order;
+                        slot_bases[slot] = base;
+                        slot++;
                 }
                 delta <<= 1;
+                order++;
         }
+        LZMS_ASSERT(slot == expected_num_slots);
+
+        slot_bases[slot] = final;
+        extra_bits[slot - 1] = bsr32(slot_bases[slot] - slot_bases[slot - 1]);
 }
 
 /* Initialize the global position and length slot tables. */
 static void
-lzms_compute_slot_bases(void)
+lzms_compute_slots(void)
 {
         /* If an explicit formula that maps LZMS position and length slots to
          * slot bases exists, then it could be used here. But until one is
@@ -95,34 +127,47 @@ lzms_compute_slot_bases(void)
                 1,
         };
 
+        /* Position slots */
         lzms_decode_delta_rle_slot_bases(lzms_position_slot_base,
+                                         lzms_extra_position_bits,
                                          position_slot_delta_run_lens,
-                                         ARRAY_LEN(position_slot_delta_run_lens));
-
-        lzms_position_slot_base[LZMS_MAX_NUM_OFFSET_SYMS] = 0x7fffffff;
+                                         ARRAY_LEN(position_slot_delta_run_lens),
+                                         0x7fffffff,
+                                         LZMS_MAX_NUM_OFFSET_SYMS);
+
+        for (u32 order = 0; order < 30; order++) {
+                lzms_order_to_position_slot_bounds[order][0] =
+                        lzms_get_slot(1U << order, lzms_position_slot_base,
+                                      LZMS_MAX_NUM_OFFSET_SYMS);
+                lzms_order_to_position_slot_bounds[order][1] =
+                        lzms_get_slot((1U << (order + 1)) - 1, lzms_position_slot_base,
+                                      LZMS_MAX_NUM_OFFSET_SYMS);
+        }
 
+        /* Length slots */
        lzms_decode_delta_rle_slot_bases(lzms_length_slot_base,
+                                         lzms_extra_length_bits,
                                          length_slot_delta_run_lens,
-                                         ARRAY_LEN(length_slot_delta_run_lens));
-
-        lzms_length_slot_base[LZMS_NUM_LEN_SYMS] = 0x400108ab;
+                                         ARRAY_LEN(length_slot_delta_run_lens),
+                                         0x400108ab,
+                                         LZMS_NUM_LEN_SYMS);
+
+        /* Create table mapping short lengths to length slots. */
+        for (u32 slot = 0, i = 0; i < LZMS_NUM_FAST_LENGTHS; i++) {
+                if (i >= lzms_length_slot_base[slot + 1])
+                        slot++;
+                lzms_length_slot_fast[i] = slot;
+        }
 }
 
-/* Initialize the global position length slot tables if not done so already. */
+/* Initialize the global position and length slot tables if not done so already.
+ * */
 void
-lzms_init_slot_bases(void)
+lzms_init_slots(void)
 {
-        static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
-        static bool already_computed = false;
-
-        if (unlikely(!already_computed)) {
-                pthread_mutex_lock(&mutex);
-                if (!already_computed) {
-                        lzms_compute_slot_bases();
-                        already_computed = true;
-                }
-                pthread_mutex_unlock(&mutex);
-        }
+        static pthread_once_t once = PTHREAD_ONCE_INIT;
+
+        pthread_once(&once, lzms_compute_slots);
 }
 
 static s32
@@ -164,9 +209,8 @@ lzms_maybe_do_x86_translation(u8 data[restrict], s32 i, s32 num_op_bytes,
         return i + 1;
 }
 
-static s32
-lzms_may_x86_translate(const u8 p[restrict],
-                       s32 *restrict max_offset_ret)
+static inline s32
+lzms_may_x86_translate(const u8 p[restrict], s32 *restrict max_offset_ret)
 {
         /* Switch on first byte of the opcode, assuming it is really an x86
          * instruction. */
@@ -227,31 +271,67 @@ lzms_may_x86_translate(const u8 p[restrict],
  * Translate relative addresses embedded in x86 instructions into absolute
  * addresses (@undo == %false), or undo this translation (@undo == %true).
  *
- * @last_target_usages is a temporary array of length >= 65536.
+ * Absolute addresses are usually more compressible by LZ factorization.
+ *
+ * @last_target_usages must be a temporary array of length >= 65536.
  */
 void
-lzms_x86_filter(u8 data[restrict],
-                s32 size,
-                s32 last_target_usages[restrict],
-                bool undo)
+lzms_x86_filter(u8 data[restrict], s32 size,
+                s32 last_target_usages[restrict], bool undo)
 {
+        /*
+         * Note: this filter runs unconditionally and uses a custom algorithm to
+         * detect data regions that probably contain x86 code.
+         *
+         * 'closest_target_usage' tracks the most recent position that has a
+         * good chance of being an x86 instruction. When the filter detects a
+         * likely x86 instruction, it updates this variable and considers the
+         * next 1023 bytes of data as valid for x86 translations.
+         *
+         * If part of the data does not, in fact, contain x86 machine code, then
+         * 'closest_target_usage' will, very likely, eventually fall more than
+         * 1023 bytes behind the current position. This results in x86
+         * translations being disabled until the next likely x86 instruction is
+         * detected.
+         *
+         * Translations on relative call (e8 opcode) instructions are slightly
+         * more restricted. They require that the most recent likely x86
+         * instruction was in the last 511 bytes, rather than the last 1023
+         * bytes.
+         *
+         * To identify "likely x86 instructions", the algorithm attempts to
+         * track the position of the most recent potential relative-addressing
+         * instruction that referenced each possible memory address. If it
+         * finds two references to the same memory address within a 65535 byte
+         * window, the second reference is flagged as a likely x86 instruction.
+         * Since the instructions considered for translation necessarily use
+         * relative addressing, the algorithm does a tentative translation into
+         * absolute addresses. In addition, so that memory addresses can be
+         * looked up in an array of reasonable size (in this code,
+         * 'last_target_usages'), only the low-order 2 bytes of each address are
+         * considered significant.
+         */
+
         s32 closest_target_usage = -LZMS_X86_MAX_TRANSLATION_OFFSET - 1;
 
         for (s32 i = 0; i < 65536; i++)
                 last_target_usages[i] = -LZMS_X86_MAX_GOOD_TARGET_OFFSET - 1;
 
-        for (s32 i = 0; i < size - 11; ) {
+        for (s32 i = 1; i < size - 16; ) {
                 s32 max_trans_offset;
                 s32 n;
 
                 n = lzms_may_x86_translate(data + i, &max_trans_offset);
+
                 if (max_trans_offset) {
+                        /* Recognized opcode. */
                        i = lzms_maybe_do_x86_translation(data, i, n,
                                                          &closest_target_usage,
                                                          last_target_usages,
                                                          max_trans_offset,
                                                          undo);
                 } else {
+                        /* Not a recognized opcode. */
                         i += n;
                 }
         }
@@ -260,7 +340,7 @@ lzms_x86_filter(u8 data[restrict],
 static void
 lzms_init_lz_lru_queues(struct lzms_lz_lru_queues *lz)
 {
-        /* Recent offsets for LZ matches */
+        /* Recent offsets for LZ matches */
         for (u32 i = 0; i < LZMS_NUM_RECENT_OFFSETS + 1; i++)
                 lz->recent_offsets[i] = i + 1;
 
@@ -271,7 +351,7 @@ lzms_init_lz_lru_queues(struct lzms_lz_lru_queues *lz)
 static void
 lzms_init_delta_lru_queues(struct lzms_delta_lru_queues *delta)
 {
-        /* Recent offsets and powers for LZ matches */
+        /* Recent offsets and powers for LZ matches */
         for (u32 i = 0; i < LZMS_NUM_RECENT_OFFSETS + 1; i++) {
                 delta->recent_offsets[i] = i + 1;
                 delta->recent_powers[i] = 0;
         }
@@ -286,8 +366,8 @@ lzms_init_delta_lru_queues(struct lzms_delta_lru_queues *delta)
 void
 lzms_init_lru_queues(struct lzms_lru_queues *lru)
 {
-        lzms_init_lz_lru_queues(&lru->lz);
-        lzms_init_delta_lru_queues(&lru->delta);
+        lzms_init_lz_lru_queues(&lru->lz);
+        lzms_init_delta_lru_queues(&lru->delta);
 }
 
 void
@@ -320,6 +400,6 @@ lzms_update_delta_lru_queues(struct lzms_delta_lru_queues *delta)
 void
 lzms_update_lru_queues(struct lzms_lru_queues *lru)
 {
-        lzms_update_lz_lru_queues(&lru->lz);
-        lzms_update_delta_lru_queues(&lru->delta);
+        lzms_update_lz_lru_queues(&lru->lz);
+        lzms_update_delta_lru_queues(&lru->delta);
 }
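
Editor's note on the new slot-table scheme, with a minimal sketch (not wimlib code): lzms_decode_delta_rle_slot_bases() builds each slot base table by run-length decoding the deltas between adjacent bases, where each successive run doubles the delta (an increasing power of 2), and the rewritten lzms_get_slot() then locates a value's slot by binary search over that table instead of the old linear scan. The program below illustrates the same two steps under stated assumptions: the run lengths, the sentinel value, and the helper names (build_slot_bases, get_slot) are made up for illustration, since the real position_slot_delta_run_lens[] and length_slot_delta_run_lens[] arrays are elided from this diff.

/*
 * Illustration only -- not part of wimlib.  The run lengths below are made up;
 * the real LZMS tables are decoded from the (elided) delta-run-length arrays
 * and use different sentinel values (0x7fffffff and 0x400108ab).
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_SLOTS 6

static uint32_t slot_base[NUM_SLOTS + 1];

/* Delta run-length decoding: run_lens[i] consecutive slots each increase the
 * base by 2^i.  This mirrors the table construction in
 * lzms_decode_delta_rle_slot_bases(), minus the extra-bits bookkeeping. */
static void
build_slot_bases(const uint8_t *run_lens, unsigned num_run_lens, uint32_t final)
{
        uint32_t delta = 1, base = 0;
        unsigned slot = 0;

        for (unsigned i = 0; i < num_run_lens; i++) {
                for (uint8_t n = run_lens[i]; n--; ) {
                        base += delta;
                        slot_base[slot++] = base;
                }
                delta <<= 1;
        }
        assert(slot == NUM_SLOTS);
        slot_base[slot] = final;  /* sentinel: upper bound for the last slot */
}

/* Binary search for the slot s with slot_base[s] <= value < slot_base[s + 1],
 * the same idea as the rewritten lzms_get_slot(). */
static unsigned
get_slot(uint32_t value)
{
        unsigned l = 0, r = NUM_SLOTS - 1;

        for (;;) {
                unsigned slot = (l + r) / 2;

                if (value < slot_base[slot])
                        r = slot - 1;
                else if (value >= slot_base[slot + 1])
                        l = slot + 1;
                else
                        return slot;
        }
}

int
main(void)
{
        /* Made-up run lengths: two slots with delta 1, two with delta 2,
         * two with delta 4, giving slot_base[] = 1, 2, 4, 6, 10, 14, <sentinel>. */
        static const uint8_t run_lens[] = { 2, 2, 2 };

        build_slot_bases(run_lens, 3, 64);

        for (uint32_t v = 1; v <= 16; v++)
                printf("value %2u -> slot %u\n", v, get_slot(v));
        return 0;
}

The sentinel entry written at slot_base[num_slots] gives the last slot an upper bound, which is what lets the binary search get by without a separate bounds check; this is the same role the new 'final' argument plays in the patched lzms_decode_delta_rle_slot_bases().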