X-Git-Url: https://wimlib.net/git/?a=blobdiff_plain;f=include%2Fwimlib%2Flzx.h;h=539edf303591eb168c53820486b7f7e794f3abea;hb=919b97754cedca6f37dd1808b00198dc3cb8bf98;hp=1f4c1e3f9b77b5acf202dc40b6e2334fca67d2d0;hpb=2fdc3bd720f5bc49680dc2284ea42a537d1acc07;p=wimlib

diff --git a/include/wimlib/lzx.h b/include/wimlib/lzx.h
index 1f4c1e3f..539edf30 100644
--- a/include/wimlib/lzx.h
+++ b/include/wimlib/lzx.h
@@ -6,6 +6,7 @@
  *
  */
 
 #include "wimlib/assert.h"
+#include "wimlib/compiler.h"
 #include "wimlib/util.h"
 #include "wimlib/types.h"
@@ -122,7 +123,7 @@ extern const u32 lzx_position_base[LZX_MAX_POSITION_SLOTS];
  * the formatted offset without actually looking at the array. */
 static inline unsigned
-lzx_get_position_slot_raw(unsigned formatted_offset)
+lzx_get_position_slot_raw(u32 formatted_offset)
 {
         if (formatted_offset >= 196608) {
                 return (formatted_offset >> 17) + 34;
@@ -142,7 +143,13 @@ extern unsigned lzx_get_num_main_syms(u32 window_size);
 /* Least-recently used queue for match offsets. */
 struct lzx_lru_queue {
         u32 R[LZX_NUM_RECENT_OFFSETS];
-};
+}
+#ifdef __x86_64__
+_aligned_attribute(8) /* Improves performance of LZX compression by 1% - 2%;
+                         specifically, this speeds up
+                         lzx_choose_near_optimal_match(). */
+#endif
+;
 
 /* In the LZX format, an offset of n bytes is actually encoded
  * as (n + LZX_OFFSET_OFFSET). */
@@ -157,4 +164,10 @@ lzx_lru_queue_init(struct lzx_lru_queue *queue)
                 queue->R[i] = 1;
 }
 
+extern void
+lzx_do_e8_preprocessing(u8 *data, s32 size);
+
+extern void
+lzx_undo_e8_preprocessing(u8 *data, s32 size);
+
 #endif /* _WIMLIB_LZX_H */
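
For reference, the lzx_do_e8_preprocessing() and lzx_undo_e8_preprocessing() declarations added at the end of this header correspond to the LZX x86 E8 call-instruction translation: it is applied to the input buffer before compression, and the inverse translation is applied after decompression to recover the original bytes. A minimal usage sketch, assuming a caller-owned buffer buf of size bytes; the wrapper function example_e8_round_trip() and the elided compress/decompress steps are illustrative only, not part of wimlib's API:

#include "wimlib/lzx.h"   /* declares the E8 routines; pulls in the u8/s32 types */

/* Sketch only: round-trip use of the E8 preprocessing routines.  The
 * buffer is translated in place before compression; the inverse
 * translation restores the original data after decompression.  */
static void
example_e8_round_trip(u8 *buf, s32 size)
{
        lzx_do_e8_preprocessing(buf, size);   /* run before compressing buf */

        /* ... compress buf, store or transmit it, then decompress back into buf ... */

        lzx_undo_e8_preprocessing(buf, size); /* run after decompression */
}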