/*
 * Declarations shared between LZX compression and decompression.
 */
10 #include "wimlib/assert.h"
11 #include "wimlib/compiler.h"
12 #include "wimlib/lzx_constants.h"
13 #include "wimlib/util.h"
14 #include "wimlib/types.h"
/* Uncomment to enable verbose debug output and internal consistency checks
 * for the LZX code. */
//#define ENABLE_LZX_DEBUG
#ifdef ENABLE_LZX_DEBUG
#  define LZX_DEBUG DEBUG
#  define LZX_ASSERT wimlib_assert
#else
/* Debugging disabled: both macros expand to nothing, so their arguments are
 * never evaluated in release builds. */
#  define LZX_DEBUG(format, ...)
#  define LZX_ASSERT(...)
#endif
25 #define USE_LZX_EXTRA_BITS_ARRAY
27 #ifdef USE_LZX_EXTRA_BITS_ARRAY
28 extern const u8 lzx_extra_bits[LZX_MAX_POSITION_SLOTS];
/* Given the number of an LZX position slot, return the number of extra bits
 * that are needed to encode the match offset.
 *
 * @position_slot: the LZX position slot number.
 *
 * Returns the count of verbatim/aligned extra offset bits for that slot. */
static inline unsigned
lzx_get_num_extra_bits(unsigned position_slot)
{
#ifdef USE_LZX_EXTRA_BITS_ARRAY
	/* Use a lookup table; valid for every slot up to
	 * LZX_MAX_POSITION_SLOTS - 1. */
	return lzx_extra_bits[position_slot];
#else
	/* Calculate directly using a shift and subtraction.  This formula only
	 * holds for slots 2 through 37; higher slots (used with large
	 * windows) need the table. */
	LZX_ASSERT(position_slot >= 2 && position_slot <= 37);
	return (position_slot >> 1) - 1;
#endif
}
46 extern const u32 lzx_position_base[LZX_MAX_POSITION_SLOTS];
48 /* Returns the LZX position slot that corresponds to a given formatted offset.
50 * Logically, this returns the smallest i such that
51 * formatted_offset >= lzx_position_base[i].
53 * The actual implementation below takes advantage of the regularity of the
54 * numbers in the lzx_position_base array to calculate the slot directly from
55 * the formatted offset without actually looking at the array.
57 static inline unsigned
58 lzx_get_position_slot_raw(u32 formatted_offset)
60 if (formatted_offset >= 196608) {
61 return (formatted_offset >> 17) + 34;
63 LZX_ASSERT(2 <= formatted_offset && formatted_offset < 655360);
64 unsigned mssb_idx = bsr32(formatted_offset);
65 return (mssb_idx << 1) |
66 ((formatted_offset >> (mssb_idx - 1)) & 1);
70 extern bool lzx_window_size_valid(size_t window_size);
71 extern unsigned lzx_get_num_main_syms(u32 window_size);
73 /* Least-recently used queue for match offsets. */
74 struct lzx_lru_queue {
75 u32 R[LZX_NUM_RECENT_OFFSETS];
78 _aligned_attribute(8) /* Improves performance of LZX compression by 1% - 2%;
79 specifically, this speeds up
80 lzx_choose_near_optimal_item(). */
84 /* Initialize the LZX least-recently-used match offset queue at the beginning of
85 * a new window for either decompression or compression. */
87 lzx_lru_queue_init(struct lzx_lru_queue *queue)
89 for (unsigned i = 0; i < LZX_NUM_RECENT_OFFSETS; i++)
94 lzx_do_e8_preprocessing(u8 *data, u32 size);
97 lzx_undo_e8_preprocessing(u8 *data, u32 size);
99 #endif /* _WIMLIB_LZX_H */