4 * Declarations shared between LZX compression and decompression.
10 #include "wimlib/assert.h"
11 #include "wimlib/bitops.h"
12 #include "wimlib/compiler.h"
13 #include "wimlib/lzx_constants.h"
14 #include "wimlib/util.h"
15 #include "wimlib/types.h"
17 //#define ENABLE_LZX_DEBUG
18 #ifdef ENABLE_LZX_DEBUG
19 # define LZX_ASSERT wimlib_assert
21 # define LZX_ASSERT(...)
24 extern const u32 lzx_offset_slot_base[LZX_MAX_OFFSET_SLOTS];
26 extern const u8 lzx_extra_offset_bits[LZX_MAX_OFFSET_SLOTS];
28 /* Returns the LZX offset slot that corresponds to a given adjusted offset.
30 * Logically, this returns the smallest i such that
31 * adjusted_offset >= lzx_offset_slot_base[i].
33 * The actual implementation below takes advantage of the regularity of the
34 * numbers in the lzx_offset_slot_base array to calculate the slot directly from
35 * the adjusted offset without actually looking at the array.
37 static inline unsigned
38 lzx_get_offset_slot_raw(u32 adjusted_offset)
40 if (adjusted_offset >= 196608) {
41 return (adjusted_offset >> 17) + 34;
43 LZX_ASSERT(2 <= adjusted_offset && adjusted_offset < 655360);
44 unsigned mssb_idx = fls32(adjusted_offset);
45 return (mssb_idx << 1) |
46 ((adjusted_offset >> (mssb_idx - 1)) & 1);
50 extern unsigned lzx_get_window_order(size_t max_block_size);
52 extern unsigned lzx_get_num_main_syms(unsigned window_order);
/* Least-recently used queue for match offsets. */
struct lzx_lru_queue {
	/* The recent match offsets.  NOTE(review): presumably R[0] is the most
	 * recently used -- confirm against the compressor/decompressor code. */
	u32 R[LZX_NUM_RECENT_OFFSETS];
	/* Aligned to sizeof(unsigned long), presumably so the whole queue can
	 * be copied efficiently with word-sized accesses -- TODO confirm. */
} _aligned_attribute(sizeof(unsigned long));
/* Initialize the LZX least-recently-used match offset queue at the beginning of
 * a new window for either decompression or compression.  Iterates over all
 * LZX_NUM_RECENT_OFFSETS entries of queue->R[].  (Loop body not visible in
 * this chunk; per the LZX format the initial recent offsets should each be
 * set to their starting value -- confirm in the full source.) */
lzx_lru_queue_init(struct lzx_lru_queue *queue)
	for (unsigned i = 0; i < LZX_NUM_RECENT_OFFSETS; i++)
69 lzx_do_e8_preprocessing(u8 *data, u32 size);
72 lzx_undo_e8_preprocessing(u8 *data, u32 size);
74 #endif /* _LZX_COMMON_H */