# include <emmintrin.h>
#endif
-/* LZX uses what it calls 'position slots' to represent match offsets.
- * What this means is that a small 'position slot' number and a small
- * offset from that slot are encoded instead of one large offset for
- * every match.
- * - lzx_position_base is an index to the position slot bases
- * - lzx_extra_bits states how many bits of offset-from-base data is needed.
+/* Mapping: position slot => first match offset that uses that position slot.
*/
-
const u32 lzx_position_base[LZX_MAX_POSITION_SLOTS] = {
0 , 1 , 2 , 3 , 4 , /* 0 --- 4 */
6 , 8 , 12 , 16 , 24 , /* 5 --- 9 */
2097152 /* 50 */
};
+/* Mapping: position slot => number of extra offset bits to read; their value
+ * is added to the corresponding position base to reconstruct the match
+ * offset. */
#ifdef USE_LZX_EXTRA_BITS_ARRAY
const u8 lzx_extra_bits[LZX_MAX_POSITION_SLOTS] = {
0 , 0 , 0 , 0 , 1 ,
};
#endif
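/* Illustrative sketch (not part of this change): how the two tables above
 * combine to decode a match offset.  Assumes the array form of lzx_extra_bits
 * is compiled in, and that read_bits() is a hypothetical bitstream helper
 * returning the next 'n' bits.  E.g. position slot 7 has base 12 and 2 extra
 * bits, so it covers offsets 12 through 15. */
static u32
example_decode_match_offset(unsigned position_slot)
{
	unsigned num_extra_bits = lzx_extra_bits[position_slot];

	return lzx_position_base[position_slot] + read_bits(num_extra_bits);
}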
-/* LZX window size must be a power of 2 between 2^15 and 2^21, inclusively. */
-bool
-lzx_window_size_valid(size_t window_size)
+/* Round the specified compression block size (not LZX block size) up to the
+ * next valid LZX window size and return its order (log2).  Return 0 if the
+ * block size is 0 or exceeds the largest valid LZX window size. */
+unsigned
+lzx_get_window_order(size_t max_block_size)
{
- if (window_size == 0 || (u32)window_size != window_size)
- return false;
- u32 order = bsr32(window_size);
- if (window_size != 1U << order)
- return false;
- return (order >= LZX_MIN_WINDOW_ORDER && order <= LZX_MAX_WINDOW_ORDER);
+ unsigned order;
+
+ if (max_block_size == 0 || max_block_size > (1 << LZX_MAX_WINDOW_ORDER))
+ return 0;
+
+ order = bsr32(max_block_size);
+
+ if ((1 << order) != max_block_size)
+ order++;
+
+ return max(order, LZX_MIN_WINDOW_ORDER);
}
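/* Hedged usage sketch, assuming the standard LZX limits
 * (LZX_MIN_WINDOW_ORDER == 15 and LZX_MAX_WINDOW_ORDER == 21, per the
 * 2^15 to 2^21 range noted above): */
#include <assert.h>

static void
example_window_order_usage(void)
{
	assert(lzx_get_window_order(40000) == 16);  /* rounded up to 1 << 16  */
	assert(lzx_get_window_order(32768) == 15);  /* already a power of two */
	assert(lzx_get_window_order(1024) == 15);   /* clamped to the minimum */
	assert(lzx_get_window_order((size_t)1 << 22) == 0);  /* too large     */
}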
-/* Given a valid LZX window size, return the number of symbols that will exist
+/* Given a valid LZX window order, return the number of symbols that will exist
* in the main Huffman code. */
unsigned
-lzx_get_num_main_syms(u32 window_size)
+lzx_get_num_main_syms(unsigned window_order)
{
+ u32 window_size = 1 << window_order;
+
/* NOTE: the calculation *should* be as follows:
*
* u32 max_offset = window_size - LZX_MIN_MATCH_LEN;
}
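/* The rest of the NOTE and the function body are elided in this hunk.  As a
 * hedged sketch of the usual LZX relationship: the main code has 256 literal
 * symbols plus 8 match symbols per position slot (one for each 3-bit length
 * header value).  lzx_get_position_slot_raw() is assumed here to map a
 * formatted offset to its position slot via the tables above. */
static unsigned
example_num_main_syms(u32 window_size)
{
	unsigned num_position_slots =
		1 + lzx_get_position_slot_raw(window_size - 1);

	return 256 + (num_position_slots << 3);
}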
static void
-do_translate_target(s32 *target, s32 input_pos)
+do_translate_target(sle32 *target, s32 input_pos)
{
s32 abs_offset, rel_offset;
}
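/* The body of do_translate_target() above is elided in this hunk.  Hedged
 * sketch of the forward direction: per the LZX e8 transform, a 32-bit
 * relative CALL target that falls in range is rewritten as an "absolute"
 * offset using a fixed magic file size.  The constant LZX_WIM_MAGIC_FILESIZE
 * (12000000 in the WIM variant of LZX) and the byte-order helpers are
 * assumptions here. */
static void
example_translate_target(sle32 *target, s32 input_pos)
{
	s32 rel_offset = le32_to_cpu(*target);

	if (rel_offset >= -input_pos && rel_offset < LZX_WIM_MAGIC_FILESIZE) {
		s32 abs_offset;

		if (rel_offset < LZX_WIM_MAGIC_FILESIZE - input_pos)
			abs_offset = rel_offset + input_pos;
		else
			abs_offset = rel_offset - LZX_WIM_MAGIC_FILESIZE;
		*target = cpu_to_le32(abs_offset);
	}
}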
static void
-undo_translate_target(s32 *target, s32 input_pos)
+undo_translate_target(sle32 *target, s32 input_pos)
{
s32 abs_offset, rel_offset;
SSE2 case, it bloats the binary more. */
#endif
void
-lzx_e8_filter(u8 *data, s32 size, void (*process_target)(s32 *, s32))
+lzx_e8_filter(u8 *data, u32 size, void (*process_target)(sle32 *, s32))
{
#ifdef __SSE2__
/* SSE2 vectorized implementation for x86_64. This speeds up LZX
/* Do (or undo) the e8 translation. */
u8 *p8 = (u8 *)p128 + bit;
- (*process_target)((s32 *)(p8 + 1),
+ (*process_target)((sle32 *)(p8 + 1),
p8 - data);
/* Don't start an e8 translation in the
u8 *p8_end = data + size - 10;
do {
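		/* 0xe8 is the opcode byte of the x86 'call rel32'
		 * instruction; translate its 32-bit target, then skip past
		 * the full 5-byte instruction. */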
if (*p8 == 0xe8) {
- (*process_target)((s32 *)(p8 + 1), p8 - data);
+ (*process_target)((sle32 *)(p8 + 1), p8 - data);
p8 += 5;
} else {
p8++;
}
void
-lzx_do_e8_preprocessing(u8 *data, s32 size)
+lzx_do_e8_preprocessing(u8 *data, u32 size)
{
lzx_e8_filter(data, size, do_translate_target);
}
void
-lzx_undo_e8_preprocessing(u8 *data, s32 size)
+lzx_undo_e8_preprocessing(u8 *data, u32 size)
{
lzx_e8_filter(data, size, undo_translate_target);
}
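/* Hedged usage sketch: the two passes are inverses over the same buffer, so a
 * compressor applies the forward pass before encoding and a decompressor
 * applies the undo pass after decoding:
 *
 *	lzx_do_e8_preprocessing(data, size);
 *	... compress data ...
 *	(later, after decompression)
 *	lzx_undo_e8_preprocessing(data, size);
 */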