# include <emmintrin.h>
#endif
-/* LZX uses what it calls 'position slots' to represent match offsets.
- * What this means is that a small 'position slot' number and a small
- * offset from that slot are encoded instead of one large offset for
- * every match.
- * - lzx_position_base is an index to the position slot bases
- * - lzx_extra_bits states how many bits of offset-from-base data is needed.
+/* Mapping: offset slot => first match offset that uses that offset slot.
*/
-
-const u32 lzx_position_base[LZX_MAX_POSITION_SLOTS] = {
+const u32 lzx_offset_slot_base[LZX_MAX_OFFSET_SLOTS] = {
0 , 1 , 2 , 3 , 4 , /* 0 --- 4 */
6 , 8 , 12 , 16 , 24 , /* 5 --- 9 */
32 , 48 , 64 , 96 , 128 , /* 10 --- 14 */
2097152 /* 50 */
};
-#ifdef USE_LZX_EXTRA_BITS_ARRAY
-const u8 lzx_extra_bits[LZX_MAX_POSITION_SLOTS] = {
+/* Mapping: offset slot => how many extra bits must be read and added to the
+ * corresponding offset slot base to decode the match offset. */
+const u8 lzx_extra_offset_bits[LZX_MAX_OFFSET_SLOTS] = {
0 , 0 , 0 , 0 , 1 ,
1 , 2 , 2 , 3 , 3 ,
4 , 4 , 5 , 5 , 6 ,
17, 17, 17, 17, 17,
17
};
-#endif
-/* LZX window size must be a power of 2 between 2^15 and 2^21, inclusively. */
-bool
-lzx_window_size_valid(size_t window_size)
+/* Round the specified compression block size (not LZX block size) up to the
+ * next valid LZX window size, and return its order (log2). Or, if the block
+ * size is 0 or greater than the largest valid LZX window size, return 0. */
+unsigned
+lzx_get_window_order(size_t max_block_size)
{
- if (window_size == 0 || (u32)window_size != window_size)
- return false;
- u32 order = bsr32(window_size);
- if (window_size != 1U << order)
- return false;
- return (order >= LZX_MIN_WINDOW_ORDER && order <= LZX_MAX_WINDOW_ORDER);
+ unsigned order;
+
+ if (max_block_size == 0 || max_block_size > LZX_MAX_WINDOW_SIZE)
+ return 0;
+
+ order = bsr32(max_block_size);
+
+ if (((u32)1 << order) != max_block_size)
+ order++;
+
+ return max(order, LZX_MIN_WINDOW_ORDER);
}
-/* Given a valid LZX window size, return the number of symbols that will exist
+/* Given a valid LZX window order, return the number of symbols that will exist
* in the main Huffman code. */
unsigned
-lzx_get_num_main_syms(u32 window_size)
+lzx_get_num_main_syms(unsigned window_order)
{
+ u32 window_size = (u32)1 << window_order;
+
/* NOTE: the calculation *should* be as follows:
*
* u32 max_offset = window_size - LZX_MIN_MATCH_LEN;
- * u32 max_formatted_offset = max_offset + LZX_OFFSET_OFFSET;
- * u32 num_position_slots = 1 + lzx_get_position_slot_raw(max_formatted_offset);
+ * u32 max_adjusted_offset = max_offset + LZX_OFFSET_OFFSET;
+ * u32 num_offset_slots = 1 + lzx_get_offset_slot_raw(max_adjusted_offset);
*
* However since LZX_MIN_MATCH_LEN == LZX_OFFSET_OFFSET, we would get
- * max_formatted_offset == window_size, which would bump the number of
- * position slots up by 1 since every valid LZX window size is equal to
- * a position base value. The format doesn't do this, and instead
+ * max_adjusted_offset == window_size, which would bump the number of
+ * offset slots up by 1 since every valid LZX window size is equal to a
+ * offset slot base value. The format doesn't do this, and instead
* disallows matches with minimum length and maximum offset. This sets
- * max_formatted_offset = window_size - 1, so instead we must calculate:
+ * max_adjusted_offset = window_size - 1, so instead we must calculate:
*
- * num_position_slots = 1 + lzx_get_position_slot_raw(window_size - 1);
+ * num_offset_slots = 1 + lzx_get_offset_slot_raw(window_size - 1);
*
* ... which is the same as
*
- * num_position_slots = lzx_get_position_slot_raw(window_size);
+ * num_offset_slots = lzx_get_offset_slot_raw(window_size);
*
- * ... since every valid window size is equal to a position base value.
+ * ... since every valid window size is equal to an offset base value.
*/
- unsigned num_position_slots = lzx_get_position_slot_raw(window_size);
+ unsigned num_offset_slots = lzx_get_offset_slot_raw(window_size);
/* Now calculate the number of main symbols as LZX_NUM_CHARS literal
- * symbols, plus 8 symbols per position slot (since there are 8 possible
- * length headers, and we need all (position slot, length header)
+ * symbols, plus 8 symbols per offset slot (since there are 8 possible
+ * length headers, and we need all (offset slot, length header)
* combinations). */
- return LZX_NUM_CHARS + (num_position_slots << 3);
+ return LZX_NUM_CHARS + (num_offset_slots << 3);
}
/* Callback for lzx_e8_filter(): rewrite the E8 call target at *target, given
 * the instruction's position `input_pos' in the buffer.
 *
 * NOTE(review): the body of this function appears to have been elided by the
 * diff chunking -- only the local declarations survive.  The visible patch
 * changes the target pointer type from s32 * to sle32 * (explicitly
 * little-endian).  Presumably this performs the forward (compression-side)
 * call-target translation; confirm against the complete source.  */
static void
-do_translate_target(s32 *target, s32 input_pos)
+do_translate_target(sle32 *target, s32 input_pos)
{
	s32 abs_offset, rel_offset;
}
/* Callback for lzx_e8_filter(): inverse of do_translate_target(), presumably
 * undoing the call-target translation on decompression -- confirm against
 * the complete source.
 *
 * NOTE(review): the body (and even the closing brace) was elided by the diff
 * chunking; only the local declarations survive.  The visible patch changes
 * the target pointer type from s32 * to sle32 *.  */
static void
-undo_translate_target(s32 *target, s32 input_pos)
+undo_translate_target(sle32 *target, s32 input_pos)
{
	s32 abs_offset, rel_offset;
SSE2 case, it bloats the binary more. */
#endif
/* Scan `data' (length `size' bytes) for x86 E8 call instructions and invoke
 * *process_target on each instruction's 32-bit target, passing the
 * instruction's offset within the buffer -- the shared driver behind both
 * lzx_do_e8_preprocessing() and lzx_undo_e8_preprocessing().
 *
 * NOTE(review): large parts of this function (most of the SSE2 scanning loop
 * and the tail of the scalar fallback loop) were elided by the diff chunking;
 * the surviving fragments are kept verbatim below.  The visible patch changes
 * `size' from s32 to u32 and the target pointer type from s32 * to sle32 *.
 * Confirm the full control flow against the complete source.  */
void
-lzx_e8_filter(u8 *data, s32 size, void (*process_target)(s32 *, s32))
+lzx_e8_filter(u8 *data, u32 size, void (*process_target)(sle32 *, s32))
{
#ifdef __SSE2__
	/* SSE2 vectorized implementation for x86_64.  This speeds up LZX
			/* Do (or undo) the e8 translation. */
			u8 *p8 = (u8 *)p128 + bit;
-			(*process_target)((s32 *)(p8 + 1),
+			(*process_target)((sle32 *)(p8 + 1),
					  p8 - data);
				/* Don't start an e8 translation in the
	/* Stop 10 bytes short of the end: an E8 instruction plus its 4-byte
	 * target must fit entirely within the buffer (review: bound elided
	 * context -- confirm).  */
	u8 *p8_end = data + size - 10;
	do {
		if (*p8 == 0xe8) {
-			(*process_target)((s32 *)(p8 + 1), p8 - data);
+			(*process_target)((sle32 *)(p8 + 1), p8 - data);
			p8 += 5;
		} else {
			p8++;
		}
void
-lzx_do_e8_preprocessing(u8 *data, s32 size)
+lzx_do_e8_preprocessing(u8 *data, u32 size)
{
lzx_e8_filter(data, size, do_translate_target);
}
void
-lzx_undo_e8_preprocessing(u8 *data, s32 size)
+lzx_undo_e8_preprocessing(u8 *data, u32 size)
{
lzx_e8_filter(data, size, undo_translate_target);
}