* */
#include "wimlib/assert.h"
+#include "wimlib/compiler.h"
#include "wimlib/util.h"
#include "wimlib/types.h"
extern const u8 lzx_extra_bits[LZX_MAX_POSITION_SLOTS];
#endif
-/* Given the number of a LZX position slot, return the number of extra bits that
+/* Given the number of an LZX position slot, return the number of extra bits that
* are needed to encode the match offset. */
static inline unsigned
lzx_get_num_extra_bits(unsigned position_slot)
* the formatted offset without actually looking at the array.
*/
static inline unsigned
-lzx_get_position_slot_raw(unsigned formatted_offset)
+lzx_get_position_slot_raw(u32 formatted_offset)
{
	/* NOTE(review): only the large-offset branch is visible in this
	 * fragment.  The branch handling formatted_offset < 196608 appears to
	 * be missing here, so as shown the function would fall off the end
	 * without returning a value (UB) -- confirm against the full source.
	 * For offsets >= 196608 (3 * 2^16), the LZX format assigns position
	 * slots 34 and up, one slot per 2^17-sized range. */
	if (formatted_offset >= 196608) {
		return (formatted_offset >> 17) + 34;
	}
}
-extern bool lzx_window_size_valid(u32 window_size);
+extern bool lzx_window_size_valid(size_t window_size);
extern unsigned lzx_get_num_main_syms(u32 window_size);
#define LZX_NUM_RECENT_OFFSETS 3
/* Least-recently used queue for match offsets. */
struct lzx_lru_queue {
u32 R[LZX_NUM_RECENT_OFFSETS];
-};
+}
+#ifdef __x86_64__
+_aligned_attribute(8) /* Improves performance of LZX compression by 1% - 2%;
+ specifically, this speeds up
+ lzx_get_near_optimal_match(). */
+#endif
+;
/* In the LZX format, an offset of n bytes is actually encoded
* as (n + LZX_OFFSET_OFFSET). */
#define LZX_OFFSET_OFFSET (LZX_NUM_RECENT_OFFSETS - 1)
+/* Initialize the LZX least-recently-used match offset queue at the beginning of
+ * a new window for either decompression or compression. */
static inline void
lzx_lru_queue_init(struct lzx_lru_queue *queue)
{
	/* NOTE(review): the loop header iterating i over
	 * [0, LZX_NUM_RECENT_OFFSETS) appears to be missing from this
	 * fragment -- 'i' is otherwise undeclared here.  Confirm against the
	 * full source.  Each recent-offset slot is initialized to 1, which is
	 * presumably the LZX-mandated initial offset value. */
	queue->R[i] = 1;
}
+extern void
+lzx_do_e8_preprocessing(u8 *data, s32 size);
+
+extern void
+lzx_undo_e8_preprocessing(u8 *data, s32 size);
+
#endif /* _WIMLIB_LZX_H */