* ----------------------------------------------------------------------------
*/
-#ifndef _BT_MATCHFINDER_H
-#define _BT_MATCHFINDER_H
-
-#ifndef MATCHFINDER_MAX_WINDOW_ORDER
-# error "MATCHFINDER_MAX_WINDOW_ORDER must be defined!"
-#endif
#include <string.h>
#include "wimlib/lz_extend.h"
#include "wimlib/lz_hash.h"
-#if MATCHFINDER_MAX_WINDOW_ORDER < 13
-# define BT_MATCHFINDER_HASH_ORDER 14
-#elif MATCHFINDER_MAX_WINDOW_ORDER < 15
-# define BT_MATCHFINDER_HASH_ORDER 15
-#else
-# define BT_MATCHFINDER_HASH_ORDER 16
-#endif
+#define BT_MATCHFINDER_HASH_ORDER 16
+
+/* TEMPLATED functions and structures have MF_SUFFIX appended to their name. */
+#undef TEMPLATED
+#define TEMPLATED(name) CONCAT(name, MF_SUFFIX)
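+
+/* For example, when MF_SUFFIX is defined as _16, TEMPLATED(bt_matchfinder_init)
+ * expands (via CONCAT's token pasting) to bt_matchfinder_init_16. */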
+
+#ifndef _WIMLIB_BT_MATCHFINDER_H
+#define _WIMLIB_BT_MATCHFINDER_H
-#if MATCHFINDER_MAX_WINDOW_ORDER <= 16
-typedef u16 pos_t;
-#else
-typedef u32 pos_t;
-#endif
+/* Non-templated definitions */
/* Representation of a match found by the bt_matchfinder */
struct lz_match {
/* The number of bytes matched. */
- pos_t length;
+ u32 length;
/* The offset back from the current position that was matched. */
- pos_t offset;
+ u32 offset;
};
-struct bt_matchfinder {
+static inline u32
+bt_matchfinder_hash_3_bytes(const u8 *in_next)
+{
+ return lz_hash_3_bytes(in_next, BT_MATCHFINDER_HASH_ORDER);
+}
+
+#endif /* _WIMLIB_BT_MATCHFINDER_H */
+
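+/* Templated definitions: everything below this point is outside the include
+ * guard, so it is instantiated again each time this header is included with a
+ * new 'pos_t' and MF_SUFFIX. */
+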
+struct TEMPLATED(bt_matchfinder) {
pos_t hash_tab[1UL << BT_MATCHFINDER_HASH_ORDER];
pos_t child_tab[];
};
/* Return the number of bytes that must be allocated for a 'bt_matchfinder' that
* can work with buffers up to the specified size. */
static inline size_t
-bt_matchfinder_size(size_t max_bufsize)
+TEMPLATED(bt_matchfinder_size)(size_t max_bufsize)
{
- return sizeof(struct bt_matchfinder) + (2 * max_bufsize * sizeof(pos_t));
+ return sizeof(struct TEMPLATED(bt_matchfinder)) +
+ (2 * max_bufsize * sizeof(pos_t));
}
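+
+/*
+ * A minimal usage sketch (hypothetical; the LZX compressor instead embeds the
+ * matchfinder at the end of its own struct): with MF_SUFFIX defined as _16,
+ * allocate and prepare a matchfinder for buffers of up to 32768 bytes:
+ *
+ *	struct bt_matchfinder_16 *mf = malloc(bt_matchfinder_size_16(32768));
+ *
+ *	if (mf)
+ *		bt_matchfinder_init_16(mf);
+ */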
/* Prepare the matchfinder for a new input buffer. */
static inline void
-bt_matchfinder_init(struct bt_matchfinder *mf)
+TEMPLATED(bt_matchfinder_init)(struct TEMPLATED(bt_matchfinder) *mf)
{
memset(mf, 0, sizeof(*mf));
}
-static inline u32
-bt_matchfinder_hash_3_bytes(const u8 *in_next)
-{
- return lz_hash_3_bytes(in_next, BT_MATCHFINDER_HASH_ORDER);
-}
-
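+/* The children of the node at position 'node' are stored contiguously in
+ * child_tab: the left child at index 2*node and the right child at index
+ * 2*node + 1. */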
static inline pos_t *
-bt_child(struct bt_matchfinder *mf, pos_t node, int offset)
+TEMPLATED(bt_child)(struct TEMPLATED(bt_matchfinder) *mf, pos_t node, int offset)
{
- if (MATCHFINDER_MAX_WINDOW_ORDER < sizeof(pos_t) * 8) {
- /* no cast needed */
- return &mf->child_tab[(node << 1) + offset];
- } else {
- return &mf->child_tab[((size_t)node << 1) + offset];
- }
+ return &mf->child_tab[(node << 1) + offset];
}
static inline pos_t *
-bt_left_child(struct bt_matchfinder *mf, pos_t node)
+TEMPLATED(bt_left_child)(struct TEMPLATED(bt_matchfinder) *mf, pos_t node)
{
- return bt_child(mf, node, 0);
+ return TEMPLATED(bt_child)(mf, node, 0);
}
static inline pos_t *
-bt_right_child(struct bt_matchfinder *mf, pos_t node)
+TEMPLATED(bt_right_child)(struct TEMPLATED(bt_matchfinder) *mf, pos_t node)
{
- return bt_child(mf, node, 1);
+ return TEMPLATED(bt_child)(mf, node, 1);
}
/*
* array. (If no matches were found, this will be the same as @lz_matchptr.)
*/
static inline struct lz_match *
-bt_matchfinder_get_matches(struct bt_matchfinder * const restrict mf,
- const u8 * const in_begin,
- const u8 * const in_next,
- const unsigned min_len,
- const unsigned max_len,
- const unsigned nice_len,
- const unsigned max_search_depth,
- u32 * restrict next_hash,
- unsigned * restrict best_len_ret,
- struct lz_match * restrict lz_matchptr)
+TEMPLATED(bt_matchfinder_get_matches)(struct TEMPLATED(bt_matchfinder) * const restrict mf,
+ const u8 * const in_begin,
+ const u8 * const in_next,
+ const unsigned min_len,
+ const unsigned max_len,
+ const unsigned nice_len,
+ const unsigned max_search_depth,
+ u32 * restrict next_hash,
+ unsigned * restrict best_len_ret,
+ struct lz_match * restrict lz_matchptr)
{
unsigned depth_remaining = max_search_depth;
u32 hash;
mf->hash_tab[hash] = in_next - in_begin;
prefetchw(&mf->hash_tab[*next_hash]);
- pending_lt_ptr = bt_left_child(mf, in_next - in_begin);
- pending_gt_ptr = bt_right_child(mf, in_next - in_begin);
+ pending_lt_ptr = TEMPLATED(bt_left_child)(mf, in_next - in_begin);
+ pending_gt_ptr = TEMPLATED(bt_right_child)(mf, in_next - in_begin);
best_lt_len = 0;
best_gt_len = 0;
len = 0;
lz_matchptr->offset = in_next - matchptr;
lz_matchptr++;
if (len >= nice_len) {
- *pending_lt_ptr = *bt_left_child(mf, cur_node);
- *pending_gt_ptr = *bt_right_child(mf, cur_node);
+ *pending_lt_ptr = *TEMPLATED(bt_left_child)(mf, cur_node);
+ *pending_gt_ptr = *TEMPLATED(bt_right_child)(mf, cur_node);
*best_len_ret = best_len;
return lz_matchptr;
}
if (matchptr[len] < in_next[len]) {
*pending_lt_ptr = cur_node;
- pending_lt_ptr = bt_right_child(mf, cur_node);
+ pending_lt_ptr = TEMPLATED(bt_right_child)(mf, cur_node);
cur_node = *pending_lt_ptr;
best_lt_len = len;
if (best_gt_len < len)
len = best_gt_len;
} else {
*pending_gt_ptr = cur_node;
- pending_gt_ptr = bt_left_child(mf, cur_node);
+ pending_gt_ptr = TEMPLATED(bt_left_child)(mf, cur_node);
cur_node = *pending_gt_ptr;
best_gt_len = len;
if (best_lt_len < len)
* actually record any matches.
*/
static inline void
-bt_matchfinder_skip_position(struct bt_matchfinder * const restrict mf,
- const u8 * const in_begin,
- const u8 * const in_next,
- const u8 * const in_end,
- const unsigned nice_len,
- const unsigned max_search_depth,
- u32 * restrict next_hash)
+TEMPLATED(bt_matchfinder_skip_position)(struct TEMPLATED(bt_matchfinder) * const restrict mf,
+ const u8 * const in_begin,
+ const u8 * const in_next,
+ const u8 * const in_end,
+ const unsigned nice_len,
+ const unsigned max_search_depth,
+ u32 * restrict next_hash)
{
unsigned depth_remaining = max_search_depth;
u32 hash;
prefetchw(&mf->hash_tab[*next_hash]);
depth_remaining = max_search_depth;
- pending_lt_ptr = bt_left_child(mf, in_next - in_begin);
- pending_gt_ptr = bt_right_child(mf, in_next - in_begin);
+ pending_lt_ptr = TEMPLATED(bt_left_child)(mf, in_next - in_begin);
+ pending_gt_ptr = TEMPLATED(bt_right_child)(mf, in_next - in_begin);
best_lt_len = 0;
best_gt_len = 0;
len = 0;
if (matchptr[len] == in_next[len]) {
len = lz_extend(in_next, matchptr, len + 1, nice_len);
if (len == nice_len) {
- *pending_lt_ptr = *bt_left_child(mf, cur_node);
- *pending_gt_ptr = *bt_right_child(mf, cur_node);
+ *pending_lt_ptr = *TEMPLATED(bt_left_child)(mf, cur_node);
+ *pending_gt_ptr = *TEMPLATED(bt_right_child)(mf, cur_node);
return;
}
}
if (matchptr[len] < in_next[len]) {
*pending_lt_ptr = cur_node;
- pending_lt_ptr = bt_right_child(mf, cur_node);
+ pending_lt_ptr = TEMPLATED(bt_right_child)(mf, cur_node);
cur_node = *pending_lt_ptr;
best_lt_len = len;
if (best_gt_len < len)
len = best_gt_len;
} else {
*pending_gt_ptr = cur_node;
- pending_gt_ptr = bt_left_child(mf, cur_node);
+ pending_gt_ptr = TEMPLATED(bt_left_child)(mf, cur_node);
cur_node = *pending_gt_ptr;
best_gt_len = len;
if (best_lt_len < len)
}
}
}
-
-#endif /* _BT_MATCHFINDER_H */
*
* Notes on usage
*
- * You must define MATCHFINDER_MAX_WINDOW_ORDER before including this header
- * because that determines which integer type to use for positions. Since
- * 16-bit integers are faster than 32-bit integers due to reduced memory usage
- * (and therefore reduced cache pressure), the code only uses 32-bit integers if
- * they are needed to represent all possible positions.
+ * Before including this header, you must define 'pos_t' to an integer type that
+ * can represent all possible positions. This can be a 16-bit or 32-bit
+ * unsigned integer. When possible, the former should be used due to the
+ * reduced cache pressure. This header can be included multiple times in a
+ * single .c file with different 'pos_t' definitions; however, you must define a
+ * different MF_SUFFIX each time to generate different names for the matchfinder
+ * structure and functions.
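+ *
+ * For example (this is the pattern lzx_compress.c uses):
+ *
+ *	#define pos_t u16
+ *	#define MF_SUFFIX _16
+ *	#include "wimlib/hc_matchfinder.h"
+ *
+ *	#undef pos_t
+ *	#undef MF_SUFFIX
+ *	#define pos_t u32
+ *	#define MF_SUFFIX _32
+ *	#include "wimlib/hc_matchfinder.h"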
*
* The number of bytes that must be allocated for a given 'struct
* hc_matchfinder' must be gotten by calling hc_matchfinder_size().
* ----------------------------------------------------------------------------
*/
-#ifndef _HC_MATCHFINDER_H
-#define _HC_MATCHFINDER_H
-
-#ifndef MATCHFINDER_MAX_WINDOW_ORDER
-# error "MATCHFINDER_MAX_WINDOW_ORDER must be defined!"
-#endif
-
#include <string.h>
#include "wimlib/lz_extend.h"
#include "wimlib/lz_hash.h"
#include "wimlib/unaligned.h"
-#if MATCHFINDER_MAX_WINDOW_ORDER <= 16
-typedef u16 pos_t;
-#else
-typedef u32 pos_t;
-#endif
-
#define HC_MATCHFINDER_HASH3_ORDER 14
#define HC_MATCHFINDER_HASH4_ORDER 15
-struct hc_matchfinder {
+/* TEMPLATED functions and structures have MF_SUFFIX appended to their name. */
+#undef TEMPLATED
+#define TEMPLATED(name) CONCAT(name, MF_SUFFIX)
+
+struct TEMPLATED(hc_matchfinder) {
/* The hash table for finding length 3 matches */
pos_t hash3_tab[1UL << HC_MATCHFINDER_HASH3_ORDER];
/* Return the number of bytes that must be allocated for a 'hc_matchfinder' that
* can work with buffers up to the specified size. */
static inline size_t
-hc_matchfinder_size(size_t max_bufsize)
+TEMPLATED(hc_matchfinder_size)(size_t max_bufsize)
{
- return sizeof(struct hc_matchfinder) + (max_bufsize * sizeof(pos_t));
+ return sizeof(struct TEMPLATED(hc_matchfinder)) +
+ (max_bufsize * sizeof(pos_t));
}
/* Prepare the matchfinder for a new input buffer. */
static inline void
-hc_matchfinder_init(struct hc_matchfinder *mf)
+TEMPLATED(hc_matchfinder_init)(struct TEMPLATED(hc_matchfinder) *mf)
{
memset(mf, 0, sizeof(*mf));
}
* 'best_len' was found.
*/
static inline u32
-hc_matchfinder_longest_match(struct hc_matchfinder * const restrict mf,
- const u8 * const restrict in_begin,
- const ptrdiff_t cur_pos,
- u32 best_len,
- const u32 max_len,
- const u32 nice_len,
- const u32 max_search_depth,
- u32 next_hashes[const restrict static 2],
- u32 * const restrict offset_ret)
+TEMPLATED(hc_matchfinder_longest_match)(struct TEMPLATED(hc_matchfinder) * const restrict mf,
+ const u8 * const restrict in_begin,
+ const ptrdiff_t cur_pos,
+ u32 best_len,
+ const u32 max_len,
+ const u32 nice_len,
+ const u32 max_search_depth,
+ u32 next_hashes[const restrict static 2],
+ u32 * const restrict offset_ret)
{
const u8 *in_next = in_begin + cur_pos;
u32 depth_remaining = max_search_depth;
* Returns @in_next + @count.
*/
static inline const u8 *
-hc_matchfinder_skip_positions(struct hc_matchfinder * const restrict mf,
- const u8 * const restrict in_begin,
- const ptrdiff_t cur_pos,
- const ptrdiff_t end_pos,
- const u32 count,
- u32 next_hashes[const restrict static 2])
+TEMPLATED(hc_matchfinder_skip_positions)(struct TEMPLATED(hc_matchfinder) * const restrict mf,
+ const u8 * const restrict in_begin,
+ const ptrdiff_t cur_pos,
+ const ptrdiff_t end_pos,
+ const u32 count,
+ u32 next_hashes[const restrict static 2])
{
const u8 *in_next = in_begin + cur_pos;
const u8 * const stop_ptr = in_next + count;
return stop_ptr;
}
-
-#endif /* _HC_MATCHFINDER_H */
#define ALIGNED_CODEWORD_LIMIT 7
#define PRE_CODEWORD_LIMIT 7
-#include "wimlib/lzx_common.h"
-
-/*
- * The maximum allowed window order for the matchfinder.
- */
-#define MATCHFINDER_MAX_WINDOW_ORDER LZX_MAX_WINDOW_ORDER
-
-#include <string.h>
-
-#include "wimlib/bt_matchfinder.h"
#include "wimlib/compress_common.h"
#include "wimlib/compressor_ops.h"
#include "wimlib/error.h"
-#include "wimlib/hc_matchfinder.h"
#include "wimlib/lz_extend.h"
+#include "wimlib/lzx_common.h"
#include "wimlib/unaligned.h"
#include "wimlib/util.h"
+/* Matchfinders with 16-bit positions */
+#define pos_t u16
+#define MF_SUFFIX _16
+#include "wimlib/bt_matchfinder.h"
+#include "wimlib/hc_matchfinder.h"
+
+/* Matchfinders with 32-bit positions */
+#undef pos_t
+#undef MF_SUFFIX
+#define pos_t u32
+#define MF_SUFFIX _32
+#include "wimlib/bt_matchfinder.h"
+#include "wimlib/hc_matchfinder.h"
+
struct lzx_output_bitstream;
/* Codewords for the LZX Huffman codes. */
/* Data for greedy or lazy parsing */
struct {
/* Hash chains matchfinder (MUST BE LAST!!!) */
- struct hc_matchfinder hc_mf;
+ union {
+ struct hc_matchfinder_16 hc_mf_16;
+ struct hc_matchfinder_32 hc_mf_32;
+ };
};
/* Data for near-optimal parsing */
LZX_MAX_MATCH_LEN - 1];
/* Hash table for finding length 2 matches */
- pos_t hash2_tab[LZX_HASH2_LENGTH];
+ u32 hash2_tab[LZX_HASH2_LENGTH];
/* Binary trees matchfinder (MUST BE LAST!!!) */
- struct bt_matchfinder bt_mf;
+ union {
+ struct bt_matchfinder_16 bt_mf_16;
+ struct bt_matchfinder_32 bt_mf_32;
+ };
};
};
};
+/*
+ * Will a matchfinder using 16-bit positions be sufficient for compressing
+ * buffers of up to the specified size? The limit could be 65536 bytes, but we
+ * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
+ * This requires that the limit be no more than the length of offset_slot_tab_1
+ * (currently 32768).
+ */
+static inline bool
+lzx_is_16_bit(size_t max_bufsize)
+{
+ STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
+ return max_bufsize <= 32768;
+}
+
+/*
+ * The following macros call either the 16-bit or the 32-bit version of a
+ * matchfinder function based on the value of 'is_16_bit', which will be known
+ * at compilation time.
+ */
+
+#define CALL_HC_MF(is_16_bit, c, funcname, ...) \
+	((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
+		       CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__))
+
+#define CALL_BT_MF(is_16_bit, c, funcname, ...) \
+	((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \
+		       CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__))
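+
+/* For example, with 'is_16_bit' true, CALL_HC_MF(is_16_bit, c,
+ * hc_matchfinder_init) reduces to hc_matchfinder_init_16(&(c)->hc_mf_16); the
+ * untaken branch is eliminated because 'is_16_bit' is a compile-time
+ * constant. */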
+
/*
* Structure to keep track of the current state of sending bits to the
* compressed output buffer.
* compressor's acceleration tables to speed up the mapping.
*/
static inline unsigned
-lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset)
+lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
+ bool is_16_bit)
{
- if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
+ if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
return c->offset_slot_tab_1[adjusted_offset];
return c->offset_slot_tab_2[adjusted_offset >> 14];
}
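+/* When 'is_16_bit' is true (a compile-time constant at every call site), the
+ * condition above folds to true and the offset_slot_tab_2 lookup is compiled
+ * out: lzx_is_16_bit() caps the buffer size so that offset_slot_tab_1 covers
+ * every adjusted offset the compressor can produce. */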
* offsets queue. */
static inline void
lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
- u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
+ u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
{
u32 litrunlen = *litrunlen_p;
}
/* Compute the offset slot */
- offset_slot = lzx_comp_get_offset_slot(c, offset_data);
+ offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
/* Compute the match header. */
v += offset_slot * LZX_NUM_LEN_HEADERS;
* beginning of the block), but this doesn't matter because this function only
* computes frequencies.
*/
-static void
-lzx_tally_item_list(struct lzx_compressor *c, u32 block_size)
+static inline void
+lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
{
u32 node_idx = block_size;
for (;;) {
}
/* Tally the main symbol. */
- offset_slot = lzx_comp_get_offset_slot(c, offset_data);
+ offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
v += offset_slot * LZX_NUM_LEN_HEADERS;
c->freqs.main[LZX_NUM_CHARS + v]++;
* first-to-last order. The return value is the index in c->chosen_sequences at
* which the lzx_sequences begin.
*/
-static u32
-lzx_record_item_list(struct lzx_compressor *c, u32 block_size)
+static inline u32
+lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
{
u32 node_idx = block_size;
u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
}
/* Tally the main symbol. */
- offset_slot = lzx_comp_get_offset_slot(c, offset_data);
+ offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
v += offset_slot * LZX_NUM_LEN_HEADERS;
c->freqs.main[LZX_NUM_CHARS + v]++;
* later. The algorithm does not solve this problem; it only considers the
* lowest cost to reach each individual position.
*/
-static struct lzx_lru_queue
+static inline struct lzx_lru_queue
lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
const u8 * const restrict block_begin,
const u32 block_size,
- const struct lzx_lru_queue initial_queue)
+ const struct lzx_lru_queue initial_queue,
+ bool is_16_bit)
{
struct lzx_optimum_node *cur_node = c->optimum_nodes;
struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
do {
u32 offset = cache_ptr->offset;
u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
- unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data);
+ unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
+ is_16_bit);
do {
u32 cost = cur_node->cost +
c->costs.match_cost[offset_slot][
lzx_compute_match_costs(c);
}
-static struct lzx_lru_queue
+static inline struct lzx_lru_queue
lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
struct lzx_output_bitstream * const restrict os,
const u8 * const restrict block_begin,
const u32 block_size,
- const struct lzx_lru_queue initial_queue)
+ const struct lzx_lru_queue initial_queue,
+ bool is_16_bit)
{
unsigned num_passes_remaining = c->num_optim_passes;
struct lzx_lru_queue new_queue;
lzx_reset_symbol_frequencies(c);
do {
new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
- initial_queue);
+ initial_queue, is_16_bit);
if (num_passes_remaining > 1) {
- lzx_tally_item_list(c, block_size);
+ lzx_tally_item_list(c, block_size, is_16_bit);
lzx_make_huffman_codes(c);
lzx_update_costs(c);
lzx_reset_symbol_frequencies(c);
}
} while (--num_passes_remaining);
- seq_idx = lzx_record_item_list(c, block_size);
+ seq_idx = lzx_record_item_list(c, block_size, is_16_bit);
lzx_finish_block(c, os, block_begin, block_size, seq_idx);
return new_queue;
}
* time, but rather to produce a compression ratio significantly better than a
* simpler "greedy" or "lazy" parse while still being relatively fast.
*/
-static void
+static inline void
lzx_compress_near_optimal(struct lzx_compressor *c,
- struct lzx_output_bitstream *os)
+ struct lzx_output_bitstream *os,
+ bool is_16_bit)
{
const u8 * const in_begin = c->in_buffer;
const u8 * in_next = in_begin;
u32 next_hash;
struct lzx_lru_queue queue;
- bt_matchfinder_init(&c->bt_mf);
+ CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
memset(c->hash2_tab, 0, sizeof(c->hash2_tab));
next_hash = bt_matchfinder_hash_3_bytes(in_next);
lzx_lru_queue_init(&queue);
}
/* Check for matches of length >= 3. */
- lz_matchptr = bt_matchfinder_get_matches(&c->bt_mf,
- in_begin,
- in_next,
- 3,
- max_len,
- nice_len,
- c->max_search_depth,
- &next_hash,
- &best_len,
- lz_matchptr);
+ lz_matchptr = CALL_BT_MF(is_16_bit, c, bt_matchfinder_get_matches,
+ in_begin,
+ in_next,
+ 3,
+ max_len,
+ nice_len,
+ c->max_search_depth,
+ &next_hash,
+ &best_len,
+ lz_matchptr);
in_next++;
cache_ptr->length = lz_matchptr - (cache_ptr + 1);
cache_ptr = lz_matchptr;
}
c->hash2_tab[lz_hash_2_bytes(in_next, LZX_HASH2_ORDER)] =
in_next - in_begin;
- bt_matchfinder_skip_position(&c->bt_mf,
- in_begin,
- in_next,
- in_end,
- nice_len,
- c->max_search_depth,
- &next_hash);
+ CALL_BT_MF(is_16_bit, c, bt_matchfinder_skip_position,
+ in_begin,
+ in_next,
+ in_end,
+ nice_len,
+ c->max_search_depth,
+ &next_hash);
in_next++;
cache_ptr->length = 0;
cache_ptr++;
queue = lzx_optimize_and_write_block(c, os, in_block_begin,
in_next - in_block_begin,
- queue);
+ queue, is_16_bit);
} while (in_next != in_end);
}
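+
+/* Each wrapper below passes a compile-time constant 'is_16_bit', so the
+ * compiler emits two specialized copies of the inline parser above, each with
+ * the unused matchfinder branches optimized away. */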
+static void
+lzx_compress_near_optimal_16(struct lzx_compressor *c,
+ struct lzx_output_bitstream *os)
+{
+ lzx_compress_near_optimal(c, os, true);
+}
+
+static void
+lzx_compress_near_optimal_32(struct lzx_compressor *c,
+ struct lzx_output_bitstream *os)
+{
+ lzx_compress_near_optimal(c, os, false);
+}
+
/*
* Given a pointer to the current byte sequence and the current list of recent
* match offsets, find the longest repeat offset match.
}
/* This is the "lazy" LZX compressor. */
-static void
-lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
+static inline void
+lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
+ bool is_16_bit)
{
const u8 * const in_begin = c->in_buffer;
const u8 * in_next = in_begin;
u32 recent_offsets[3] = {1, 1, 1};
u32 next_hashes[2] = {};
- hc_matchfinder_init(&c->hc_mf);
+ CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
do {
/* Starting a new block */
/* Find the longest match at the current position. */
- cur_len = hc_matchfinder_longest_match(&c->hc_mf,
- in_begin,
- in_next - in_begin,
- 2,
- max_len,
- nice_len,
- c->max_search_depth,
- next_hashes,
- &cur_offset);
+ cur_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match,
+ in_begin,
+ in_next - in_begin,
+ 2,
+ max_len,
+ nice_len,
+ c->max_search_depth,
+ next_hashes,
+ &cur_offset);
if (cur_len < 3 ||
(cur_len == 3 &&
cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
nice_len = min(max_len, nice_len);
}
- next_len = hc_matchfinder_longest_match(&c->hc_mf,
- in_begin,
- in_next - in_begin,
- cur_len - 2,
- max_len,
- nice_len,
- c->max_search_depth / 2,
- next_hashes,
- &next_offset);
+ next_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match,
+ in_begin,
+ in_next - in_begin,
+ cur_len - 2,
+ max_len,
+ nice_len,
+ c->max_search_depth / 2,
+ next_hashes,
+ &next_offset);
if (next_len <= cur_len - 2) {
in_next++;
choose_cur_match:
lzx_record_match(c, cur_len, cur_offset_data,
- recent_offsets, &litrunlen, &next_seq);
- in_next = hc_matchfinder_skip_positions(&c->hc_mf,
- in_begin,
- in_next - in_begin,
- in_end - in_begin,
- skip_len,
- next_hashes);
+ recent_offsets, is_16_bit,
+ &litrunlen, &next_seq);
+ in_next = CALL_HC_MF(is_16_bit, c, hc_matchfinder_skip_positions,
+ in_begin,
+ in_next - in_begin,
+ in_end - in_begin,
+ skip_len,
+ next_hashes);
} while (in_next < in_block_end);
lzx_finish_sequence(next_seq, litrunlen);
} while (in_next != in_end);
}
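+
+/* As with the near-optimal parser, specialize the lazy parser for 16-bit and
+ * 32-bit matchfinder positions. */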
+static void
+lzx_compress_lazy_16(struct lzx_compressor *c, struct lzx_output_bitstream *os)
+{
+ lzx_compress_lazy(c, os, true);
+}
+
+static void
+lzx_compress_lazy_32(struct lzx_compressor *c, struct lzx_output_bitstream *os)
+{
+ lzx_compress_lazy(c, os, false);
+}
+
/* Generate the acceleration tables for offset slots. */
static void
lzx_init_offset_slot_tabs(struct lzx_compressor *c)
lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
{
if (compression_level <= LZX_MAX_FAST_LEVEL) {
- return offsetof(struct lzx_compressor, hc_mf) +
- hc_matchfinder_size(max_bufsize);
+ if (lzx_is_16_bit(max_bufsize))
+ return offsetof(struct lzx_compressor, hc_mf_16) +
+ hc_matchfinder_size_16(max_bufsize);
+ else
+ return offsetof(struct lzx_compressor, hc_mf_32) +
+ hc_matchfinder_size_32(max_bufsize);
} else {
- return offsetof(struct lzx_compressor, bt_mf) +
- bt_matchfinder_size(max_bufsize);
+ if (lzx_is_16_bit(max_bufsize))
+ return offsetof(struct lzx_compressor, bt_mf_16) +
+ bt_matchfinder_size_16(max_bufsize);
+ else
+ return offsetof(struct lzx_compressor, bt_mf_32) +
+ bt_matchfinder_size_32(max_bufsize);
}
}
/* Fast compression: Use lazy parsing. */
- c->impl = lzx_compress_lazy;
+ if (lzx_is_16_bit(max_bufsize))
+ c->impl = lzx_compress_lazy_16;
+ else
+ c->impl = lzx_compress_lazy_32;
c->max_search_depth = (36 * compression_level) / 20;
c->nice_match_length = (72 * compression_level) / 20;
/* Normal / high compression: Use near-optimal parsing. */
- c->impl = lzx_compress_near_optimal;
+ if (lzx_is_16_bit(max_bufsize))
+ c->impl = lzx_compress_near_optimal_16;
+ else
+ c->impl = lzx_compress_near_optimal_32;
/* Scale nice_match_length and max_search_depth with the
* compression level. */