lzx_compress.c: minor comment fix
[wimlib] / src / lzx_compress.c
index b421c65ccd2808eb760078ae84084132140b48a0..520c07e33572081b7756a9c3622733522569817a 100644
 #define LZX_BIT_COST           16
 
 /*
- * Consideration of aligned offset costs is disabled for now, due to
- * insufficient benefit gained from the time spent.
+ * Should the compressor take into account the costs of aligned offset symbols?
  */
-#define LZX_CONSIDER_ALIGNED_COSTS     0
+#define LZX_CONSIDER_ALIGNED_COSTS     1
 
 /*
  * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
  * faster "lazy" parsing algorithm.
  */
 #define LZX_MAX_FAST_LEVEL     34
 
 /*
- * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
- * for finding length 2 matches.  This can be as high as 16 (in which case the
- * hash function is trivial), but using a smaller hash table speeds up
- * compression due to reduced cache pressure.
+ * BT_MATCHFINDER_HASH2_ORDER is the log base 2 of the number of entries in the
+ * hash table for finding length 2 matches.  This could be as high as 16, but
+ * using a smaller hash table speeds up compression due to reduced cache
+ * pressure.
  */
-#define LZX_HASH2_ORDER                12
-#define LZX_HASH2_LENGTH       (1UL << LZX_HASH2_ORDER)
+#define BT_MATCHFINDER_HASH2_ORDER     12
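For illustration, a length-2 hash of this order could be computed along the
following lines (a minimal sketch; the multiplicative constant and the helper
name are assumptions, not the actual bt_matchfinder.h code):

	static inline u32
	hash_2_bytes_sketch(const u8 *p, unsigned order)
	{
		u32 seq = load_u16_unaligned(p); /* read 2 bytes at any alignment */

		if (order == 16)    /* 65536-entry table: the hash is the identity */
			return seq;
		return (seq * 0x9E3779B1) >> (32 - order); /* multiplicative hash */
	}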
 
 /*
  * These are the compressor-side limits on the codeword lengths for each Huffman
  * code.
  */
 #define ALIGNED_CODEWORD_LIMIT 7
 #define PRE_CODEWORD_LIMIT     7
 
-#include "wimlib/lzx_common.h"
-
-/*
- * The maximum allowed window order for the matchfinder.
- */
-#define MATCHFINDER_MAX_WINDOW_ORDER   LZX_MAX_WINDOW_ORDER
-
-#include <string.h>
-
-#include "wimlib/bt_matchfinder.h"
 #include "wimlib/compress_common.h"
 #include "wimlib/compressor_ops.h"
 #include "wimlib/error.h"
-#include "wimlib/hc_matchfinder.h"
 #include "wimlib/lz_extend.h"
+#include "wimlib/lzx_common.h"
 #include "wimlib/unaligned.h"
 #include "wimlib/util.h"
 
+/* Matchfinders with 16-bit positions  */
+#define mf_pos_t       u16
+#define MF_SUFFIX      _16
+#include "wimlib/bt_matchfinder.h"
+#include "wimlib/hc_matchfinder.h"
+
+/* Matchfinders with 32-bit positions  */
+#undef mf_pos_t
+#undef MF_SUFFIX
+#define mf_pos_t       u32
+#define MF_SUFFIX      _32
+#include "wimlib/bt_matchfinder.h"
+#include "wimlib/hc_matchfinder.h"
+
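Each header is thus included twice to stamp out two variants of the same
matchfinder, in the style of a C "template".  A sketch of the pattern the
headers are assumed to follow (the names here are illustrative only):

	/* inside a hypothetical matchfinder header */
	#define MF_NAME(name)	CONCAT(name, MF_SUFFIX)

	struct MF_NAME(example_mf) {
		mf_pos_t hash_tab[4096];  /* u16 positions halve the table size */
	};

	static inline void
	MF_NAME(example_mf_init)(struct MF_NAME(example_mf) *mf)
	{
		memset(mf, 0, sizeof(*mf));	/* position 0 means "empty" */
	}

	#undef MF_NAME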
 struct lzx_output_bitstream;
 
 /* Codewords for the LZX Huffman codes.  */
@@ -230,9 +232,10 @@ struct lzx_sequence {
        u16 adjusted_length;
 
        /* If bit 31 is clear, then this field contains the match header in bits
-        * 0-8 and the match offset minus LZX_OFFSET_ADJUSTMENT in bits 9-30.
-        * Otherwise, this sequence's literal run was the last literal run in
-        * the block, so there is no match that follows it.  */
+        * 0-8, and either the match offset plus LZX_OFFSET_ADJUSTMENT or a
+        * recent offset code in bits 9-30.  Otherwise (if bit 31 is set), this
+        * sequence's literal run was the last literal run in the block, so
+        * there is no match that follows it.  */
        u32 adjusted_offset_and_match_hdr;
 };
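For clarity, the packing described above can be decoded as follows (a sketch;
these helpers are illustrative and do not exist in the file):

	static inline bool
	lzx_seq_is_last_run(const struct lzx_sequence *seq)
	{
		return (seq->adjusted_offset_and_match_hdr & 0x80000000) != 0;
	}

	static inline unsigned
	lzx_seq_match_hdr(const struct lzx_sequence *seq)
	{
		return seq->adjusted_offset_and_match_hdr & 0x1FF;  /* bits 0-8 */
	}

	static inline u32
	lzx_seq_adjusted_offset(const struct lzx_sequence *seq)
	{
		return (seq->adjusted_offset_and_match_hdr >> 9) & 0x3FFFFF; /* bits 9-30 */
	}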
 
@@ -329,15 +332,6 @@ lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
        };
 }
 
-/* Pop a match offset off the front (most recently used) end of the queue.  */
-static inline u32
-lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
-{
-       u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
-       queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
-       return offset;
-}
-
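For orientation: the queue packs the three recent LZX offsets into the single
u64 'R', most recent in the low bits.  A worked sketch, assuming 21-bit offset
fields (the real widths come from the LZX_QUEUE64_* constants defined earlier
in the file):

	static inline u32
	lru_queue_sketch(void)
	{
		const unsigned shift = 21;               /* assumed field width */
		const u64 mask = ((u64)1 << shift) - 1;
		u64 R = ((u64)300 << (2 * shift)) | ((u64)20 << shift) | 1;

		/* push(512): the other offsets slide up one slot; bits above
		 * the third slot are never read, so the oldest offset simply
		 * falls away */
		R = (R << shift) | 512;

		return (u32)(R & mask); /* 512: what the removed pop() returned */
	}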
 /* Swap a match offset to the front of the queue.  */
 static inline struct lzx_lru_queue
 lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
@@ -408,8 +402,10 @@ struct lzx_compressor {
 
        /* The matches and literals that the parser has chosen for the current
         * block.  The required length of this array is limited by the maximum
-        * number of matches that can ever be chosen for a single block.  */
-       struct lzx_sequence chosen_sequences[DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN)];
+        * number of matches that can ever be chosen for a single block, plus
+        * one for the special entry at the end.  */
+       struct lzx_sequence chosen_sequences[
+                      DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];
 
        /* Tables for mapping adjusted offsets to offset slots  */
 
@@ -423,7 +419,10 @@ struct lzx_compressor {
                /* Data for greedy or lazy parsing  */
                struct {
                        /* Hash chains matchfinder (MUST BE LAST!!!)  */
-                       struct hc_matchfinder hc_mf;
+                       union {
+                               struct hc_matchfinder_16 hc_mf_16;
+                               struct hc_matchfinder_32 hc_mf_32;
+                       };
                };
 
                /* Data for near-optimal parsing  */
@@ -478,15 +477,43 @@ struct lzx_compressor {
                                                    LZX_MAX_MATCHES_PER_POS +
                                                    LZX_MAX_MATCH_LEN - 1];
 
-                       /* Hash table for finding length 2 matches  */
-                       pos_t hash2_tab[LZX_HASH2_LENGTH];
-
                        /* Binary trees matchfinder (MUST BE LAST!!!)  */
-                       struct bt_matchfinder bt_mf;
+                       union {
+                               struct bt_matchfinder_16 bt_mf_16;
+                               struct bt_matchfinder_32 bt_mf_32;
+                       };
                };
        };
 };
 
+/*
+ * Will a matchfinder using 16-bit positions be sufficient for compressing
+ * buffers of up to the specified size?  The limit could be 65536 bytes, but we
+ * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
+ * This requires that the limit be no more than the length of offset_slot_tab_1
+ * (currently 32768).
+ */
+static inline bool
+lzx_is_16_bit(size_t max_bufsize)
+{
+       STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
+       return max_bufsize <= 32768;
+}
+
+/*
+ * The following macros call either the 16-bit or the 32-bit version of a
+ * matchfinder function based on the value of 'is_16_bit', which will be known
+ * at compilation time.
+ */
+
+#define CALL_HC_MF(is_16_bit, c, funcname, ...)                              \
+       ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
+                      CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__))
+
+#define CALL_BT_MF(is_16_bit, c, funcname, ...)                              \
+       ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \
+                      CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__))
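For example, CALL_HC_MF(true, c, hc_matchfinder_init) expands to a conditional
whose taken arm is hc_matchfinder_init_16(&(c)->hc_mf_16); because 'is_16_bit'
is a compile-time constant at every call site, the untaken arm is dead code
and the compiler discards it.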
+
 /*
  * Structure to keep track of the current state of sending bits to the
  * compressed output buffer.
@@ -515,7 +542,7 @@ struct lzx_output_bitstream {
 
 /* Can the specified number of bits always be added to 'bitbuf' after any
  * pending 16-bit coding units have been flushed?  */
-#define CAN_BUFFER(n)  ((n) <= (8 * sizeof(machine_word_t)) - 16)
+#define CAN_BUFFER(n)  ((n) <= (8 * sizeof(machine_word_t)) - 15)
 
 /*
  * Initialize the output bitstream.
@@ -552,13 +579,21 @@ lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
 static inline void
 lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
 {
+       /* Masking the number of bits to shift is only needed to avoid undefined
+        * behavior; we don't actually care about the results of bad shifts.  On
+        * x86, the explicit masking generates no extra code.  */
+       const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1;
+
        if (os->end - os->next < 6)
                return;
-       put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 16), os->next + 0);
+       put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 16) &
+                                           shift_mask), os->next + 0);
        if (max_num_bits > 16)
-               put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 32), os->next + 2);
+               put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 32) &
+                                               shift_mask), os->next + 2);
        if (max_num_bits > 32)
-               put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 48), os->next + 4);
+               put_unaligned_u16_le(os->bitbuf >> ((os->bitcount - 48) &
+                                               shift_mask), os->next + 4);
        os->next += (os->bitcount >> 4) << 1;
        os->bitcount &= 15;
 }
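To make the masking concrete (assuming a 64-bit 'bitbuf'): if os->bitcount is
10 when a flush is attempted, (os->bitcount - 16) wraps to 0xFFFFFFFA, and an
unmasked shift by that count would be undefined behavior.  Masked with
shift_mask it becomes a well-defined shift by 58.  The u16 written from it is
garbage but harmless: os->next advances only past complete 16-bit units,
(os->bitcount >> 4) << 1 bytes, so any garbage bytes are overwritten by a
later flush.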
@@ -601,7 +636,7 @@ lzx_make_huffman_codes(struct lzx_compressor *c)
 
        STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
                      MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
-       STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 9 &&
+       STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
                      LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
        STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
                      ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
@@ -868,23 +903,30 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                                        unsigned lit1 = block_data[1];
                                        unsigned lit2 = block_data[2];
                                        unsigned lit3 = block_data[3];
-                                       lzx_add_bits(os, codes->codewords.main[lit0], codes->lens.main[lit0]);
-                                       lzx_add_bits(os, codes->codewords.main[lit1], codes->lens.main[lit1]);
-                                       lzx_add_bits(os, codes->codewords.main[lit2], codes->lens.main[lit2]);
-                                       lzx_add_bits(os, codes->codewords.main[lit3], codes->lens.main[lit3]);
+                                       lzx_add_bits(os, codes->codewords.main[lit0],
+                                                    codes->lens.main[lit0]);
+                                       lzx_add_bits(os, codes->codewords.main[lit1],
+                                                    codes->lens.main[lit1]);
+                                       lzx_add_bits(os, codes->codewords.main[lit2],
+                                                    codes->lens.main[lit2]);
+                                       lzx_add_bits(os, codes->codewords.main[lit3],
+                                                    codes->lens.main[lit3]);
                                        lzx_flush_bits(os, 4 * MAIN_CODEWORD_LIMIT);
                                        block_data += 4;
                                        litrunlen -= 4;
                                }
                                if (litrunlen--) {
                                        unsigned lit = *block_data++;
-                                       lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+                                       lzx_add_bits(os, codes->codewords.main[lit],
+                                                    codes->lens.main[lit]);
                                        if (litrunlen--) {
                                                unsigned lit = *block_data++;
-                                               lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+                                               lzx_add_bits(os, codes->codewords.main[lit],
+                                                            codes->lens.main[lit]);
                                                if (litrunlen--) {
                                                        unsigned lit = *block_data++;
-                                                       lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+                                                       lzx_add_bits(os, codes->codewords.main[lit],
+                                                                    codes->lens.main[lit]);
                                                        lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
                                                } else {
                                                        lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
@@ -897,7 +939,8 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                                /* 32-bit: write 1 literal at a time.  */
                                do {
                                        unsigned lit = *block_data++;
-                                       lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+                                       lzx_add_bits(os, codes->codewords.main[lit],
+                                                    codes->lens.main[lit]);
                                        lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
                                } while (--litrunlen);
                        }
@@ -937,8 +980,10 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                /* If needed, output the length symbol for the match.  */
 
                if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
-                       lzx_add_bits(os, codes->codewords.len[adjusted_length - LZX_NUM_PRIMARY_LENS],
-                                    codes->lens.len[adjusted_length - LZX_NUM_PRIMARY_LENS]);
+                       lzx_add_bits(os, codes->codewords.len[adjusted_length -
+                                                             LZX_NUM_PRIMARY_LENS],
+                                    codes->lens.len[adjusted_length -
+                                                    LZX_NUM_PRIMARY_LENS]);
                        if (!CAN_BUFFER(MAX_MATCH_BITS))
                                lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
                }
@@ -956,11 +1001,15 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
                        if (!CAN_BUFFER(MAX_MATCH_BITS))
                                lzx_flush_bits(os, 14);
 
-                       lzx_add_bits(os, codes->codewords.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK],
-                                    codes->lens.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]);
+                       lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
+                                                                 LZX_ALIGNED_OFFSET_BITMASK],
+                                    codes->lens.aligned[adjusted_offset &
+                                                        LZX_ALIGNED_OFFSET_BITMASK]);
                        if (!CAN_BUFFER(MAX_MATCH_BITS))
                                lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
                } else {
+                       STATIC_ASSERT(CAN_BUFFER(17));
+
                        lzx_add_bits(os, extra_bits, num_extra_bits);
                        if (!CAN_BUFFER(MAX_MATCH_BITS))
                                lzx_flush_bits(os, 17);
@@ -985,9 +1034,6 @@ lzx_write_compressed_block(const u8 *block_begin,
                           const struct lzx_lens * prev_lens,
                           struct lzx_output_bitstream * os)
 {
-       LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
-                  block_type == LZX_BLOCKTYPE_VERBATIM);
-
        /* The first three bits indicate the type of block and are one of the
         * LZX_BLOCKTYPE_* constants.  */
        lzx_write_bits(os, block_type, 3);
@@ -1075,9 +1121,10 @@ lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
  * compressor's acceleration tables to speed up the mapping.
  */
 static inline unsigned
-lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset)
+lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
+                        bool is_16_bit)
 {
-       if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
+       if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
                return c->offset_slot_tab_1[adjusted_offset];
        return c->offset_slot_tab_2[adjusted_offset >> 14];
 }
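When 'is_16_bit' is true, every adjusted offset is below
ARRAY_LEN(c->offset_slot_tab_1) (see lzx_is_16_bit() above), so the condition
folds to true at compile time and the function reduces to the single lookup
'return c->offset_slot_tab_1[adjusted_offset];', leaving offset_slot_tab_2
entirely unused in the 16-bit specializations.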
@@ -1126,7 +1173,7 @@ lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
  * offsets queue.  */
 static inline void
 lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
-                u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
+                u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
                 u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
 {
        u32 litrunlen = *litrunlen_p;
@@ -1147,7 +1194,7 @@ lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
        }
 
        /* Compute the offset slot  */
-       offset_slot = lzx_comp_get_offset_slot(c, offset_data);
+       offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
 
        /* Compute the match header.  */
        v += offset_slot * LZX_NUM_LEN_HEADERS;
@@ -1200,8 +1247,8 @@ lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
  * beginning of the block), but this doesn't matter because this function only
  * computes frequencies.
  */
-static void
-lzx_tally_item_list(struct lzx_compressor *c, u32 block_size)
+static inline void
+lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
 {
        u32 node_idx = block_size;
        for (;;) {
@@ -1244,7 +1291,7 @@ lzx_tally_item_list(struct lzx_compressor *c, u32 block_size)
                }
 
                /* Tally the main symbol.  */
-               offset_slot = lzx_comp_get_offset_slot(c, offset_data);
+               offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
                v += offset_slot * LZX_NUM_LEN_HEADERS;
                c->freqs.main[LZX_NUM_CHARS + v]++;
 
@@ -1262,8 +1309,8 @@ lzx_tally_item_list(struct lzx_compressor *c, u32 block_size)
  * first-to-last order.  The return value is the index in c->chosen_sequences at
  * which the lzx_sequences begin.
  */
-static u32
-lzx_record_item_list(struct lzx_compressor *c, u32 block_size)
+static inline u32
+lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
 {
        u32 node_idx = block_size;
        u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
@@ -1320,7 +1367,7 @@ lzx_record_item_list(struct lzx_compressor *c, u32 block_size)
                }
 
                /* Tally the main symbol.  */
-               offset_slot = lzx_comp_get_offset_slot(c, offset_data);
+               offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
                v += offset_slot * LZX_NUM_LEN_HEADERS;
                c->freqs.main[LZX_NUM_CHARS + v]++;
 
@@ -1372,11 +1419,12 @@ out:
  * later.  The algorithm does not solve this problem; it only considers the
  * lowest cost to reach each individual position.
  */
-static struct lzx_lru_queue
+static inline struct lzx_lru_queue
 lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
                       const u8 * const restrict block_begin,
                       const u32 block_size,
-                      const struct lzx_lru_queue initial_queue)
+                      const struct lzx_lru_queue initial_queue,
+                      bool is_16_bit)
 {
        struct lzx_optimum_node *cur_node = c->optimum_nodes;
        struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
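In outline, the path search below is a forward shortest-path relaxation over
byte positions (a sketch of the idea, not the literal code):

	/*
	 * for each position i in [0, block_size):
	 *     for each literal or match of length len starting at i:
	 *         if (cost[i] + item_cost < cost[i + len]) {
	 *             cost[i + len] = cost[i] + item_cost;
	 *             item[i + len] = this literal or match;
	 *         }
	 *
	 * Then walk the 'item' links backwards from position block_size to
	 * recover the chosen sequence of literals and matches.
	 */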
@@ -1522,17 +1570,20 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
                        do {
                                u32 offset = cache_ptr->offset;
                                u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
-                               unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data);
+                               unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
+                                                                               is_16_bit);
+                               u32 base_cost = cur_node->cost;
+
+                       #if LZX_CONSIDER_ALIGNED_COSTS
+                               if (offset_data >= 16)
+                                       base_cost += c->costs.aligned[offset_data &
+                                                                     LZX_ALIGNED_OFFSET_BITMASK];
+                       #endif
+
                                do {
-                                       u32 cost = cur_node->cost +
+                                       u32 cost = base_cost +
                                                   c->costs.match_cost[offset_slot][
                                                                next_len - LZX_MIN_MATCH_LEN];
-                               #if LZX_CONSIDER_ALIGNED_COSTS
-                                       if (lzx_extra_offset_bits[offset_slot] >=
-                                           LZX_NUM_ALIGNED_OFFSET_BITS)
-                                               cost += c->costs.aligned[offset_data &
-                                                                        LZX_ALIGNED_OFFSET_BITMASK];
-                               #endif
                                        if (cost < (cur_node + next_len)->cost) {
                                                (cur_node + next_len)->cost = cost;
                                                (cur_node + next_len)->item =
@@ -1550,8 +1601,7 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
                 * of coding the literal is integrated into the queue update
                 * code below.  */
                literal = *in_next++;
-               cost = cur_node->cost +
-                      c->costs.main[lzx_main_symbol_for_literal(literal)];
+               cost = cur_node->cost + c->costs.main[literal];
 
                /* Advance to the next position.  */
                cur_node++;
@@ -1591,17 +1641,19 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
 static void
 lzx_compute_match_costs(struct lzx_compressor *c)
 {
-       unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
+       unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) /
+                                       LZX_NUM_LEN_HEADERS;
        struct lzx_costs *costs = &c->costs;
 
        for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
 
                u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
-               unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
+               unsigned main_symbol = LZX_NUM_CHARS + (offset_slot *
+                                                       LZX_NUM_LEN_HEADERS);
                unsigned i;
 
        #if LZX_CONSIDER_ALIGNED_COSTS
-               if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
+               if (offset_slot >= 8)
                        extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
        #endif
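The num_offset_slots computation above works because the main code consists of
LZX_NUM_CHARS (256) literal symbols followed by LZX_NUM_LEN_HEADERS (8) match
symbols per offset slot; for example, a 656-symbol main code would imply
(656 - 256) / 8 = 50 offset slots.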
 
@@ -1675,26 +1727,33 @@ lzx_update_costs(struct lzx_compressor *c)
        unsigned i;
        const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
 
-       for (i = 0; i < c->num_main_syms; i++)
-               c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
+       for (i = 0; i < c->num_main_syms; i++) {
+               c->costs.main[i] = (lens->main[i] ? lens->main[i] :
+                                   MAIN_CODEWORD_LIMIT) * LZX_BIT_COST;
+       }
 
-       for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
-               c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
+       for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
+               c->costs.len[i] = (lens->len[i] ? lens->len[i] :
+                                  LENGTH_CODEWORD_LIMIT) * LZX_BIT_COST;
+       }
 
 #if LZX_CONSIDER_ALIGNED_COSTS
-       for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
-               c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
+       for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+               c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] :
+                                      ALIGNED_CODEWORD_LIMIT) * LZX_BIT_COST;
+       }
 #endif
 
        lzx_compute_match_costs(c);
 }
 
-static struct lzx_lru_queue
+static inline struct lzx_lru_queue
 lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
                             struct lzx_output_bitstream * const restrict os,
                             const u8 * const restrict block_begin,
                             const u32 block_size,
-                            const struct lzx_lru_queue initial_queue)
+                            const struct lzx_lru_queue initial_queue,
+                            bool is_16_bit)
 {
        unsigned num_passes_remaining = c->num_optim_passes;
        struct lzx_lru_queue new_queue;
@@ -1708,16 +1767,16 @@ lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
        lzx_reset_symbol_frequencies(c);
        do {
                new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
-                                                  initial_queue);
+                                                  initial_queue, is_16_bit);
                if (num_passes_remaining > 1) {
-                       lzx_tally_item_list(c, block_size);
+                       lzx_tally_item_list(c, block_size, is_16_bit);
                        lzx_make_huffman_codes(c);
                        lzx_update_costs(c);
                        lzx_reset_symbol_frequencies(c);
                }
        } while (--num_passes_remaining);
 
-       seq_idx = lzx_record_item_list(c, block_size);
+       seq_idx = lzx_record_item_list(c, block_size, is_16_bit);
        lzx_finish_block(c, os, block_begin, block_size, seq_idx);
        return new_queue;
 }
@@ -1735,21 +1794,20 @@ lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
  * time, but rather to produce a compression ratio significantly better than a
  * simpler "greedy" or "lazy" parse while still being relatively fast.
  */
-static void
+static inline void
 lzx_compress_near_optimal(struct lzx_compressor *c,
-                         struct lzx_output_bitstream *os)
+                         struct lzx_output_bitstream *os,
+                         bool is_16_bit)
 {
        const u8 * const in_begin = c->in_buffer;
        const u8 *       in_next = in_begin;
        const u8 * const in_end  = in_begin + c->in_nbytes;
-       unsigned max_len = LZX_MAX_MATCH_LEN;
-       unsigned nice_len = min(c->nice_match_length, max_len);
-       u32 next_hash;
+       u32 max_len = LZX_MAX_MATCH_LEN;
+       u32 nice_len = min(c->nice_match_length, max_len);
+       u32 next_hashes[2] = {};
        struct lzx_lru_queue queue;
 
-       bt_matchfinder_init(&c->bt_mf);
-       memset(c->hash2_tab, 0, sizeof(c->hash2_tab));
-       next_hash = bt_matchfinder_hash_3_bytes(in_next);
+       CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
        lzx_lru_queue_init(&queue);
 
        do {
@@ -1762,22 +1820,16 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
                struct lz_match *cache_ptr = c->match_cache;
                do {
                        struct lz_match *lz_matchptr;
-                       u32 hash2;
-                       pos_t cur_match;
-                       unsigned best_len;
+                       u32 best_len;
 
                        /* If approaching the end of the input buffer, adjust
                         * 'max_len' and 'nice_len' accordingly.  */
                        if (unlikely(max_len > in_end - in_next)) {
                                max_len = in_end - in_next;
                                nice_len = min(max_len, nice_len);
-
-                               /* This extra check is needed to ensure that we
-                                * never output a length 2 match of the very
-                                * last two bytes with the very first two bytes,
-                                * since such a match has an offset too large to
-                                * be represented.  */
-                               if (unlikely(max_len < 3)) {
+                               if (unlikely(max_len <
+                                            BT_MATCHFINDER_REQUIRED_NBYTES))
+                               {
                                        in_next++;
                                        cache_ptr->length = 0;
                                        cache_ptr++;
@@ -1785,33 +1837,17 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
                                }
                        }
 
-                       lz_matchptr = cache_ptr + 1;
-
-                       /* Check for a length 2 match.  */
-                       hash2 = lz_hash_2_bytes(in_next, LZX_HASH2_ORDER);
-                       cur_match = c->hash2_tab[hash2];
-                       c->hash2_tab[hash2] = in_next - in_begin;
-                       if (cur_match != 0 &&
-                           (LZX_HASH2_ORDER == 16 ||
-                            load_u16_unaligned(&in_begin[cur_match]) ==
-                            load_u16_unaligned(in_next)))
-                       {
-                               lz_matchptr->length = 2;
-                               lz_matchptr->offset = in_next - &in_begin[cur_match];
-                               lz_matchptr++;
-                       }
-
-                       /* Check for matches of length >= 3.  */
-                       lz_matchptr = bt_matchfinder_get_matches(&c->bt_mf,
-                                                                in_begin,
-                                                                in_next,
-                                                                3,
-                                                                max_len,
-                                                                nice_len,
-                                                                c->max_search_depth,
-                                                                &next_hash,
-                                                                &best_len,
-                                                                lz_matchptr);
+                       /* Check for matches.  */
+                       lz_matchptr = CALL_BT_MF(is_16_bit, c,
+                                                bt_matchfinder_get_matches,
+                                                in_begin,
+                                                in_next - in_begin,
+                                                max_len,
+                                                nice_len,
+                                                c->max_search_depth,
+                                                next_hashes,
+                                                &best_len,
+                                                cache_ptr + 1);
                        in_next++;
                        cache_ptr->length = lz_matchptr - (cache_ptr + 1);
                        cache_ptr = lz_matchptr;
@@ -1834,22 +1870,23 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
                                        if (unlikely(max_len > in_end - in_next)) {
                                                max_len = in_end - in_next;
                                                nice_len = min(max_len, nice_len);
-                                               if (unlikely(max_len < 3)) {
+                                               if (unlikely(max_len <
+                                                            BT_MATCHFINDER_REQUIRED_NBYTES))
+                                               {
                                                        in_next++;
                                                        cache_ptr->length = 0;
                                                        cache_ptr++;
                                                        continue;
                                                }
                                        }
-                                       c->hash2_tab[lz_hash_2_bytes(in_next, LZX_HASH2_ORDER)] =
-                                               in_next - in_begin;
-                                       bt_matchfinder_skip_position(&c->bt_mf,
-                                                                    in_begin,
-                                                                    in_next,
-                                                                    in_end,
-                                                                    nice_len,
-                                                                    c->max_search_depth,
-                                                                    &next_hash);
+                                       CALL_BT_MF(is_16_bit, c,
+                                                  bt_matchfinder_skip_position,
+                                                  in_begin,
+                                                  in_next - in_begin,
+                                                  max_len,
+                                                  nice_len,
+                                                  c->max_search_depth,
+                                                  next_hashes);
                                        in_next++;
                                        cache_ptr->length = 0;
                                        cache_ptr++;
@@ -1863,10 +1900,24 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
 
                queue = lzx_optimize_and_write_block(c, os, in_block_begin,
                                                     in_next - in_block_begin,
-                                                    queue);
+                                                    queue, is_16_bit);
        } while (in_next != in_end);
 }
 
+static void
+lzx_compress_near_optimal_16(struct lzx_compressor *c,
+                            struct lzx_output_bitstream *os)
+{
+       lzx_compress_near_optimal(c, os, true);
+}
+
+static void
+lzx_compress_near_optimal_32(struct lzx_compressor *c,
+                            struct lzx_output_bitstream *os)
+{
+       lzx_compress_near_optimal(c, os, false);
+}
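These thin wrappers, together with the 'inline' on lzx_compress_near_optimal(),
are what make 'is_16_bit' a compile-time constant: the compiler emits two
independent specializations, much as a C++ template would.  The same idiom in
miniature (illustrative):

	static inline int
	scale(int x, bool fast)
	{
		return fast ? x * 2 : x * 3;
	}

	/* each wrapper compiles to a branch-free specialized body */
	static int scale_fast(int x) { return scale(x, true); }  /* x * 2 */
	static int scale_slow(int x) { return scale(x, false); } /* x * 3 */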
+
 /*
  * Given a pointer to the current byte sequence and the current list of recent
  * match offsets, find the longest repeat offset match.
@@ -1883,7 +1934,6 @@ lzx_find_longest_repeat_offset_match(const u8 * const in_next,
                                     unsigned *rep_max_idx_ret)
 {
        STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
-       LZX_ASSERT(bytes_remaining >= 2);
 
        const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
        const u16 next_2_bytes = load_u16_unaligned(in_next);
@@ -1943,8 +1993,9 @@ lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
 }
 
 /* This is the "lazy" LZX compressor.  */
-static void
-lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
+static inline void
+lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
+                 bool is_16_bit)
 {
        const u8 * const in_begin = c->in_buffer;
        const u8 *       in_next = in_begin;
@@ -1955,7 +2006,7 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
        u32 recent_offsets[3] = {1, 1, 1};
        u32 next_hashes[2] = {};
 
-       hc_matchfinder_init(&c->hc_mf);
+       CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
 
        do {
                /* Starting a new block  */
@@ -1988,15 +2039,16 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
 
                        /* Find the longest match at the current position.  */
 
-                       cur_len = hc_matchfinder_longest_match(&c->hc_mf,
-                                                              in_begin,
-                                                              in_next - in_begin,
-                                                              2,
-                                                              max_len,
-                                                              nice_len,
-                                                              c->max_search_depth,
-                                                              next_hashes,
-                                                              &cur_offset);
+                       cur_len = CALL_HC_MF(is_16_bit, c,
+                                            hc_matchfinder_longest_match,
+                                            in_begin,
+                                            in_next - in_begin,
+                                            2,
+                                            max_len,
+                                            nice_len,
+                                            c->max_search_depth,
+                                            next_hashes,
+                                            &cur_offset);
                        if (cur_len < 3 ||
                            (cur_len == 3 &&
                             cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
@@ -2054,15 +2106,16 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
                                nice_len = min(max_len, nice_len);
                        }
 
-                       next_len = hc_matchfinder_longest_match(&c->hc_mf,
-                                                               in_begin,
-                                                               in_next - in_begin,
-                                                               cur_len - 2,
-                                                               max_len,
-                                                               nice_len,
-                                                               c->max_search_depth / 2,
-                                                               next_hashes,
-                                                               &next_offset);
+                       next_len = CALL_HC_MF(is_16_bit, c,
+                                             hc_matchfinder_longest_match,
+                                             in_begin,
+                                             in_next - in_begin,
+                                             cur_len - 2,
+                                             max_len,
+                                             nice_len,
+                                             c->max_search_depth / 2,
+                                             next_hashes,
+                                             &next_offset);
 
                        if (next_len <= cur_len - 2) {
                                in_next++;
@@ -2112,13 +2165,15 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
 
                choose_cur_match:
                        lzx_record_match(c, cur_len, cur_offset_data,
-                                        recent_offsets, &litrunlen, &next_seq);
-                       in_next = hc_matchfinder_skip_positions(&c->hc_mf,
-                                                               in_begin,
-                                                               in_next - in_begin,
-                                                               in_end - in_begin,
-                                                               skip_len,
-                                                               next_hashes);
+                                        recent_offsets, is_16_bit,
+                                        &litrunlen, &next_seq);
+                       in_next = CALL_HC_MF(is_16_bit, c,
+                                            hc_matchfinder_skip_positions,
+                                            in_begin,
+                                            in_next - in_begin,
+                                            in_end - in_begin,
+                                            skip_len,
+                                            next_hashes);
                } while (in_next < in_block_end);
 
                lzx_finish_sequence(next_seq, litrunlen);
@@ -2128,6 +2183,18 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
        } while (in_next != in_end);
 }
 
+static void
+lzx_compress_lazy_16(struct lzx_compressor *c, struct lzx_output_bitstream *os)
+{
+       lzx_compress_lazy(c, os, true);
+}
+
+static void
+lzx_compress_lazy_32(struct lzx_compressor *c, struct lzx_output_bitstream *os)
+{
+       lzx_compress_lazy(c, os, false);
+}
+
 /* Generate the acceleration tables for offset slots.  */
 static void
 lzx_init_offset_slot_tabs(struct lzx_compressor *c)
@@ -2158,11 +2225,19 @@ static size_t
 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
 {
        if (compression_level <= LZX_MAX_FAST_LEVEL) {
-               return offsetof(struct lzx_compressor, hc_mf) +
-                       hc_matchfinder_size(max_bufsize);
+               if (lzx_is_16_bit(max_bufsize))
+                       return offsetof(struct lzx_compressor, hc_mf_16) +
+                              hc_matchfinder_size_16(max_bufsize);
+               else
+                       return offsetof(struct lzx_compressor, hc_mf_32) +
+                              hc_matchfinder_size_32(max_bufsize);
        } else {
-               return offsetof(struct lzx_compressor, bt_mf) +
-                       bt_matchfinder_size(max_bufsize);
+               if (lzx_is_16_bit(max_bufsize))
+                       return offsetof(struct lzx_compressor, bt_mf_16) +
+                              bt_matchfinder_size_16(max_bufsize);
+               else
+                       return offsetof(struct lzx_compressor, bt_mf_32) +
+                              bt_matchfinder_size_32(max_bufsize);
        }
 }
 
@@ -2211,9 +2286,12 @@ lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
 
                /* Fast compression: Use lazy parsing.  */
 
-               c->impl = lzx_compress_lazy;
-               c->max_search_depth = (36 * compression_level) / 20;
-               c->nice_match_length = (72 * compression_level) / 20;
+               if (lzx_is_16_bit(max_bufsize))
+                       c->impl = lzx_compress_lazy_16;
+               else
+                       c->impl = lzx_compress_lazy_32;
+               c->max_search_depth = (60 * compression_level) / 20;
+               c->nice_match_length = (80 * compression_level) / 20;
 
                /* lzx_compress_lazy() needs max_search_depth >= 2 because it
                 * halves the max_search_depth when attempting a lazy match, and
@@ -2224,12 +2302,15 @@ lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
 
                /* Normal / high compression: Use near-optimal parsing.  */
 
-               c->impl = lzx_compress_near_optimal;
+               if (lzx_is_16_bit(max_bufsize))
+                       c->impl = lzx_compress_near_optimal_16;
+               else
+                       c->impl = lzx_compress_near_optimal_32;
 
                /* Scale nice_match_length and max_search_depth with the
                 * compression level.  */
                c->max_search_depth = (24 * compression_level) / 50;
-               c->nice_match_length = (32 * compression_level) / 50;
+               c->nice_match_length = (48 * compression_level) / 50;
 
                /* Set a number of optimization passes appropriate for the
                 * compression level.  */
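For concreteness: at the default level of 50 (assumed here), the near-optimal
parser thus gets max_search_depth = (24 * 50) / 50 = 24 and nice_match_length =
(48 * 50) / 50 = 48, while the lazy parser at, say, level 20 would get
(60 * 20) / 20 = 60 and (80 * 20) / 20 = 80.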
@@ -2290,7 +2371,7 @@ lzx_compress(const void *restrict in, size_t in_nbytes,
        else
                memcpy(c->in_buffer, in, in_nbytes);
        c->in_nbytes = in_nbytes;
-       lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
+       lzx_preprocess(c->in_buffer, in_nbytes);
 
        /* Initially, the previous Huffman codeword lengths are all zeroes.  */
        c->codes_index = 0;
@@ -2305,7 +2386,7 @@ lzx_compress(const void *restrict in, size_t in_nbytes,
        /* Flush the output bitstream and return the compressed size or 0.  */
        result = lzx_flush_output(&os);
        if (!result && c->destructive)
-               lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes);
+               lzx_postprocess(c->in_buffer, c->in_nbytes);
        return result;
 }
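lzx_preprocess() and lzx_postprocess() are the renamed E8 translation passes
(formerly lzx_do_e8_preprocessing() and lzx_undo_e8_preprocessing()).
Conceptually, LZX's x86 filter rewrites the rel32 displacement of each E8
(CALL) opcode into an absolute target, so repeated calls to the same function
become repeated byte strings that compress better; the postprocessing step
inverts the transform.  A simplified sketch, omitting the real filter's range
checks and translation window:

	static void
	e8_translate_sketch(u8 *data, size_t size, bool undo)
	{
		for (size_t i = 0; size >= 5 && i <= size - 5; i++) {
			if (data[i] != 0xE8)	/* x86 CALL rel32 opcode */
				continue;
			u8 *p = &data[i + 1];
			s32 v = (s32)((u32)p[0] | ((u32)p[1] << 8) |
				      ((u32)p[2] << 16) | ((u32)p[3] << 24));

			/* compress: relative -> absolute; undo: the inverse */
			v = undo ? v - (s32)i : v + (s32)i;

			p[0] = (u8)v;
			p[1] = (u8)(v >> 8);
			p[2] = (u8)(v >> 16);
			p[3] = (u8)(v >> 24);
			i += 4;	/* skip the displacement just rewritten */
		}
	}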