Compression updates
index 8ad19bc0d1cb2820524d4cde3b6f680b3bb45326..fb1f777ac6b5c3adeb69c1a8bf7508e603baf92f 100644 (file)
@@ -1,5 +1,7 @@
 /*
  * lzms-compress.c
+ *
+ * A compressor that produces output compatible with the LZMS compression format.
  */
 
 /*
  * along with wimlib; if not, see http://www.gnu.org/licenses/.
  */
 
-/* This a compressor for the LZMS compression format.  More details about this
- * format can be found in lzms-decompress.c.
- *
- * Also see lzx-compress.c for general information about match-finding and
- * match-choosing that also applies to this LZMS compressor.
- *
- * NOTE: this compressor currently does not code any delta matches.
- */
-
 #ifdef HAVE_CONFIG_H
 #  include "config.h"
 #endif
 
-#include "wimlib/assert.h"
-#include "wimlib/compiler.h"
-#include "wimlib/compressor_ops.h"
 #include "wimlib/compress_common.h"
+#include "wimlib/compressor_ops.h"
 #include "wimlib/endianness.h"
 #include "wimlib/error.h"
 #include "wimlib/lz_mf.h"
 #include <limits.h>
 #include <pthread.h>
 
-/* Stucture used for writing raw bits to the end of the LZMS-compressed data as
- * a series of 16-bit little endian coding units.  */
+/* Structure used for writing raw bits as a series of 16-bit little endian coding
+ * units.  This starts at the *end* of the compressed data buffer and proceeds
+ * backwards.  */
 struct lzms_output_bitstream {
-       /* Buffer variable containing zero or more bits that have been logically
-        * written to the bitstream but not yet written to memory.  This must be
-        * at least as large as the coding unit size.  */
-       u16 bitbuf;
 
-       /* Number of bits in @bitbuf that are valid.  */
-       unsigned num_free_bits;
+       /* Bits that haven't yet been written to the output buffer.  */
+       u64 bitbuf;
+
+       /* Number of bits currently held in @bitbuf.  */
+       unsigned bitcount;
 
        /* Pointer to one past the next position in the compressed data buffer
         * at which to output a 16-bit coding unit.  */
-       le16 *out;
+       le16 *next;
 
-       /* Maximum number of 16-bit coding units that can still be output to
-        * the compressed data buffer.  */
-       size_t num_le16_remaining;
-
-       /* Set to %true if not all coding units could be output due to
-        * insufficient space.  */
-       bool overrun;
+       /* Pointer to the beginning of the output buffer.  (The "end" when
+        * writing backwards!)  */
+       le16 *begin;
 };
 
-/* Stucture used for range encoding (raw version).  */
+/* Structure used for range encoding (raw version).  This starts at the
+ * *beginning* of the compressed data buffer and proceeds forward.  */
 struct lzms_range_encoder_raw {
 
        /* A 33-bit variable that holds the low boundary of the current range.
@@ -91,25 +79,21 @@ struct lzms_range_encoder_raw {
         * subsequent such coding units are 0xffff.  */
        u32 cache_size;
 
-       /* Pointer to the next position in the compressed data buffer at which
-        * to output a 16-bit coding unit.  */
-       le16 *out;
-
-       /* Maximum number of 16-bit coding units that can still be output to
-        * the compressed data buffer.  */
-       size_t num_le16_remaining;
+       /* Pointer to the beginning of the output buffer.  */
+       le16 *begin;
 
-       /* %true when the very first coding unit has not yet been output.  */
-       bool first;
+       /* Pointer to the position in the output buffer at which the next coding
+        * unit must be written.  */
+       le16 *next;
 
-       /* Set to %true if not all coding units could be output due to
-        * insufficient space.  */
-       bool overrun;
+       /* Pointer just past the end of the output buffer.  */
+       le16 *end;
 };
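/*
 * Aside (illustrative sketch, with a hypothetical helper name): the two
 * structures above share one output buffer.  The raw range encoder writes
 * 16-bit coding units forward from the beginning while the output bitstream
 * writes them backward from the end, so the two write pointers approach each
 * other.  Paired initialization, using the init functions defined later in
 * this file:
 */
static void
lzms_init_output_streams(struct lzms_compressor *c, le16 *out, size_t out_limit)
{
        /* Front of the buffer, proceeding forward  */
        lzms_range_encoder_raw_init(&c->rc, out, out_limit);

        /* End of the buffer, proceeding backward  */
        lzms_output_bitstream_init(&c->os, out, out_limit);
}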
 
 /* Structure used for range encoding.  This wraps around `struct
  * lzms_range_encoder_raw' to use and maintain probability entries.  */
 struct lzms_range_encoder {
+
        /* Pointer to the raw range encoder, which has no persistent knowledge
         * of probabilities.  Multiple lzms_range_encoder's share the same
         * lzms_range_encoder_raw.  */
@@ -157,6 +141,7 @@ struct lzms_huffman_encoder {
        u32 codewords[LZMS_MAX_NUM_SYMS];
 };
 
+/* Internal compression parameters  */
 struct lzms_compressor_params {
        u32 min_match_length;
        u32 nice_match_length;
@@ -164,44 +149,35 @@ struct lzms_compressor_params {
        u32 optim_array_length;
 };
 
-/* State of the LZMS compressor.  */
+/* State of the LZMS compressor  */
 struct lzms_compressor {
-       /* Pointer to a buffer holding the preprocessed data to compress.  */
-       u8 *window;
 
-       /* Current position in @buffer.  */
-       u32 cur_window_pos;
+       /* Internal compression parameters  */
+       struct lzms_compressor_params params;
 
-       /* Size of the data in @buffer.  */
-       u32 window_size;
+       /* Data currently being compressed  */
+       u8 *cur_window;
+       u32 cur_window_size;
 
-       /* Lempel-Ziv match-finder.  */
+       /* Lempel-Ziv match-finder  */
        struct lz_mf *mf;
 
-       /* Temporary space to store found matches.  */
+       /* Temporary space to store found matches  */
        struct lz_match *matches;
 
-       /* Match-chooser data.  */
+       /* Per-position data for near-optimal parsing  */
        struct lzms_mc_pos_data *optimum;
-       unsigned optimum_cur_idx;
-       unsigned optimum_end_idx;
-
-       /* Maximum block size this compressor instantiation allows.  This is the
-        * allocated size of @window.  */
-       u32 max_block_size;
-
-       /* Compression parameters.  */
-       struct lzms_compressor_params params;
+       struct lzms_mc_pos_data *optimum_end;
 
        /* Raw range encoder which outputs to the beginning of the compressed
-        * data buffer, proceeding forwards.  */
+        * data buffer, proceeding forwards  */
        struct lzms_range_encoder_raw rc;
 
        /* Bitstream which outputs to the end of the compressed data buffer,
-        * proceeding backwards.  */
+        * proceeding backwards  */
        struct lzms_output_bitstream os;
 
-       /* Range encoders.  */
+       /* Range encoders  */
        struct lzms_range_encoder main_range_encoder;
        struct lzms_range_encoder match_range_encoder;
        struct lzms_range_encoder lz_match_range_encoder;
@@ -209,33 +185,79 @@ struct lzms_compressor {
        struct lzms_range_encoder delta_match_range_encoder;
        struct lzms_range_encoder delta_repeat_match_range_encoders[LZMS_NUM_RECENT_OFFSETS - 1];
 
-       /* Huffman encoders.  */
+       /* Huffman encoders  */
        struct lzms_huffman_encoder literal_encoder;
        struct lzms_huffman_encoder lz_offset_encoder;
        struct lzms_huffman_encoder length_encoder;
        struct lzms_huffman_encoder delta_power_encoder;
        struct lzms_huffman_encoder delta_offset_encoder;
 
-       /* LRU (least-recently-used) queues for match information.  */
-       struct lzms_lru_queues lru;
-
-       /* Used for preprocessing.  */
+       /* Used for preprocessing  */
        s32 last_target_usages[65536];
+
+#define LZMS_NUM_FAST_LENGTHS 256
+       /* Table: length => length slot for small lengths  */
+       u8 length_slot_fast[LZMS_NUM_FAST_LENGTHS];
+
+       /* Table: length => current cost for small match lengths  */
+       u32 length_cost_fast[LZMS_NUM_FAST_LENGTHS];
+
+#define LZMS_NUM_FAST_OFFSETS 32768
+       /* Table: offset => offset slot for small offsets  */
+       u8 offset_slot_fast[LZMS_NUM_FAST_OFFSETS];
 };
 
+/*
+ * Match chooser position data:
+ *
+ * An array of these structures is used during the near-optimal match-choosing
+ * algorithm.  They correspond to consecutive positions in the window and are
+ * used to keep track of the cost to reach each position, and the match/literal
+ * choices that need to be chosen to reach that position.
+ */
 struct lzms_mc_pos_data {
+
+       /* The cost, in bits, of the lowest-cost path that has been found to
+        * reach this position.  This can change as progressively lower cost
+        * paths are found to reach this position.  */
        u32 cost;
-#define MC_INFINITE_COST ((u32)~0UL)
-       union {
-               struct {
-                       u32 link;
-                       u32 match_offset;
-               } prev;
-               struct {
-                       u32 link;
-                       u32 match_offset;
-               } next;
-       };
+#define MC_INFINITE_COST UINT32_MAX
+
+       /* The match or literal that was taken to reach this position.  This can
+        * change as progressively lower cost paths are found to reach this
+        * position.
+        *
+        * This variable is divided into two bitfields.
+        *
+        * Literals:
+        *      Low bits are 1, high bits are the literal.
+        *
+        * Explicit offset matches:
+        *      Low bits are the match length, high bits are the offset plus 2.
+        *
+        * Repeat offset matches:
+        *      Low bits are the match length, high bits are the queue index.
+        */
+       u64 mc_item_data;
+#define MC_OFFSET_SHIFT 32
+#define MC_LEN_MASK (((u64)1 << MC_OFFSET_SHIFT) - 1)
+
+       /* The LZMS adaptive state that exists at this position.  This is filled
+        * in lazily, only after the minimum-cost path to this position is
+        * found.
+        *
+        * Note: the way we handle this adaptive state in the "minimum-cost"
+        * parse is actually only an approximation.  It's possible for the
+        * globally optimal, minimum cost path to contain a prefix, ending at a
+        * position, where that path prefix is *not* the minimum cost path to
+        * that position.  This can happen if such a path prefix results in a
+        * different adaptive state which results in lower costs later.  We do
+        * not solve this problem; we only consider the lowest cost to reach
+        * each position, which seems to be an acceptable approximation.
+        *
+        * Note: this adaptive state also does not include the probability
+        * entries or current Huffman codewords.  Those aren't maintained
+        * per-position and are only updated occasionally.  */
        struct lzms_adaptive_state {
                struct lzms_lz_lru_queues lru;
                u8 main_state;
@@ -245,60 +267,110 @@ struct lzms_mc_pos_data {
        } state;
 };
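/*
 * Illustrative sketch (mc_pack() is a hypothetical helper): how the bitfield
 * layout documented above is packed.  A literal byte 'lit' is stored as
 * mc_pack(lit, 1); a repeat offset match as mc_pack(queue_index, length); an
 * explicit offset match as mc_pack(offset + LZMS_OFFSET_OFFSET, length),
 * where the bias moves explicit offsets past the queue indices.
 * lzms_encode_item() below dispatches on exactly these encodings.
 */
static inline u64
mc_pack(u32 offset_data, u32 len)
{
        return ((u64)offset_data << MC_OFFSET_SHIFT) | len;
}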
 
-/* Initialize the output bitstream @os to write forwards to the specified
+static void
+lzms_init_fast_slots(struct lzms_compressor *c)
+{
+       /* Create table mapping small lengths to length slots.  */
+       for (unsigned slot = 0, i = 0; i < LZMS_NUM_FAST_LENGTHS; i++) {
+               while (i >= lzms_length_slot_base[slot + 1])
+                       slot++;
+               c->length_slot_fast[i] = slot;
+       }
+
+       /* Create table mapping small offsets to offset slots.  */
+       for (unsigned slot = 0, i = 0; i < LZMS_NUM_FAST_OFFSETS; i++) {
+               while (i >= lzms_offset_slot_base[slot + 1])
+                       slot++;
+               c->offset_slot_fast[i] = slot;
+       }
+}
+
+static inline unsigned
+lzms_get_length_slot_fast(const struct lzms_compressor *c, u32 length)
+{
+       if (likely(length < LZMS_NUM_FAST_LENGTHS))
+               return c->length_slot_fast[length];
+       else
+               return lzms_get_length_slot(length);
+}
+
+static inline unsigned
+lzms_get_offset_slot_fast(const struct lzms_compressor *c, u32 offset)
+{
+       if (offset < LZMS_NUM_FAST_OFFSETS)
+               return c->offset_slot_fast[offset];
+       else
+               return lzms_get_offset_slot(offset);
+}
+
+/* Initialize the output bitstream @os to write backwards to the specified
  * compressed data buffer @out that is @out_limit 16-bit integers long.  */
 static void
 lzms_output_bitstream_init(struct lzms_output_bitstream *os,
                           le16 *out, size_t out_limit)
 {
        os->bitbuf = 0;
-       os->num_free_bits = 16;
-       os->out = out + out_limit;
-       os->num_le16_remaining = out_limit;
-       os->overrun = false;
+       os->bitcount = 0;
+       os->next = out + out_limit;
+       os->begin = out;
 }
 
-/* Write @num_bits bits, contained in the low @num_bits bits of @bits (ordered
- * from high-order to low-order), to the output bitstream @os.  */
-static void
-lzms_output_bitstream_put_bits(struct lzms_output_bitstream *os,
-                              u32 bits, unsigned num_bits)
+/*
+ * Write some bits, contained in the low @num_bits bits of @bits (ordered from
+ * high-order to low-order), to the output bitstream @os.
+ *
+ * @max_num_bits is a compile-time constant that specifies the maximum number of
+ * bits that can ever be written at this call site.
+ */
+static inline void
+lzms_output_bitstream_put_varbits(struct lzms_output_bitstream *os,
+                                 u32 bits, unsigned num_bits,
+                                 unsigned max_num_bits)
 {
-       bits &= (1U << num_bits) - 1;
+       LZMS_ASSERT(num_bits <= 48);
 
-       while (num_bits > os->num_free_bits) {
+       /* Add the bits to the bit buffer variable.  */
+       os->bitcount += num_bits;
+       os->bitbuf = (os->bitbuf << num_bits) | bits;
 
-               if (unlikely(os->num_le16_remaining == 0)) {
-                       os->overrun = true;
-                       return;
-               }
+       /* Check whether any coding units need to be written.  */
+       while (os->bitcount >= 16) {
 
-               unsigned num_fill_bits = os->num_free_bits;
+               os->bitcount -= 16;
 
-               os->bitbuf <<= num_fill_bits;
-               os->bitbuf |= bits >> (num_bits - num_fill_bits);
+               /* Write a coding unit, unless it would underflow the buffer. */
+               if (os->next != os->begin)
+                       *--os->next = cpu_to_le16(os->bitbuf >> os->bitcount);
 
-               *--os->out = cpu_to_le16(os->bitbuf);
-               --os->num_le16_remaining;
-
-               os->num_free_bits = 16;
-               num_bits -= num_fill_bits;
-               bits &= (1U << num_bits) - 1;
+               /* Optimization for call sites that never write more than 16
+                * bits at once.  */
+               if (max_num_bits <= 16)
+                       break;
        }
-       os->bitbuf <<= num_bits;
-       os->bitbuf |= bits;
-       os->num_free_bits -= num_bits;
+}
+
+/* Use when @num_bits is a compile-time constant.  Otherwise use
+ * lzms_output_bitstream_put_bits().  */
+static inline void
+lzms_output_bitstream_put_bits(struct lzms_output_bitstream *os,
+                              u32 bits, unsigned num_bits)
+{
+       lzms_output_bitstream_put_varbits(os, bits, num_bits, num_bits);
 }
 
 /* Flush the output bitstream, ensuring that all bits written to it have been
- * written to memory.  Returns %true if all bits were output successfully, or
- * %false if an overrun occurred.  */
+ * written to memory.  Returns %true if all bits have been output successfully,
+ * or %false if an overrun occurred.  */
 static bool
 lzms_output_bitstream_flush(struct lzms_output_bitstream *os)
 {
-       if (os->num_free_bits != 16)
-               lzms_output_bitstream_put_bits(os, 0, os->num_free_bits + 1);
-       return !os->overrun;
+       if (os->next == os->begin)
+               return false;
+
+       if (os->bitcount != 0)
+               *--os->next = cpu_to_le16(os->bitbuf << (16 - os->bitcount));
+
+       return true;
 }
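/*
 * Worked example (illustrative only): bits accumulate in @bitbuf and are
 * emitted 16 at a time; the flush left-justifies the final partial coding
 * unit.  Writing two 1 bits into an empty 4-unit buffer and flushing stores
 * a single coding unit at the very end of the buffer:
 */
static void
lzms_bitstream_example(le16 out[4])
{
        struct lzms_output_bitstream os;

        lzms_output_bitstream_init(&os, out, 4);
        lzms_output_bitstream_put_bits(&os, 0x3, 2);
        lzms_output_bitstream_flush(&os);

        /* Now out[3] == cpu_to_le16(0xc000), and out[0..2] were never
         * touched, so they remain available to the forward-moving range
         * encoder.  */
}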
 
 /* Initialize the range encoder @rc to write forwards to the specified
@@ -311,10 +383,9 @@ lzms_range_encoder_raw_init(struct lzms_range_encoder_raw *rc,
        rc->range = 0xffffffff;
        rc->cache = 0;
        rc->cache_size = 1;
-       rc->out = out;
-       rc->num_le16_remaining = out_limit;
-       rc->first = true;
-       rc->overrun = false;
+       rc->begin = out;
+       rc->next = out - 1;
+       rc->end = out + out_limit;
 }
 
 /*
@@ -334,26 +405,19 @@ lzms_range_encoder_raw_init(struct lzms_range_encoder_raw *rc,
 static void
 lzms_range_encoder_raw_shift_low(struct lzms_range_encoder_raw *rc)
 {
-       LZMS_DEBUG("low=%"PRIx64", cache=%"PRIx64", cache_size=%u",
-                  rc->low, rc->cache, rc->cache_size);
        if ((u32)(rc->low) < 0xffff0000 ||
            (u32)(rc->low >> 32) != 0)
        {
                /* Carry not needed (rc->low < 0xffff0000), or carry occurred
                 * ((rc->low >> 32) != 0, a.k.a. the carry bit is 1).  */
                do {
-                       if (!rc->first) {
-                               if (rc->num_le16_remaining == 0) {
-                                       rc->overrun = true;
-                                       return;
-                               }
-                               *rc->out++ = cpu_to_le16(rc->cache +
-                                                        (u16)(rc->low >> 32));
-                               --rc->num_le16_remaining;
+                       if (likely(rc->next >= rc->begin)) {
+                               if (rc->next != rc->end)
+                                       *rc->next++ = cpu_to_le16(rc->cache +
+                                                                 (u16)(rc->low >> 32));
                        } else {
-                               rc->first = false;
+                               rc->next++;
                        }
-
                        rc->cache = 0xffff;
                } while (--rc->cache_size != 0);
 
@@ -377,15 +441,15 @@ lzms_range_encoder_raw_flush(struct lzms_range_encoder_raw *rc)
 {
        for (unsigned i = 0; i < 4; i++)
                lzms_range_encoder_raw_shift_low(rc);
-       return !rc->overrun;
+       return rc->next != rc->end;
 }
 
 /* Encode the next bit using the range encoder (raw version).
  *
  * @prob is the chance out of LZMS_PROBABILITY_MAX that the next bit is 0.  */
-static void
-lzms_range_encoder_raw_encode_bit(struct lzms_range_encoder_raw *rc, int bit,
-                                 u32 prob)
+static inline void
+lzms_range_encoder_raw_encode_bit(struct lzms_range_encoder_raw *rc,
+                                 int bit, u32 prob)
 {
        lzms_range_encoder_raw_normalize(rc);
 
@@ -400,7 +464,7 @@ lzms_range_encoder_raw_encode_bit(struct lzms_range_encoder_raw *rc, int bit,
 
 /* Encode a bit using the specified range encoder. This wraps around
  * lzms_range_encoder_raw_encode_bit() to handle using and updating the
- * appropriate probability table.  */
+ * appropriate state and probability entry.  */
 static void
 lzms_range_encode_bit(struct lzms_range_encoder *enc, int bit)
 {
@@ -410,207 +474,197 @@ lzms_range_encode_bit(struct lzms_range_encoder *enc, int bit)
        /* Load the probability entry corresponding to the current state.  */
        prob_entry = &enc->prob_entries[enc->state];
 
-       /* Treat the number of zero bits in the most recently encoded
-        * LZMS_PROBABILITY_MAX bits with this probability entry as the chance,
-        * out of LZMS_PROBABILITY_MAX, that the next bit will be a 0.  However,
-        * don't allow 0% or 100% probabilities.  */
-       prob = prob_entry->num_recent_zero_bits;
-       if (prob == 0)
-               prob = 1;
-       else if (prob == LZMS_PROBABILITY_MAX)
-               prob = LZMS_PROBABILITY_MAX - 1;
-
-       /* Encode the next bit.  */
+       /* Update the state based on the next bit.  */
+       enc->state = ((enc->state << 1) | bit) & enc->mask;
+
+       /* Get the probability that the bit is 0.  */
+       prob = lzms_get_probability(prob_entry);
+
+       /* Update the probability entry.  */
+       lzms_update_probability_entry(prob_entry, bit);
+
+       /* Encode the bit.  */
        lzms_range_encoder_raw_encode_bit(enc->rc, bit, prob);
+}
 
-       /* Update the state based on the newly encoded bit.  */
-       enc->state = ((enc->state << 1) | bit) & enc->mask;
+/* Called when an adaptive Huffman code needs to be rebuilt.  */
+static void
+lzms_rebuild_huffman_code(struct lzms_huffman_encoder *enc)
+{
+       make_canonical_huffman_code(enc->num_syms,
+                                   LZMS_MAX_CODEWORD_LEN,
+                                   enc->sym_freqs,
+                                   enc->lens,
+                                   enc->codewords);
 
-       /* Update the recent bits, including the cached count of 0's.  */
-       BUILD_BUG_ON(LZMS_PROBABILITY_MAX > sizeof(prob_entry->recent_bits) * 8);
-       if (bit == 0) {
-               if (prob_entry->recent_bits & (1ULL << (LZMS_PROBABILITY_MAX - 1))) {
-                       /* Replacing 1 bit with 0 bit; increment the zero count.
-                        */
-                       prob_entry->num_recent_zero_bits++;
-               }
-       } else {
-               if (!(prob_entry->recent_bits & (1ULL << (LZMS_PROBABILITY_MAX - 1)))) {
-                       /* Replacing 0 bit with 1 bit; decrement the zero count.
-                        */
-                       prob_entry->num_recent_zero_bits--;
-               }
+       /* Dilute the frequencies.  */
+       for (unsigned i = 0; i < enc->num_syms; i++) {
+               enc->sym_freqs[i] >>= 1;
+               enc->sym_freqs[i] += 1;
        }
-       prob_entry->recent_bits = (prob_entry->recent_bits << 1) | bit;
+       enc->num_syms_written = 0;
 }
 
 /* Encode a symbol using the specified Huffman encoder.  */
-static void
-lzms_huffman_encode_symbol(struct lzms_huffman_encoder *enc, u32 sym)
+static inline void
+lzms_huffman_encode_symbol(struct lzms_huffman_encoder *enc, unsigned sym)
 {
-       LZMS_ASSERT(sym < enc->num_syms);
-       lzms_output_bitstream_put_bits(enc->os,
-                                      enc->codewords[sym],
-                                      enc->lens[sym]);
+       lzms_output_bitstream_put_varbits(enc->os,
+                                         enc->codewords[sym],
+                                         enc->lens[sym],
+                                         LZMS_MAX_CODEWORD_LEN);
        ++enc->sym_freqs[sym];
-       if (++enc->num_syms_written == enc->rebuild_freq) {
-               /* Adaptive code needs to be rebuilt.  */
-               LZMS_DEBUG("Rebuilding code (num_syms=%u)", enc->num_syms);
-               make_canonical_huffman_code(enc->num_syms,
-                                           LZMS_MAX_CODEWORD_LEN,
-                                           enc->sym_freqs,
-                                           enc->lens,
-                                           enc->codewords);
-
-               /* Dilute the frequencies.  */
-               for (unsigned i = 0; i < enc->num_syms; i++) {
-                       enc->sym_freqs[i] >>= 1;
-                       enc->sym_freqs[i] += 1;
-               }
-               enc->num_syms_written = 0;
-       }
+       if (++enc->num_syms_written == enc->rebuild_freq)
+               lzms_rebuild_huffman_code(enc);
 }
 
 static void
-lzms_encode_length(struct lzms_huffman_encoder *enc, u32 length)
+lzms_update_fast_length_costs(struct lzms_compressor *c);
+
+/* Encode a match length.  */
+static void
+lzms_encode_length(struct lzms_compressor *c, u32 length)
 {
        unsigned slot;
        unsigned num_extra_bits;
        u32 extra_bits;
 
-       slot = lzms_get_length_slot(length);
+       slot = lzms_get_length_slot_fast(c, length);
 
+       extra_bits = length - lzms_length_slot_base[slot];
        num_extra_bits = lzms_extra_length_bits[slot];
 
-       extra_bits = length - lzms_length_slot_base[slot];
+       lzms_huffman_encode_symbol(&c->length_encoder, slot);
+       if (c->length_encoder.num_syms_written == 0)
+               lzms_update_fast_length_costs(c);
 
-       lzms_huffman_encode_symbol(enc, slot);
-       lzms_output_bitstream_put_bits(enc->os, extra_bits, num_extra_bits);
+       lzms_output_bitstream_put_varbits(c->length_encoder.os,
+                                         extra_bits, num_extra_bits, 30);
 }
 
+/* Encode an LZ match offset.  */
 static void
-lzms_encode_offset(struct lzms_huffman_encoder *enc, u32 offset)
+lzms_encode_lz_offset(struct lzms_compressor *c, u32 offset)
 {
        unsigned slot;
        unsigned num_extra_bits;
        u32 extra_bits;
 
-       slot = lzms_get_position_slot(offset);
-
-       num_extra_bits = lzms_extra_position_bits[slot];
+       slot = lzms_get_offset_slot_fast(c, offset);
 
-       extra_bits = offset - lzms_position_slot_base[slot];
+       extra_bits = offset - lzms_offset_slot_base[slot];
+       num_extra_bits = lzms_extra_offset_bits[slot];
 
-       lzms_huffman_encode_symbol(enc, slot);
-       lzms_output_bitstream_put_bits(enc->os, extra_bits, num_extra_bits);
-}
-
-static void
-lzms_begin_encode_item(struct lzms_compressor *ctx)
-{
-       ctx->lru.lz.upcoming_offset = 0;
-       ctx->lru.delta.upcoming_offset = 0;
-       ctx->lru.delta.upcoming_power = 0;
-}
-
-static void
-lzms_end_encode_item(struct lzms_compressor *ctx, u32 length)
-{
-       LZMS_ASSERT(ctx->window_size - ctx->cur_window_pos >= length);
-       ctx->cur_window_pos += length;
-       lzms_update_lru_queues(&ctx->lru);
+       lzms_huffman_encode_symbol(&c->lz_offset_encoder, slot);
+       lzms_output_bitstream_put_varbits(c->lz_offset_encoder.os,
+                                         extra_bits, num_extra_bits, 30);
 }
 
 /* Encode a literal byte.  */
 static void
-lzms_encode_literal(struct lzms_compressor *ctx, u8 literal)
+lzms_encode_literal(struct lzms_compressor *c, unsigned literal)
 {
-       LZMS_DEBUG("Position %u: Encoding literal 0x%02x ('%c')",
-                  ctx->cur_window_pos, literal, literal);
-
-       lzms_begin_encode_item(ctx);
-
        /* Main bit: 0 = a literal, not a match.  */
-       lzms_range_encode_bit(&ctx->main_range_encoder, 0);
+       lzms_range_encode_bit(&c->main_range_encoder, 0);
 
        /* Encode the literal using the current literal Huffman code.  */
-       lzms_huffman_encode_symbol(&ctx->literal_encoder, literal);
-
-       lzms_end_encode_item(ctx, 1);
+       lzms_huffman_encode_symbol(&c->literal_encoder, literal);
 }
 
-/* Encode a (length, offset) pair (LZ match).  */
+/* Encode an LZ repeat offset match.  */
 static void
-lzms_encode_lz_match(struct lzms_compressor *ctx, u32 length, u32 offset)
+lzms_encode_lz_repeat_offset_match(struct lzms_compressor *c,
+                                  u32 length, unsigned rep_index)
 {
-       int recent_offset_idx;
-
-       LZMS_DEBUG("Position %u: Encoding LZ match {length=%u, offset=%u}",
-                  ctx->cur_window_pos, length, offset);
-
-       LZMS_ASSERT(length <= ctx->window_size - ctx->cur_window_pos);
-       LZMS_ASSERT(offset <= ctx->cur_window_pos);
-       LZMS_ASSERT(!memcmp(&ctx->window[ctx->cur_window_pos],
-                           &ctx->window[ctx->cur_window_pos - offset],
-                           length));
-
-       lzms_begin_encode_item(ctx);
+       unsigned i;
 
        /* Main bit: 1 = a match, not a literal.  */
-       lzms_range_encode_bit(&ctx->main_range_encoder, 1);
+       lzms_range_encode_bit(&c->main_range_encoder, 1);
 
        /* Match bit: 0 = an LZ match, not a delta match.  */
-       lzms_range_encode_bit(&ctx->match_range_encoder, 0);
+       lzms_range_encode_bit(&c->match_range_encoder, 0);
 
-       /* Determine if the offset can be represented as a recent offset.  */
-       for (recent_offset_idx = 0;
-            recent_offset_idx < LZMS_NUM_RECENT_OFFSETS;
-            recent_offset_idx++)
-               if (offset == ctx->lru.lz.recent_offsets[recent_offset_idx])
-                       break;
+       /* LZ match bit: 1 = repeat offset, not an explicit offset.  */
+       lzms_range_encode_bit(&c->lz_match_range_encoder, 1);
 
-       if (recent_offset_idx == LZMS_NUM_RECENT_OFFSETS) {
-               /* Explicit offset.  */
+       /* Encode the repeat offset index.  A 1 bit is encoded for each index
+        * passed up.  This sequence of 1 bits is terminated by a 0 bit, or
+        * automatically when (LZMS_NUM_RECENT_OFFSETS - 1) 1 bits have been
+        * encoded.  */
+       for (i = 0; i < rep_index; i++)
+               lzms_range_encode_bit(&c->lz_repeat_match_range_encoders[i], 1);
 
-               /* LZ match bit: 0 = explicit offset, not a recent offset.  */
-               lzms_range_encode_bit(&ctx->lz_match_range_encoder, 0);
+       if (i < LZMS_NUM_RECENT_OFFSETS - 1)
+               lzms_range_encode_bit(&c->lz_repeat_match_range_encoders[i], 0);
 
-               /* Encode the match offset.  */
-               lzms_encode_offset(&ctx->lz_offset_encoder, offset);
-       } else {
-               int i;
-
-               /* Recent offset.  */
+       /* Encode the match length.  */
+       lzms_encode_length(c, length);
+}
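/*
 * Worked example: with LZMS_NUM_RECENT_OFFSETS == 3, the loop above codes the
 * repeat offset index as follows, each bit through its own entry in
 * lz_repeat_match_range_encoders[]:
 *
 *      rep_index 0:  0
 *      rep_index 1:  1 0
 *      rep_index 2:  1 1    (no terminating 0 bit; the index is implied)
 */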
 
-               /* LZ match bit: 1 = recent offset, not an explicit offset.  */
-               lzms_range_encode_bit(&ctx->lz_match_range_encoder, 1);
+/* Encode an LZ explicit offset match.  */
+static void
+lzms_encode_lz_explicit_offset_match(struct lzms_compressor *c,
+                                    u32 length, u32 offset)
+{
+       /* Main bit: 1 = a match, not a literal.  */
+       lzms_range_encode_bit(&c->main_range_encoder, 1);
 
-               /* Encode the recent offset index.  A 1 bit is encoded for each
-                * index passed up.  This sequence of 1 bits is terminated by a
-                * 0 bit, or automatically when (LZMS_NUM_RECENT_OFFSETS - 1) 1
-                * bits have been encoded.  */
-               for (i = 0; i < recent_offset_idx; i++)
-                       lzms_range_encode_bit(&ctx->lz_repeat_match_range_encoders[i], 1);
+       /* Match bit: 0 = an LZ match, not a delta match.  */
+       lzms_range_encode_bit(&c->match_range_encoder, 0);
 
-               if (i < LZMS_NUM_RECENT_OFFSETS - 1)
-                       lzms_range_encode_bit(&ctx->lz_repeat_match_range_encoders[i], 0);
+       /* LZ match bit: 0 = explicit offset, not a repeat offset.  */
+       lzms_range_encode_bit(&c->lz_match_range_encoder, 0);
 
-               /* Initial update of the LZ match offset LRU queue.  */
-               for (; i < LZMS_NUM_RECENT_OFFSETS; i++)
-                       ctx->lru.lz.recent_offsets[i] = ctx->lru.lz.recent_offsets[i + 1];
-       }
+       /* Encode the match offset.  */
+       lzms_encode_lz_offset(c, offset);
 
        /* Encode the match length.  */
-       lzms_encode_length(&ctx->length_encoder, length);
+       lzms_encode_length(c, length);
+}
 
-       /* Save the match offset for later insertion at the front of the LZ
-        * match offset LRU queue.  */
-       ctx->lru.lz.upcoming_offset = offset;
+static void
+lzms_encode_item(struct lzms_compressor *c, u64 mc_item_data)
+{
+       u32 len = mc_item_data & MC_LEN_MASK;
+       u32 offset_data = mc_item_data >> MC_OFFSET_SHIFT;
 
-       lzms_end_encode_item(ctx, length);
+       if (len == 1)
+               lzms_encode_literal(c, offset_data);
+       else if (offset_data < LZMS_NUM_RECENT_OFFSETS)
+               lzms_encode_lz_repeat_offset_match(c, len, offset_data);
+       else
+               lzms_encode_lz_explicit_offset_match(c, len, offset_data - LZMS_OFFSET_OFFSET);
 }
 
-#define LZMS_COST_SHIFT 5
+/* Encode a list of matches and literals chosen by the parsing algorithm.  */
+static void
+lzms_encode_item_list(struct lzms_compressor *c,
+                     struct lzms_mc_pos_data *cur_optimum_ptr)
+{
+       struct lzms_mc_pos_data *end_optimum_ptr;
+       u64 saved_item;
+       u64 item;
+
+       /* The list is currently in reverse order (last item to first item).
+        * Reverse it.  */
+       end_optimum_ptr = cur_optimum_ptr;
+       saved_item = cur_optimum_ptr->mc_item_data;
+       do {
+               item = saved_item;
+               cur_optimum_ptr -= item & MC_LEN_MASK;
+               saved_item = cur_optimum_ptr->mc_item_data;
+               cur_optimum_ptr->mc_item_data = item;
+       } while (cur_optimum_ptr != c->optimum);
+
+       /* Walk the list of items from beginning to end, encoding each item.  */
+       do {
+               lzms_encode_item(c, cur_optimum_ptr->mc_item_data);
+               cur_optimum_ptr += (cur_optimum_ptr->mc_item_data) & MC_LEN_MASK;
+       } while (cur_optimum_ptr != end_optimum_ptr);
+}
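/*
 * Worked example: before the reversal above, optimum[k].mc_item_data
 * describes the item *ending* at position k, and stepping back by
 * (item & MC_LEN_MASK) reaches the item before it.  The first loop relocates
 * each item to the position where it *begins*, so the second loop can walk
 * forward.  For the parse "literal, match of length 3, literal":
 *
 *      before:  optimum[1] = lit0   optimum[4] = match   optimum[5] = lit1
 *      after:   optimum[0] = lit0   optimum[1] = match   optimum[4] = lit1
 */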
+
+/* Each bit costs 1 << LZMS_COST_SHIFT units.  */
+#define LZMS_COST_SHIFT 6
 
 /*#define LZMS_RC_COSTS_USE_FLOATING_POINT*/
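/*
 * Worked example: with LZMS_COST_SHIFT == 6, costs are fixed-point with 64
 * units per bit.  A Huffman symbol whose codeword is 5 bits long costs
 * 5 << 6 == 320 units, and a range-coded bit with probability 1/2 costs
 * exactly 1 bit == 64 units; lzms_rc_costs[] tabulates the (possibly
 * fractional) bit cost for every probability value.
 */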
 
@@ -681,24 +735,14 @@ lzms_init_rc_costs(void)
        pthread_once(&once, lzms_do_init_rc_costs);
 }
 
-/*
- * Return the cost to range-encode the specified bit when in the specified
- * state.
- *
- * @enc                The range encoder to use.
- * @cur_state  Current state, which indicates the probability entry to choose.
- *             Updated by this function.
- * @bit                The bit to encode (0 or 1).
- */
-static u32
-lzms_rc_bit_cost(const struct lzms_range_encoder *enc, u8 *cur_state, int bit)
+/* Return the cost to range-encode the specified bit from the specified state.*/
+static inline u32
+lzms_rc_bit_cost(const struct lzms_range_encoder *enc, u8 cur_state, int bit)
 {
        u32 prob_zero;
        u32 prob_correct;
 
-       prob_zero = enc->prob_entries[*cur_state & enc->mask].num_recent_zero_bits;
-
-       *cur_state = (*cur_state << 1) | bit;
+       prob_zero = enc->prob_entries[cur_state].num_recent_zero_bits;
 
        if (bit == 0)
                prob_correct = prob_zero;
@@ -708,444 +752,487 @@ lzms_rc_bit_cost(const struct lzms_range_encoder *enc, u8 *cur_state, int bit)
        return lzms_rc_costs[prob_correct];
 }
 
-static u32
-lzms_huffman_symbol_cost(const struct lzms_huffman_encoder *enc, u32 sym)
+/* Return the cost to Huffman-encode the specified symbol.  */
+static inline u32
+lzms_huffman_symbol_cost(const struct lzms_huffman_encoder *enc, unsigned sym)
 {
-       return enc->lens[sym] << LZMS_COST_SHIFT;
+       return (u32)enc->lens[sym] << LZMS_COST_SHIFT;
 }
 
-static u32
-lzms_offset_cost(const struct lzms_huffman_encoder *enc, u32 offset)
+/* Return the cost to encode the specified literal byte.  */
+static inline u32
+lzms_literal_cost(const struct lzms_compressor *c, unsigned literal,
+                 const struct lzms_adaptive_state *state)
 {
-       u32 slot;
-       u32 num_extra_bits;
-       u32 cost = 0;
-
-       slot = lzms_get_position_slot(offset);
-
-       cost += lzms_huffman_symbol_cost(enc, slot);
-
-       num_extra_bits = lzms_extra_position_bits[slot];
-
-       cost += num_extra_bits << LZMS_COST_SHIFT;
-
-       return cost;
+       return lzms_rc_bit_cost(&c->main_range_encoder, state->main_state, 0) +
+              lzms_huffman_symbol_cost(&c->literal_encoder, literal);
 }
 
-static u32
-lzms_get_length_cost(const struct lzms_huffman_encoder *enc, u32 length)
+/* Update the table that directly provides the costs for small lengths.  */
+static void
+lzms_update_fast_length_costs(struct lzms_compressor *c)
 {
-       u32 slot;
-       u32 num_extra_bits;
+       u32 len;
+       int slot = -1;
        u32 cost = 0;
 
-       slot = lzms_get_length_slot(length);
+       for (len = 1; len < LZMS_NUM_FAST_LENGTHS; len++) {
 
-       cost += lzms_huffman_symbol_cost(enc, slot);
-
-       num_extra_bits = lzms_extra_length_bits[slot];
-
-       cost += num_extra_bits << LZMS_COST_SHIFT;
+               while (len >= lzms_length_slot_base[slot + 1]) {
+                       slot++;
+                       cost = (u32)(c->length_encoder.lens[slot] +
+                                    lzms_extra_length_bits[slot]) << LZMS_COST_SHIFT;
+               }
 
-       return cost;
+               c->length_cost_fast[len] = cost;
+       }
 }
 
-static u32
-lzms_get_matches(struct lzms_compressor *ctx, struct lz_match **matches_ret)
+/* Return the cost to encode the specified match length, which must be less than
+ * LZMS_NUM_FAST_LENGTHS.  */
+static inline u32
+lzms_fast_length_cost(const struct lzms_compressor *c, u32 length)
 {
-       *matches_ret = ctx->matches;
-       return lz_mf_get_matches(ctx->mf, ctx->matches);
+       LZMS_ASSERT(length < LZMS_NUM_FAST_LENGTHS);
+       return c->length_cost_fast[length];
 }
 
-static void
-lzms_skip_bytes(struct lzms_compressor *ctx, u32 n)
+/* Return the cost to encode the specified LZ match offset.  */
+static inline u32
+lzms_lz_offset_cost(const struct lzms_compressor *c, u32 offset)
 {
-       lz_mf_skip_positions(ctx->mf, n);
+       unsigned slot = lzms_get_offset_slot_fast(c, offset);
+
+       return (u32)(c->lz_offset_encoder.lens[slot] +
+                    lzms_extra_offset_bits[slot]) << LZMS_COST_SHIFT;
 }
 
-static u32
-lzms_get_literal_cost(struct lzms_compressor *ctx,
-                     struct lzms_adaptive_state *state, u8 literal)
+/*
+ * Consider coding the match at repeat offset index @rep_idx.  Consider each
+ * length from the minimum (2) to the full match length (@rep_len).
+ */
+static inline void
+lzms_consider_lz_repeat_offset_match(const struct lzms_compressor *c,
+                                    struct lzms_mc_pos_data *cur_optimum_ptr,
+                                    u32 rep_len, unsigned rep_idx)
 {
-       u32 cost = 0;
-
-       state->lru.upcoming_offset = 0;
-       lzms_update_lz_lru_queues(&state->lru);
+       u32 len;
+       u32 base_cost;
+       u32 cost;
+       unsigned i;
 
-       cost += lzms_rc_bit_cost(&ctx->main_range_encoder,
-                                &state->main_state, 0);
+       base_cost = cur_optimum_ptr->cost;
 
-       cost += lzms_huffman_symbol_cost(&ctx->literal_encoder, literal);
+       base_cost += lzms_rc_bit_cost(&c->main_range_encoder,
+                                     cur_optimum_ptr->state.main_state, 1);
 
-       return cost;
-}
+       base_cost += lzms_rc_bit_cost(&c->match_range_encoder,
+                                     cur_optimum_ptr->state.match_state, 0);
 
-static u32
-lzms_get_lz_match_cost_nolen(struct lzms_compressor *ctx,
-                            struct lzms_adaptive_state *state, u32 offset)
-{
-       u32 cost = 0;
-       int recent_offset_idx;
+       base_cost += lzms_rc_bit_cost(&c->lz_match_range_encoder,
+                                     cur_optimum_ptr->state.lz_match_state, 1);
 
-       cost += lzms_rc_bit_cost(&ctx->main_range_encoder,
-                                &state->main_state, 1);
-       cost += lzms_rc_bit_cost(&ctx->match_range_encoder,
-                                &state->match_state, 0);
+       for (i = 0; i < rep_idx; i++)
+               base_cost += lzms_rc_bit_cost(&c->lz_repeat_match_range_encoders[i],
+                                             cur_optimum_ptr->state.lz_repeat_match_state[i], 1);
 
-       for (recent_offset_idx = 0;
-            recent_offset_idx < LZMS_NUM_RECENT_OFFSETS;
-            recent_offset_idx++)
-               if (offset == state->lru.recent_offsets[recent_offset_idx])
-                       break;
+       if (i < LZMS_NUM_RECENT_OFFSETS - 1)
+               base_cost += lzms_rc_bit_cost(&c->lz_repeat_match_range_encoders[i],
+                                             cur_optimum_ptr->state.lz_repeat_match_state[i], 0);
 
-       if (recent_offset_idx == LZMS_NUM_RECENT_OFFSETS) {
-               /* Explicit offset.  */
-               cost += lzms_rc_bit_cost(&ctx->lz_match_range_encoder,
-                                        &state->lz_match_state, 0);
+       len = 2;
+       do {
+               cost = base_cost + lzms_fast_length_cost(c, len);
+               if (cost < (cur_optimum_ptr + len)->cost) {
+                       (cur_optimum_ptr + len)->mc_item_data =
+                               ((u64)rep_idx << MC_OFFSET_SHIFT) | len;
+                       (cur_optimum_ptr + len)->cost = cost;
+               }
+       } while (++len <= rep_len);
+}
 
-               cost += lzms_offset_cost(&ctx->lz_offset_encoder, offset);
-       } else {
-               int i;
+/*
+ * Consider coding each match in @matches as an explicit offset match.
+ *
+ * @matches must be sorted by strictly increasing length and strictly increasing
+ * offset.  This is guaranteed by the match-finder.
+ *
+ * We consider each length from the minimum (2) to the longest
+ * (matches[num_matches - 1].len).  For each length, we consider only the
+ * smallest offset for which that length is available.  Although this is not
+ * guaranteed to be optimal due to the possibility of a larger offset costing
+ * less than a smaller offset to code, this is a very useful heuristic.
+ */
+static inline void
+lzms_consider_lz_explicit_offset_matches(const struct lzms_compressor *c,
+                                        struct lzms_mc_pos_data *cur_optimum_ptr,
+                                        const struct lz_match matches[],
+                                        u32 num_matches)
+{
+       u32 len;
+       u32 i;
+       u32 base_cost;
+       u32 position_cost;
+       u32 cost;
 
-               /* Recent offset.  */
-               cost += lzms_rc_bit_cost(&ctx->lz_match_range_encoder,
-                                        &state->lz_match_state, 1);
+       base_cost = cur_optimum_ptr->cost;
 
-               for (i = 0; i < recent_offset_idx; i++)
-                       cost += lzms_rc_bit_cost(&ctx->lz_repeat_match_range_encoders[i],
-                                                &state->lz_repeat_match_state[i], 0);
+       base_cost += lzms_rc_bit_cost(&c->main_range_encoder,
+                                     cur_optimum_ptr->state.main_state, 1);
 
-               if (i < LZMS_NUM_RECENT_OFFSETS - 1)
-                       cost += lzms_rc_bit_cost(&ctx->lz_repeat_match_range_encoders[i],
-                                                &state->lz_repeat_match_state[i], 1);
+       base_cost += lzms_rc_bit_cost(&c->match_range_encoder,
+                                     cur_optimum_ptr->state.match_state, 0);
 
+       base_cost += lzms_rc_bit_cost(&c->lz_match_range_encoder,
+                                     cur_optimum_ptr->state.lz_match_state, 0);
+       len = 2;
+       i = 0;
+       do {
+               position_cost = base_cost + lzms_lz_offset_cost(c,
+                                                               matches[i].offset);
+               do {
+                       cost = position_cost + lzms_fast_length_cost(c, len);
+                       if (cost < (cur_optimum_ptr + len)->cost) {
+                               (cur_optimum_ptr + len)->mc_item_data =
+                                       ((u64)(matches[i].offset + LZMS_OFFSET_OFFSET)
+                                               << MC_OFFSET_SHIFT) | len;
+                               (cur_optimum_ptr + len)->cost = cost;
+                       }
+               } while (++len <= matches[i].len);
+       } while (++i != num_matches);
+}
 
-               /* Initial update of the LZ match offset LRU queue.  */
-               for (; i < LZMS_NUM_RECENT_OFFSETS; i++)
-                       state->lru.recent_offsets[i] = state->lru.recent_offsets[i + 1];
-       }
+static void
+lzms_init_adaptive_state(struct lzms_adaptive_state *state)
+{
+       unsigned i;
+
+       lzms_init_lz_lru_queues(&state->lru);
+       state->main_state = 0;
+       state->match_state = 0;
+       state->lz_match_state = 0;
+       for (i = 0; i < LZMS_NUM_RECENT_OFFSETS - 1; i++)
+               state->lz_repeat_match_state[i] = 0;
+}
 
+static inline void
+lzms_update_main_state(struct lzms_adaptive_state *state, int is_match)
+{
+       state->main_state = ((state->main_state << 1) | is_match) % LZMS_NUM_MAIN_STATES;
+}
 
-       state->lru.upcoming_offset = offset;
-       lzms_update_lz_lru_queues(&state->lru);
+static inline void
+lzms_update_match_state(struct lzms_adaptive_state *state, int is_delta)
+{
+       state->match_state = ((state->match_state << 1) | is_delta) % LZMS_NUM_MATCH_STATES;
+}
 
-       return cost;
+static inline void
+lzms_update_lz_match_state(struct lzms_adaptive_state *state, int is_repeat_offset)
+{
+       state->lz_match_state = ((state->lz_match_state << 1) | is_repeat_offset) % LZMS_NUM_LZ_MATCH_STATES;
 }
 
-static u32
-lzms_get_lz_match_cost(struct lzms_compressor *ctx,
-                      struct lzms_adaptive_state *state,
-                      u32 length, u32 offset)
+static inline void
+lzms_update_lz_repeat_match_state(struct lzms_adaptive_state *state, int rep_idx)
 {
-       return lzms_get_lz_match_cost_nolen(ctx, state, offset) +
-              lzms_get_length_cost(&ctx->length_encoder, length);
+       int i;
+
+       for (i = 0; i < rep_idx; i++)
+               state->lz_repeat_match_state[i] =
+                       ((state->lz_repeat_match_state[i] << 1) | 1) %
+                               LZMS_NUM_LZ_REPEAT_MATCH_STATES;
+
+       if (i < LZMS_NUM_RECENT_OFFSETS - 1)
+               state->lz_repeat_match_state[i] =
+                       ((state->lz_repeat_match_state[i] << 1) | 0) %
+                               LZMS_NUM_LZ_REPEAT_MATCH_STATES;
 }
 
-static inline u32
-lzms_repsearch(const u8 * const strptr, const u32 bytes_remaining,
-              const struct lzms_lz_lru_queues *queue, u32 *offset_ret)
+/*
+ * The main near-optimal parsing routine.
+ *
+ * Briefly, the algorithm does an approximate minimum-cost path search to find a
+ * "near-optimal" sequence of matches and literals to output, based on the
+ * current cost model.  The algorithm steps forward, position by position (byte
+ * by byte), and updates the minimum cost path to reach each later position that
+ * can be reached using a match or literal from the current position.  This is
+ * essentially Dijkstra's algorithm in disguise: the graph nodes are positions,
+ * the graph edges are possible matches/literals to code, and the cost of each
+ * edge is the estimated number of bits that will be required to output the
+ * corresponding match or literal.  But one difference is that we actually
+ * compute the lowest-cost path in pieces, where each piece is terminated when
+ * there are no choices to be made.
+ *
+ * Notes:
+ *
+ * - This does not output any delta matches.
+ *
+ * - The costs of literals and matches are estimated using the range encoder
+ *   states and the semi-adaptive Huffman codes.  Except for range encoding
+ *   states, costs are assumed to be constant throughout a single run of the
+ *   parsing algorithm, which can parse up to @optim_array_length bytes of data.
+ *   This introduces a source of inaccuracy because the probabilities and
+ *   Huffman codes can change over this part of the data.
+ */
+static void
+lzms_near_optimal_parse(struct lzms_compressor *c)
 {
+       const u8 *window_ptr;
+       const u8 *window_end;
+       struct lzms_mc_pos_data *cur_optimum_ptr;
+       struct lzms_mc_pos_data *end_optimum_ptr;
+       u32 num_matches;
+       u32 longest_len;
+       u32 rep_max_len;
+       unsigned rep_max_idx;
+       unsigned literal;
+       unsigned i;
+       u32 cost;
        u32 len;
-       unsigned slot = 0;
+       u32 offset_data;
 
-       len = lz_repsearch(strptr, bytes_remaining, UINT32_MAX,
-                          queue->recent_offsets, LZMS_NUM_RECENT_OFFSETS, &slot);
-       *offset_ret = queue->recent_offsets[slot];
-       return len;
-}
+       window_ptr = c->cur_window;
+       window_end = window_ptr + c->cur_window_size;
 
+       lzms_init_adaptive_state(&c->optimum[0].state);
 
-static struct lz_match
-lzms_match_chooser_reverse_list(struct lzms_compressor *ctx, unsigned cur_pos)
-{
-       unsigned prev_link, saved_prev_link;
-       unsigned prev_match_offset, saved_prev_match_offset;
+begin:
+       /* Start building a new list of items, which will correspond to the next
+        * piece of the overall minimum-cost path.  */
 
-       ctx->optimum_end_idx = cur_pos;
+       cur_optimum_ptr = c->optimum;
+       cur_optimum_ptr->cost = 0;
+       end_optimum_ptr = cur_optimum_ptr;
 
-       saved_prev_link = ctx->optimum[cur_pos].prev.link;
-       saved_prev_match_offset = ctx->optimum[cur_pos].prev.match_offset;
+       /* States should currently be consistent with the encoders.  */
+       LZMS_ASSERT(cur_optimum_ptr->state.main_state == c->main_range_encoder.state);
+       LZMS_ASSERT(cur_optimum_ptr->state.match_state == c->match_range_encoder.state);
+       LZMS_ASSERT(cur_optimum_ptr->state.lz_match_state == c->lz_match_range_encoder.state);
+       for (i = 0; i < LZMS_NUM_RECENT_OFFSETS - 1; i++)
+               LZMS_ASSERT(cur_optimum_ptr->state.lz_repeat_match_state[i] ==
+                           c->lz_repeat_match_range_encoders[i].state);
 
-       do {
-               prev_link = saved_prev_link;
-               prev_match_offset = saved_prev_match_offset;
+       if (window_ptr == window_end)
+               return;
 
-               saved_prev_link = ctx->optimum[prev_link].prev.link;
-               saved_prev_match_offset = ctx->optimum[prev_link].prev.match_offset;
+       /* The following loop runs once per byte in the window, except in a
+        * couple of shortcut cases.  */
+       for (;;) {
 
-               ctx->optimum[prev_link].next.link = cur_pos;
-               ctx->optimum[prev_link].next.match_offset = prev_match_offset;
+               /* Find explicit offset matches with the current position.  */
+               num_matches = lz_mf_get_matches(c->mf, c->matches);
 
-               cur_pos = prev_link;
-       } while (cur_pos != 0);
+               if (num_matches) {
+                       /*
+                        * Find the longest repeat offset match with the current
+                        * position.
+                        *
+                        * Heuristics:
+                        *
+                        * - Only search for repeat offset matches if the
+                        *   match-finder already found at least one match.
+                        *
+                        * - Only consider the longest repeat offset match.  It
+                        *   seems to be rare for the optimal parse to include a
+                        *   repeat offset match that doesn't have the longest
+                        *   length (allowing for the possibility that not all
+                        *   of that length is actually used).
+                        */
+                       if (likely(window_ptr - c->cur_window >= LZMS_MAX_INIT_RECENT_OFFSET)) {
+                               BUILD_BUG_ON(LZMS_NUM_RECENT_OFFSETS != 3);
+                               rep_max_len = lz_repsearch3(window_ptr,
+                                                           window_end - window_ptr,
+                                                           cur_optimum_ptr->state.lru.recent_offsets,
+                                                           &rep_max_idx);
+                       } else {
+                               rep_max_len = 0;
+                       }
 
-       ctx->optimum_cur_idx = ctx->optimum[0].next.link;
+                       if (rep_max_len) {
+                               /* If there's a very long repeat offset match,
+                                * choose it immediately.  */
+                               if (rep_max_len >= c->params.nice_match_length) {
 
-       return (struct lz_match)
-               { .len = ctx->optimum_cur_idx,
-                 .offset = ctx->optimum[0].next.match_offset,
-               };
-}
+                                       lz_mf_skip_positions(c->mf, rep_max_len - 1);
+                                       window_ptr += rep_max_len;
 
-/* This is similar to lzx_choose_near_optimal_item() in lzx-compress.c.
- * Read that one if you want to understand it.  */
-static struct lz_match
-lzms_get_near_optimal_item(struct lzms_compressor *ctx)
-{
-       u32 num_matches;
-       struct lz_match *matches;
-       struct lz_match match;
-       u32 longest_len;
-       u32 longest_rep_len;
-       u32 longest_rep_offset;
-       unsigned cur_pos;
-       unsigned end_pos;
-       struct lzms_adaptive_state initial_state;
-
-       if (ctx->optimum_cur_idx != ctx->optimum_end_idx) {
-               match.len = ctx->optimum[ctx->optimum_cur_idx].next.link -
-                                   ctx->optimum_cur_idx;
-               match.offset = ctx->optimum[ctx->optimum_cur_idx].next.match_offset;
-
-               ctx->optimum_cur_idx = ctx->optimum[ctx->optimum_cur_idx].next.link;
-               return match;
-       }
+                                       if (cur_optimum_ptr != c->optimum)
+                                               lzms_encode_item_list(c, cur_optimum_ptr);
 
-       ctx->optimum_cur_idx = 0;
-       ctx->optimum_end_idx = 0;
+                                       lzms_encode_lz_repeat_offset_match(c, rep_max_len,
+                                                                          rep_max_idx);
 
-       if (lz_mf_get_position(ctx->mf) >= LZMS_MAX_INIT_RECENT_OFFSET) {
-               longest_rep_len = lzms_repsearch(lz_mf_get_window_ptr(ctx->mf),
-                                                lz_mf_get_bytes_remaining(ctx->mf),
-                                                &ctx->lru.lz, &longest_rep_offset);
-       } else {
-               longest_rep_len = 0;
-       }
+                                       c->optimum[0].state = cur_optimum_ptr->state;
 
-       if (longest_rep_len >= ctx->params.nice_match_length) {
-               lzms_skip_bytes(ctx, longest_rep_len);
-               return (struct lz_match) {
-                       .len = longest_rep_len,
-                       .offset = longest_rep_offset,
-               };
-       }
+                                       lzms_update_main_state(&c->optimum[0].state, 1);
+                                       lzms_update_match_state(&c->optimum[0].state, 0);
+                                       lzms_update_lz_match_state(&c->optimum[0].state, 1);
+                                       lzms_update_lz_repeat_match_state(&c->optimum[0].state,
+                                                                         rep_max_idx);
 
-       num_matches = lzms_get_matches(ctx, &matches);
+                                       c->optimum[0].state.lru.upcoming_offset =
+                                               c->optimum[0].state.lru.recent_offsets[rep_max_idx];
 
-       if (num_matches) {
-               longest_len = matches[num_matches - 1].len;
-               if (longest_len >= ctx->params.nice_match_length) {
-                       lzms_skip_bytes(ctx, longest_len - 1);
-                       return matches[num_matches - 1];
-               }
-       } else {
-               longest_len = 1;
-       }
+                                       for (i = rep_max_idx; i < LZMS_NUM_RECENT_OFFSETS; i++)
+                                               c->optimum[0].state.lru.recent_offsets[i] =
+                                                       c->optimum[0].state.lru.recent_offsets[i + 1];
 
-       initial_state.lru = ctx->lru.lz;
-       initial_state.main_state = ctx->main_range_encoder.state;
-       initial_state.match_state = ctx->match_range_encoder.state;
-       initial_state.lz_match_state = ctx->lz_match_range_encoder.state;
-       for (int i = 0; i < LZMS_NUM_RECENT_OFFSETS - 1; i++)
-               initial_state.lz_repeat_match_state[i] = ctx->lz_repeat_match_range_encoders[i].state;
+                                       lzms_update_lz_lru_queue(&c->optimum[0].state.lru);
+                                       goto begin;
+                               }
 
-       ctx->optimum[1].state = initial_state;
-       ctx->optimum[1].cost = lzms_get_literal_cost(ctx,
-                                                    &ctx->optimum[1].state,
-                                                    *(lz_mf_get_window_ptr(ctx->mf) - 1));
-       ctx->optimum[1].prev.link = 0;
+                               /* If reaching any positions for the first time,
+                                * initialize their costs to "infinity".  */
+                               while (end_optimum_ptr < cur_optimum_ptr + rep_max_len)
+                                       (++end_optimum_ptr)->cost = MC_INFINITE_COST;
 
-       for (u32 i = 0, len = 2; i < num_matches; i++) {
-               u32 offset = matches[i].offset;
-               struct lzms_adaptive_state state;
-               u32 position_cost;
+                               /* Consider coding a repeat offset match.  */
+                               lzms_consider_lz_repeat_offset_match(c, cur_optimum_ptr,
+                                                                    rep_max_len, rep_max_idx);
+                       }
 
-               state = initial_state;
-               position_cost = 0;
-               position_cost += lzms_get_lz_match_cost_nolen(ctx, &state, offset);
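+                       /* The match-finder returns matches in order of
+                        * increasing length, so the last one is the longest.  */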
+                       longest_len = c->matches[num_matches - 1].len;
 
-               do {
-                       u32 cost;
+                       /* If there's a very long explicit offset match, choose
+                        * it immediately.  */
+                       if (longest_len >= c->params.nice_match_length) {
 
-                       cost = position_cost;
-                       cost += lzms_get_length_cost(&ctx->length_encoder, len);
+                               lz_mf_skip_positions(c->mf, longest_len - 1);
+                               window_ptr += longest_len;
 
-                       ctx->optimum[len].state = state;
-                       ctx->optimum[len].prev.link = 0;
-                       ctx->optimum[len].prev.match_offset = offset;
-                       ctx->optimum[len].cost = cost;
-               } while (++len <= matches[i].len);
-       }
-       end_pos = longest_len;
-
-       if (longest_rep_len) {
-               struct lzms_adaptive_state state;
-               u32 cost;
-
-               while (end_pos < longest_rep_len)
-                       ctx->optimum[++end_pos].cost = MC_INFINITE_COST;
-
-               state = initial_state;
-               cost = lzms_get_lz_match_cost(ctx,
-                                             &state,
-                                             longest_rep_len,
-                                             longest_rep_offset);
-               if (cost <= ctx->optimum[longest_rep_len].cost) {
-                       ctx->optimum[longest_rep_len].state = state;
-                       ctx->optimum[longest_rep_len].prev.link = 0;
-                       ctx->optimum[longest_rep_len].prev.match_offset = longest_rep_offset;
-                       ctx->optimum[longest_rep_len].cost = cost;
-               }
-       }
+                               if (cur_optimum_ptr != c->optimum)
+                                       lzms_encode_item_list(c, cur_optimum_ptr);
 
-       cur_pos = 0;
-       for (;;) {
-               u32 cost;
-               struct lzms_adaptive_state state;
+                               lzms_encode_lz_explicit_offset_match(c, longest_len,
+                                                                    c->matches[num_matches - 1].offset);
 
-               cur_pos++;
+                               c->optimum[0].state = cur_optimum_ptr->state;
 
-               if (cur_pos == end_pos || cur_pos == ctx->params.optim_array_length)
-                       return lzms_match_chooser_reverse_list(ctx, cur_pos);
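+                               /* Update the parser's adaptive state to match
+                                * the item just encoded: main bit 1 (match,
+                                * not literal), match bit 0 (LZ match, not
+                                * delta match), LZ match bit 0 (explicit
+                                * offset, not repeat offset).  */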
+                               lzms_update_main_state(&c->optimum[0].state, 1);
+                               lzms_update_match_state(&c->optimum[0].state, 0);
+                               lzms_update_lz_match_state(&c->optimum[0].state, 0);
 
-               if (lz_mf_get_position(ctx->mf) >= LZMS_MAX_INIT_RECENT_OFFSET) {
-                       longest_rep_len = lzms_repsearch(lz_mf_get_window_ptr(ctx->mf),
-                                                        lz_mf_get_bytes_remaining(ctx->mf),
-                                                        &ctx->optimum[cur_pos].state.lru,
-                                                        &longest_rep_offset);
-               } else {
-                       longest_rep_len = 0;
-               }
+                               c->optimum[0].state.lru.upcoming_offset =
+                                       c->matches[num_matches - 1].offset;
 
-               if (longest_rep_len >= ctx->params.nice_match_length) {
-                       match = lzms_match_chooser_reverse_list(ctx, cur_pos);
+                               lzms_update_lz_lru_queue(&c->optimum[0].state.lru);
+                               goto begin;
+                       }
 
-                       ctx->optimum[cur_pos].next.match_offset = longest_rep_offset;
-                       ctx->optimum[cur_pos].next.link = cur_pos + longest_rep_len;
-                       ctx->optimum_end_idx = cur_pos + longest_rep_len;
+                       /* If reaching any positions for the first time,
+                        * initialize their costs to "infinity".  */
+                       while (end_optimum_ptr < cur_optimum_ptr + longest_len)
+                               (++end_optimum_ptr)->cost = MC_INFINITE_COST;
 
-                       lzms_skip_bytes(ctx, longest_rep_len);
+                       /* Consider coding an explicit offset match.  */
+                       lzms_consider_lz_explicit_offset_matches(c, cur_optimum_ptr,
+                                                                c->matches, num_matches);
+               } else {
+                       /* No matches found.  The only choice at this position
+                        * is to code a literal.  */
 
-                       return match;
+                       if (end_optimum_ptr == cur_optimum_ptr)
+                               (++end_optimum_ptr)->cost = MC_INFINITE_COST;
                }
 
-               num_matches = lzms_get_matches(ctx, &matches);
+               /* Consider coding a literal.
 
-               if (num_matches) {
-                       longest_len = matches[num_matches - 1].len;
-                       if (longest_len >= ctx->params.nice_match_length) {
-                               match = lzms_match_chooser_reverse_list(ctx, cur_pos);
+                * To avoid an extra unpredictable branch, the check for
+                * whether coding a literal is actually preferable is
+                * integrated into the adaptive state update code below.  */
+               literal = *window_ptr++;
+               cost = cur_optimum_ptr->cost +
+                      lzms_literal_cost(c, literal, &cur_optimum_ptr->state);
 
-                               ctx->optimum[cur_pos].next.match_offset =
-                                       matches[num_matches - 1].offset;
-                               ctx->optimum[cur_pos].next.link = cur_pos + longest_len;
-                               ctx->optimum_end_idx = cur_pos + longest_len;
+               /* Advance to the next position.  */
+               cur_optimum_ptr++;
 
-                               lzms_skip_bytes(ctx, longest_len - 1);
+               /* The lowest-cost path to the current position is now known.
+                * Finalize the adaptive state that results from taking this
+                * lowest-cost path.  */
 
-                               return match;
-                       }
-               } else {
-                       longest_len = 1;
-               }
+               if (cost < cur_optimum_ptr->cost) {
+                       /* Literal  */
+                       cur_optimum_ptr->cost = cost;
+                       cur_optimum_ptr->mc_item_data = ((u64)literal << MC_OFFSET_SHIFT) | 1;
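+                       /* (An item's length is packed into the low-order bits
+                        * of mc_item_data and its offset data into the
+                        * high-order bits; a literal is an item of length 1
+                        * with the literal byte in the offset field.)  */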
 
-               while (end_pos < cur_pos + longest_len)
-                       ctx->optimum[++end_pos].cost = MC_INFINITE_COST;
-
-               state = ctx->optimum[cur_pos].state;
-               cost = ctx->optimum[cur_pos].cost +
-                       lzms_get_literal_cost(ctx,
-                                             &state,
-                                             *(lz_mf_get_window_ptr(ctx->mf) - 1));
-               if (cost < ctx->optimum[cur_pos + 1].cost) {
-                       ctx->optimum[cur_pos + 1].state = state;
-                       ctx->optimum[cur_pos + 1].cost = cost;
-                       ctx->optimum[cur_pos + 1].prev.link = cur_pos;
-               }
+                       cur_optimum_ptr->state = (cur_optimum_ptr - 1)->state;
 
-               for (u32 i = 0, len = 2; i < num_matches; i++) {
-                       u32 offset = matches[i].offset;
-                       struct lzms_adaptive_state state;
-                       u32 position_cost;
+                       lzms_update_main_state(&cur_optimum_ptr->state, 0);
 
-                       state = ctx->optimum[cur_pos].state;
-                       position_cost = ctx->optimum[cur_pos].cost;
-                       position_cost += lzms_get_lz_match_cost_nolen(ctx, &state, offset);
+                       cur_optimum_ptr->state.lru.upcoming_offset = 0;
+               } else {
+                       /* LZ match  */
+                       len = cur_optimum_ptr->mc_item_data & MC_LEN_MASK;
+                       offset_data = cur_optimum_ptr->mc_item_data >> MC_OFFSET_SHIFT;
 
-                       do {
-                               u32 cost;
+                       cur_optimum_ptr->state = (cur_optimum_ptr - len)->state;
 
-                               cost = position_cost;
-                               cost += lzms_get_length_cost(&ctx->length_encoder, len);
+                       lzms_update_main_state(&cur_optimum_ptr->state, 1);
+                       lzms_update_match_state(&cur_optimum_ptr->state, 0);
 
-                               if (cost < ctx->optimum[cur_pos + len].cost) {
-                                       ctx->optimum[cur_pos + len].state = state;
-                                       ctx->optimum[cur_pos + len].prev.link = cur_pos;
-                                       ctx->optimum[cur_pos + len].prev.match_offset = offset;
-                                       ctx->optimum[cur_pos + len].cost = cost;
-                               }
-                       } while (++len <= matches[i].len);
-               }
+                       if (offset_data >= LZMS_NUM_RECENT_OFFSETS) {
 
-               if (longest_rep_len >= ctx->params.min_match_length) {
-
-                       while (end_pos < cur_pos + longest_rep_len)
-                               ctx->optimum[++end_pos].cost = MC_INFINITE_COST;
-
-                       state = ctx->optimum[cur_pos].state;
-
-                       cost = ctx->optimum[cur_pos].cost +
-                               lzms_get_lz_match_cost(ctx,
-                                                      &state,
-                                                      longest_rep_len,
-                                                      longest_rep_offset);
-                       if (cost <= ctx->optimum[cur_pos + longest_rep_len].cost) {
-                               ctx->optimum[cur_pos + longest_rep_len].state =
-                                       state;
-                               ctx->optimum[cur_pos + longest_rep_len].prev.link =
-                                       cur_pos;
-                               ctx->optimum[cur_pos + longest_rep_len].prev.match_offset =
-                                       longest_rep_offset;
-                               ctx->optimum[cur_pos + longest_rep_len].cost =
-                                       cost;
-                       }
-               }
-       }
-}
+                               /* Explicit offset LZ match  */
 
-/*
- * The main loop for the LZMS compressor.
- *
- * Notes:
- *
- * - This does not output any delta matches.
- *
- * - The costs of literals and matches are estimated using the range encoder
- *   states and the semi-adaptive Huffman codes.  Except for range encoding
- *   states, costs are assumed to be constant throughout a single run of the
- *   parsing algorithm, which can parse up to @optim_array_length bytes of data.
- *   This introduces a source of inaccuracy because the probabilities and
- *   Huffman codes can change over this part of the data.
- */
-static void
-lzms_encode(struct lzms_compressor *ctx)
-{
-       struct lz_match item;
+                               lzms_update_lz_match_state(&cur_optimum_ptr->state, 0);
+
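+                               /* Explicit offsets were stored biased by
+                                * LZMS_OFFSET_OFFSET, so that offset_data
+                                * values below LZMS_NUM_RECENT_OFFSETS
+                                * unambiguously denote repeat offsets.  Undo
+                                * the bias here.  */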
+                               cur_optimum_ptr->state.lru.upcoming_offset =
+                                       offset_data - LZMS_OFFSET_OFFSET;
+                       } else {
+                               /* Repeat offset LZ match  */
 
-       /* Load window into the match-finder.  */
-       lz_mf_load_window(ctx->mf, ctx->window, ctx->window_size);
+                               lzms_update_lz_match_state(&cur_optimum_ptr->state, 1);
+                               lzms_update_lz_repeat_match_state(&cur_optimum_ptr->state,
+                                                                 offset_data);
 
-       /* Reset the match-chooser.  */
-       ctx->optimum_cur_idx = 0;
-       ctx->optimum_end_idx = 0;
+                               cur_optimum_ptr->state.lru.upcoming_offset =
+                                       cur_optimum_ptr->state.lru.recent_offsets[offset_data];
 
-       while (ctx->cur_window_pos != ctx->window_size) {
-               item = lzms_get_near_optimal_item(ctx);
-               if (item.len <= 1)
-                       lzms_encode_literal(ctx, ctx->window[ctx->cur_window_pos]);
-               else
-                       lzms_encode_lz_match(ctx, item.len, item.offset);
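+                               /* Remove the repeat offset just used from the
+                                * queue; lzms_update_lz_lru_queue() will
+                                * re-insert it at the front via
+                                * upcoming_offset.  */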
+                               for (i = offset_data; i < LZMS_NUM_RECENT_OFFSETS; i++)
+                                       cur_optimum_ptr->state.lru.recent_offsets[i] =
+                                               cur_optimum_ptr->state.lru.recent_offsets[i + 1];
+                       }
+               }
+
+               lzms_update_lz_lru_queue(&cur_optimum_ptr->state.lru);
+
+               /*
+                * This loop will terminate when either of the following
+                * conditions is true:
+                *
+                * (1) cur_optimum_ptr == end_optimum_ptr
+                *
+                *      There are no paths that extend beyond the current
+                *      position.  In this case, any path to a later position
+                *      must pass through the current position, so we can go
+                *      ahead and choose the list of items that led to this
+                *      position.
+                *
+                * (2) cur_optimum_ptr == c->optimum_end
+                *
+                *      This bounds the number of times the algorithm can step
+                *      forward before it is guaranteed to start choosing items.
+                *      This limits the memory usage.  It also guarantees that
+                *      the parser will not go too long without updating the
+                *      probability tables.
+                *
+                * Note: no check for end-of-block is needed because
+                * end-of-block will trigger condition (1).
+                */
+               if (cur_optimum_ptr == end_optimum_ptr ||
+                   cur_optimum_ptr == c->optimum_end)
+               {
+                       c->optimum[0].state = cur_optimum_ptr->state;
+                       break;
+               }
        }
+
+       /* Output the list of items that constitute the minimum-cost path to
+        * the current position, then restart the parse from the next
+        * position, with the adaptive state carried over in c->optimum[0].  */
+       lzms_encode_item_list(c, cur_optimum_ptr);
+       goto begin;
 }
 
 static void
@@ -1154,6 +1241,7 @@ lzms_init_range_encoder(struct lzms_range_encoder *enc,
 {
        enc->rc = rc;
        enc->state = 0;
+       LZMS_ASSERT(is_power_of_2(num_states));
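+       /* Since num_states is a power of 2, this mask lets the state be
+        * maintained as a sliding window of the log2(num_states) most
+        * recently coded bits, updated as ((state << 1) | bit) & mask.  */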
        enc->mask = num_states - 1;
        for (u32 i = 0; i < num_states; i++) {
                enc->prob_entries[i].num_recent_zero_bits = LZMS_INITIAL_PROBABILITY;
@@ -1181,77 +1269,72 @@ lzms_init_huffman_encoder(struct lzms_huffman_encoder *enc,
                                    enc->codewords);
 }
 
-/* Initialize the LZMS compressor.  */
+/* Prepare the LZMS compressor for compressing a block of data.  */
 static void
-lzms_init_compressor(struct lzms_compressor *ctx, const u8 *udata, u32 ulen,
-                    le16 *cdata, u32 clen16)
+lzms_prepare_compressor(struct lzms_compressor *c, const u8 *udata, u32 ulen,
+                       le16 *cdata, u32 clen16)
 {
-       unsigned num_position_slots;
+       unsigned num_offset_slots;
 
-       /* Copy the uncompressed data into the @ctx->window buffer.  */
-       memcpy(ctx->window, udata, ulen);
-       ctx->cur_window_pos = 0;
-       ctx->window_size = ulen;
+       /* Copy the uncompressed data into the @c->cur_window buffer.  */
+       memcpy(c->cur_window, udata, ulen);
+       c->cur_window_size = ulen;
 
        /* Initialize the raw range encoder (writing forwards).  */
-       lzms_range_encoder_raw_init(&ctx->rc, cdata, clen16);
+       lzms_range_encoder_raw_init(&c->rc, cdata, clen16);
 
        /* Initialize the output bitstream for Huffman symbols and verbatim bits
         * (writing backwards).  */
-       lzms_output_bitstream_init(&ctx->os, cdata, clen16);
-
-       /* Calculate the number of position slots needed for this compressed
-        * block.  */
-       num_position_slots = lzms_get_position_slot(ulen - 1) + 1;
+       lzms_output_bitstream_init(&c->os, cdata, clen16);
 
-       LZMS_DEBUG("Using %u position slots", num_position_slots);
+       /* Calculate the number of offset slots required.  */
+       num_offset_slots = lzms_get_offset_slot(ulen - 1) + 1;
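+       /* (No offset in a block of ulen bytes can exceed ulen - 1, so any
+        * higher slots could never be used.)  */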
 
-       /* Initialize Huffman encoders for each alphabet used in the compressed
-        * representation.  */
-       lzms_init_huffman_encoder(&ctx->literal_encoder, &ctx->os,
+       /* Initialize a Huffman encoder for each alphabet.  */
+       lzms_init_huffman_encoder(&c->literal_encoder, &c->os,
                                  LZMS_NUM_LITERAL_SYMS,
                                  LZMS_LITERAL_CODE_REBUILD_FREQ);
 
-       lzms_init_huffman_encoder(&ctx->lz_offset_encoder, &ctx->os,
-                                 num_position_slots,
+       lzms_init_huffman_encoder(&c->lz_offset_encoder, &c->os,
+                                 num_offset_slots,
                                  LZMS_LZ_OFFSET_CODE_REBUILD_FREQ);
 
-       lzms_init_huffman_encoder(&ctx->length_encoder, &ctx->os,
+       lzms_init_huffman_encoder(&c->length_encoder, &c->os,
                                  LZMS_NUM_LEN_SYMS,
                                  LZMS_LENGTH_CODE_REBUILD_FREQ);
 
-       lzms_init_huffman_encoder(&ctx->delta_offset_encoder, &ctx->os,
-                                 num_position_slots,
+       lzms_init_huffman_encoder(&c->delta_offset_encoder, &c->os,
+                                 num_offset_slots,
                                  LZMS_DELTA_OFFSET_CODE_REBUILD_FREQ);
 
-       lzms_init_huffman_encoder(&ctx->delta_power_encoder, &ctx->os,
+       lzms_init_huffman_encoder(&c->delta_power_encoder, &c->os,
                                  LZMS_NUM_DELTA_POWER_SYMS,
                                  LZMS_DELTA_POWER_CODE_REBUILD_FREQ);
 
        /* Initialize range encoders, all of which wrap around the same
         * lzms_range_encoder_raw.  */
-       lzms_init_range_encoder(&ctx->main_range_encoder,
-                               &ctx->rc, LZMS_NUM_MAIN_STATES);
+       lzms_init_range_encoder(&c->main_range_encoder,
+                               &c->rc, LZMS_NUM_MAIN_STATES);
 
-       lzms_init_range_encoder(&ctx->match_range_encoder,
-                               &ctx->rc, LZMS_NUM_MATCH_STATES);
+       lzms_init_range_encoder(&c->match_range_encoder,
+                               &c->rc, LZMS_NUM_MATCH_STATES);
 
-       lzms_init_range_encoder(&ctx->lz_match_range_encoder,
-                               &ctx->rc, LZMS_NUM_LZ_MATCH_STATES);
+       lzms_init_range_encoder(&c->lz_match_range_encoder,
+                               &c->rc, LZMS_NUM_LZ_MATCH_STATES);
 
-       for (size_t i = 0; i < ARRAY_LEN(ctx->lz_repeat_match_range_encoders); i++)
-               lzms_init_range_encoder(&ctx->lz_repeat_match_range_encoders[i],
-                                       &ctx->rc, LZMS_NUM_LZ_REPEAT_MATCH_STATES);
+       for (unsigned i = 0; i < ARRAY_LEN(c->lz_repeat_match_range_encoders); i++)
+               lzms_init_range_encoder(&c->lz_repeat_match_range_encoders[i],
+                                       &c->rc, LZMS_NUM_LZ_REPEAT_MATCH_STATES);
 
-       lzms_init_range_encoder(&ctx->delta_match_range_encoder,
-                               &ctx->rc, LZMS_NUM_DELTA_MATCH_STATES);
+       lzms_init_range_encoder(&c->delta_match_range_encoder,
+                               &c->rc, LZMS_NUM_DELTA_MATCH_STATES);
 
-       for (size_t i = 0; i < ARRAY_LEN(ctx->delta_repeat_match_range_encoders); i++)
-               lzms_init_range_encoder(&ctx->delta_repeat_match_range_encoders[i],
-                                       &ctx->rc, LZMS_NUM_DELTA_REPEAT_MATCH_STATES);
+       for (unsigned i = 0; i < ARRAY_LEN(c->delta_repeat_match_range_encoders); i++)
+               lzms_init_range_encoder(&c->delta_repeat_match_range_encoders[i],
+                                       &c->rc, LZMS_NUM_DELTA_REPEAT_MATCH_STATES);
 
-       /* Initialize LRU match information.  */
-       lzms_init_lru_queues(&ctx->lru);
+       /* Set initial length costs for lengths < LZMS_NUM_FAST_LENGTHS.
+        * These are the only lengths for which the parser needs costs, since
+        * nice_match_length is capped at LZMS_NUM_FAST_LENGTHS.  */
+       lzms_update_fast_length_costs(c);
 }
 
 /* Flush the output streams, prepare the final compressed data, and return its
@@ -1260,66 +1343,77 @@ lzms_init_compressor(struct lzms_compressor *ctx, const u8 *udata, u32 ulen,
  * A return value of 0 indicates that the data could not be compressed to fit in
  * the available space.  */
 static size_t
-lzms_finalize(struct lzms_compressor *ctx, u8 *cdata, size_t csize_avail)
+lzms_finalize(struct lzms_compressor *c, u8 *cdata, size_t csize_avail)
 {
        size_t num_forwards_bytes;
        size_t num_backwards_bytes;
-       size_t compressed_size;
 
        /* Flush both the forwards and backwards streams, and make sure they
         * didn't cross each other and start overwriting each other's data.  */
-       if (!lzms_output_bitstream_flush(&ctx->os)) {
-               LZMS_DEBUG("Backwards bitstream overrun.");
+       if (!lzms_output_bitstream_flush(&c->os))
                return 0;
-       }
 
-       if (!lzms_range_encoder_raw_flush(&ctx->rc)) {
-               LZMS_DEBUG("Forwards bitstream overrun.");
+       if (!lzms_range_encoder_raw_flush(&c->rc))
                return 0;
-       }
 
-       if (ctx->rc.out > ctx->os.out) {
-               LZMS_DEBUG("Two bitstreams crossed.");
+       if (c->rc.next > c->os.next)
                return 0;
-       }
 
        /* Now the compressed buffer contains the data output by the forwards
         * bitstream, then empty space, then data output by the backwards
         * bitstream.  Move the data output by the backwards bitstream to be
         * adjacent to the data output by the forward bitstream, and calculate
         * the compressed size that this results in.  */
-       num_forwards_bytes = (u8*)ctx->rc.out - (u8*)cdata;
-       num_backwards_bytes = ((u8*)cdata + csize_avail) - (u8*)ctx->os.out;
+       num_forwards_bytes = (u8*)c->rc.next - (u8*)cdata;
+       num_backwards_bytes = ((u8*)cdata + csize_avail) - (u8*)c->os.next;
 
-       memmove(cdata + num_forwards_bytes, ctx->os.out, num_backwards_bytes);
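+       /* Before:  [ forwards data ][   unused   ][ backwards data ]
+        *          ^ cdata          ^ c->rc.next  ^ c->os.next
+        *
+        * After:   [ forwards data ][ backwards data ]  */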
+       memmove(cdata + num_forwards_bytes, c->os.next, num_backwards_bytes);
 
-       compressed_size = num_forwards_bytes + num_backwards_bytes;
-       LZMS_DEBUG("num_forwards_bytes=%zu, num_backwards_bytes=%zu, "
-                  "compressed_size=%zu",
-                  num_forwards_bytes, num_backwards_bytes, compressed_size);
-       LZMS_ASSERT(compressed_size % 2 == 0);
-       return compressed_size;
+       return num_forwards_bytes + num_backwards_bytes;
 }
 
-
+/* Set internal compression parameters for the specified compression
+ * level.  */
 static void
 lzms_build_params(unsigned int compression_level,
                  struct lzms_compressor_params *lzms_params)
 {
-       lzms_params->min_match_length  = (compression_level >= 50) ? 2 : 3;
-       lzms_params->nice_match_length = max(((u64)compression_level * 32) / 50,
-                                            lzms_params->min_match_length);
-       lzms_params->max_search_depth  = ((u64)compression_level * 50) / 50;
-       lzms_params->optim_array_length = 224 + compression_level * 16;
+       /* Allow length 2 matches if the compression level is sufficiently
+        * high.  */
+       if (compression_level >= 45)
+               lzms_params->min_match_length = 2;
+       else
+               lzms_params->min_match_length = 3;
+
+       /* Scale nice_match_length and max_search_depth with the compression
+        * level.  But to allow an optimization on length cost calculations,
+        * don't allow nice_match_length to exceed LZMS_NUM_FAST_LENGTHS.  */
+       lzms_params->nice_match_length = ((u64)compression_level * 32) / 50;
+       if (lzms_params->nice_match_length < lzms_params->min_match_length)
+               lzms_params->nice_match_length = lzms_params->min_match_length;
+       if (lzms_params->nice_match_length > LZMS_NUM_FAST_LENGTHS)
+               lzms_params->nice_match_length = LZMS_NUM_FAST_LENGTHS;
+       lzms_params->max_search_depth = compression_level;
+
+       lzms_params->optim_array_length = 1024;
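+
+       /* Example: compression level 50 yields min_match_length = 2,
+        * nice_match_length = 32, and max_search_depth = 50.  */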
 }
 
+/* Given the internal compression parameters and maximum window size, build the
+ * Lempel-Ziv match-finder parameters.  */
 static void
 lzms_build_mf_params(const struct lzms_compressor_params *lzms_params,
                     u32 max_window_size, struct lz_mf_params *mf_params)
 {
        memset(mf_params, 0, sizeof(*mf_params));
 
-       mf_params->algorithm = LZ_MF_DEFAULT;
+       /* Choose an appropriate match-finding algorithm.  */
+       if (max_window_size <= 2097152)         /* 2 MiB   */
+               mf_params->algorithm = LZ_MF_BINARY_TREES;
+       else if (max_window_size <= 33554432)   /* 32 MiB  */
+               mf_params->algorithm = LZ_MF_LCP_INTERVAL_TREE;
+       else
+               mf_params->algorithm = LZ_MF_LINKED_SUFFIX_ARRAY;
+
        mf_params->max_window_size = max_window_size;
        mf_params->min_match_len = lzms_params->min_match_length;
        mf_params->max_search_depth = lzms_params->max_search_depth;
@@ -1327,23 +1421,34 @@ lzms_build_mf_params(const struct lzms_compressor_params *lzms_params,
 }
 
 static void
-lzms_free_compressor(void *_ctx);
+lzms_free_compressor(void *_c);
 
 static u64
 lzms_get_needed_memory(size_t max_block_size, unsigned int compression_level)
 {
        struct lzms_compressor_params params;
+       struct lz_mf_params mf_params;
        u64 size = 0;
 
        if (max_block_size >= INT32_MAX)
                return 0;
 
        lzms_build_params(compression_level, &params);
+       lzms_build_mf_params(&params, max_block_size, &mf_params);
 
        size += sizeof(struct lzms_compressor);
+
+       /* cur_window */
        size += max_block_size;
-       size += lz_mf_get_needed_memory(LZ_MF_DEFAULT, max_block_size);
-       size += params.max_search_depth * sizeof(struct lz_match);
+
+       /* mf */
+       size += lz_mf_get_needed_memory(mf_params.algorithm, max_block_size);
+
+       /* matches */
+       size += min(params.max_search_depth, params.nice_match_length) *
+               sizeof(struct lz_match);
+
+       /* optimum */
        size += (params.optim_array_length + params.nice_match_length) *
                sizeof(struct lzms_mc_pos_data);
 
@@ -1354,7 +1459,7 @@ static int
 lzms_create_compressor(size_t max_block_size, unsigned int compression_level,
                       void **ctx_ret)
 {
-       struct lzms_compressor *ctx;
+       struct lzms_compressor *c;
        struct lzms_compressor_params params;
        struct lz_mf_params mf_params;
 
@@ -1366,60 +1471,56 @@ lzms_create_compressor(size_t max_block_size, unsigned int compression_level,
        if (!lz_mf_params_valid(&mf_params))
                return WIMLIB_ERR_INVALID_PARAM;
 
-       ctx = CALLOC(1, sizeof(struct lzms_compressor));
-       if (!ctx)
+       c = CALLOC(1, sizeof(struct lzms_compressor));
+       if (!c)
                goto oom;
 
-       ctx->params = params;
-       ctx->max_block_size = max_block_size;
+       c->params = params;
 
-       ctx->window = MALLOC(max_block_size);
-       if (!ctx->window)
+       c->cur_window = MALLOC(max_block_size);
+       if (!c->cur_window)
                goto oom;
 
-       ctx->mf = lz_mf_alloc(&mf_params);
-       if (!ctx->mf)
+       c->mf = lz_mf_alloc(&mf_params);
+       if (!c->mf)
                goto oom;
 
-       ctx->matches = MALLOC(params.max_search_depth * sizeof(struct lz_match));
-       if (!ctx->matches)
+       c->matches = MALLOC(min(params.max_search_depth,
+                               params.nice_match_length) *
+                           sizeof(struct lz_match));
+       if (!c->matches)
                goto oom;
 
-       ctx->optimum = MALLOC((params.optim_array_length +
-                              params.nice_match_length) *
-                               sizeof(struct lzms_mc_pos_data));
-       if (!ctx->optimum)
+       c->optimum = MALLOC((params.optim_array_length +
+                            params.nice_match_length) *
+                           sizeof(struct lzms_mc_pos_data));
+       if (!c->optimum)
                goto oom;
+       c->optimum_end = &c->optimum[params.optim_array_length];
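+       /* The extra nice_match_length entries allocated past optimum_end
+        * allow matches considered near the end of the array to extend
+        * safely beyond it.  */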
 
-       /* Initialize position and length slot data if not done already.  */
        lzms_init_slots();
 
-       /* Initialize range encoding cost table if not done already.  */
        lzms_init_rc_costs();
 
-       *ctx_ret = ctx;
+       lzms_init_fast_slots(c);
+
+       *ctx_ret = c;
        return 0;
 
 oom:
-       lzms_free_compressor(ctx);
+       lzms_free_compressor(c);
        return WIMLIB_ERR_NOMEM;
 }
 
 static size_t
 lzms_compress(const void *uncompressed_data, size_t uncompressed_size,
-             void *compressed_data, size_t compressed_size_avail, void *_ctx)
+             void *compressed_data, size_t compressed_size_avail, void *_c)
 {
-       struct lzms_compressor *ctx = _ctx;
-       size_t compressed_size;
-
-       LZMS_DEBUG("uncompressed_size=%zu, compressed_size_avail=%zu",
-                  uncompressed_size, compressed_size_avail);
+       struct lzms_compressor *c = _c;
 
        /* Don't bother compressing extremely small inputs.  */
-       if (uncompressed_size < 4) {
-               LZMS_DEBUG("Input too small to bother compressing.");
+       if (uncompressed_size < 4)
                return 0;
-       }
 
        /* Cap the available compressed size to a 32-bit integer and round it
         * down to the nearest multiple of 2.  */
@@ -1429,43 +1530,35 @@ lzms_compress(const void *uncompressed_data, size_t uncompressed_size,
                compressed_size_avail--;
 
        /* Initialize the compressor structures.  */
-       lzms_init_compressor(ctx, uncompressed_data, uncompressed_size,
-                            compressed_data, compressed_size_avail / 2);
+       lzms_prepare_compressor(c, uncompressed_data, uncompressed_size,
+                               compressed_data, compressed_size_avail / 2);
 
        /* Preprocess the uncompressed data.  */
-       lzms_x86_filter(ctx->window, ctx->window_size,
-                       ctx->last_target_usages, false);
+       lzms_x86_filter(c->cur_window, c->cur_window_size,
+                       c->last_target_usages, false);
+
+       /* Load the window into the match-finder.  */
+       lz_mf_load_window(c->mf, c->cur_window, c->cur_window_size);
 
        /* Compute and encode a literal/match sequence that decompresses to the
         * preprocessed data.  */
-       lzms_encode(ctx);
-
-       /* Get and return the compressed data size.  */
-       compressed_size = lzms_finalize(ctx, compressed_data,
-                                       compressed_size_avail);
-
-       if (compressed_size == 0) {
-               LZMS_DEBUG("Data did not compress to requested size or less.");
-               return 0;
-       }
-
-       LZMS_DEBUG("Compressed %zu => %zu bytes",
-                  uncompressed_size, compressed_size);
+       lzms_near_optimal_parse(c);
 
-       return compressed_size;
+       /* Return the compressed data size or 0.  */
+       return lzms_finalize(c, compressed_data, compressed_size_avail);
 }
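+
+/* Minimal usage sketch (illustrative only: it relies on the public API
+ * declared in wimlib.h, not on anything in this file, and udata, usize, and
+ * cbuf stand for the caller's buffers):
+ *
+ *      struct wimlib_compressor *compressor;
+ *      size_t csize;
+ *
+ *      if (wimlib_create_compressor(WIMLIB_COMPRESSION_TYPE_LZMS, 32768,
+ *                                   50, &compressor) == 0) {
+ *              csize = wimlib_compress(udata, usize, cbuf, sizeof(cbuf),
+ *                                      compressor);
+ *              wimlib_free_compressor(compressor);
+ *      }
+ *
+ * A return value of 0 from wimlib_compress() means the data did not fit in
+ * the output buffer and should be stored uncompressed instead.  */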
 
 static void
-lzms_free_compressor(void *_ctx)
+lzms_free_compressor(void *_c)
 {
-       struct lzms_compressor *ctx = _ctx;
-
-       if (ctx) {
-               FREE(ctx->window);
-               lz_mf_free(ctx->mf);
-               FREE(ctx->matches);
-               FREE(ctx->optimum);
-               FREE(ctx);
+       struct lzms_compressor *c = _c;
+
+       if (c) {
+               FREE(c->cur_window);
+               lz_mf_free(c->mf);
+               FREE(c->matches);
+               FREE(c->optimum);
+               FREE(c);
        }
 }