+struct lzms_match {
+ u32 length;
+ u32 offset;
+};
+
+/* Initialize the output bitstream @os to write backwards to the specified
+ * compressed data buffer @out that is @out_limit 16-bit integers long. */
+static void
+lzms_output_bitstream_init(struct lzms_output_bitstream *os,
+ le16 *out, size_t out_limit)
+{
+ os->bitbuf = 0;
+ os->num_free_bits = 16;
+ os->out = out + out_limit;
+ os->num_le16_remaining = out_limit;
+ os->overrun = false;
+}
+
+/* Write @num_bits bits, contained in the low @num_bits bits of @bits (ordered
+ * from high-order to low-order), to the output bitstream @os. */
+static void
+lzms_output_bitstream_put_bits(struct lzms_output_bitstream *os,
+ u32 bits, unsigned num_bits)
+{
+ bits &= (1U << num_bits) - 1;
+
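+ /* Illustrative example: writing 20 bits to a freshly initialized stream
+ * (num_free_bits == 16) emits one coding unit holding the 16 high-order
+ * bits and leaves the 4 low-order bits buffered in os->bitbuf. */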
+ while (num_bits > os->num_free_bits) {
+
+ if (unlikely(os->num_le16_remaining == 0)) {
+ os->overrun = true;
+ return;
+ }
+
+ unsigned num_fill_bits = os->num_free_bits;
+
+ os->bitbuf <<= num_fill_bits;
+ os->bitbuf |= bits >> (num_bits - num_fill_bits);
+
+ *--os->out = cpu_to_le16(os->bitbuf);
+ --os->num_le16_remaining;
+
+ os->num_free_bits = 16;
+ num_bits -= num_fill_bits;
+ bits &= (1U << num_bits) - 1;
+ }
+ os->bitbuf <<= num_bits;
+ os->bitbuf |= bits;
+ os->num_free_bits -= num_bits;
+}
+
+/* Flush the output bitstream, ensuring that all bits written to it have been
+ * written to memory. Returns %true if all bits were output successfully, or
+ * %false if an overrun occurred. */
+static bool
+lzms_output_bitstream_flush(struct lzms_output_bitstream *os)
+{
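+ /* If any bits are buffered, write num_free_bits + 1 zero bits: just enough
+ * to make lzms_output_bitstream_put_bits() emit the partially filled coding
+ * unit, zero-padded in its low-order bit positions. */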
+ if (os->num_free_bits != 16)
+ lzms_output_bitstream_put_bits(os, 0, os->num_free_bits + 1);
+ return !os->overrun;
+}
+
+/* Initialize the range encoder @rc to write forwards to the specified
+ * compressed data buffer @out that is @out_limit 16-bit integers long. */
+static void
+lzms_range_encoder_raw_init(struct lzms_range_encoder_raw *rc,
+ le16 *out, size_t out_limit)
+{
+ rc->low = 0;
+ rc->range = 0xffffffff;
+ rc->cache = 0;
+ rc->cache_size = 1;
+ rc->out = out;
+ rc->num_le16_remaining = out_limit;
+ rc->first = true;
+ rc->overrun = false;
+}
+
+/*
+ * Attempt to flush bits from the range encoder.
+ *
+ * Note: this is based on the public domain code for LZMA written by Igor
+ * Pavlov. The only differences in this function are that in LZMS the bits must
+ * be output in 16-bit coding units instead of 8-bit coding units, and that in
+ * LZMS the first coding unit is not ignored by the decompressor, so the encoder
+ * cannot output a dummy value to that position.
+ *
+ * The basic idea is that we're writing bits from @rc->low to the output.
+ * However, due to carrying, the writing of coding units with value 0xffff, as
+ * well as one prior coding unit, must be delayed until it is determined whether
+ * a carry is needed.
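+ *
+ * For example (illustrative): when a carry has occurred
+ * ((rc->low >> 32) != 0), the pending coding unit in @rc->cache is written
+ * incremented by 1, and any buffered 0xffff coding units wrap around to
+ * 0x0000 as the carry propagates through them.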
+ */
+static void
+lzms_range_encoder_raw_shift_low(struct lzms_range_encoder_raw *rc)
+{
+ LZMS_DEBUG("low=%"PRIx64", cache=%"PRIx64", cache_size=%u",
+ rc->low, rc->cache, rc->cache_size);
+ if ((u32)(rc->low) < 0xffff0000 ||
+ (u32)(rc->low >> 32) != 0)
+ {
+ /* Carry not needed (rc->low < 0xffff0000), or carry occurred
+ * ((rc->low >> 32) != 0, a.k.a. the carry bit is 1). */
+ do {
+ if (!rc->first) {
+ if (rc->num_le16_remaining == 0) {
+ rc->overrun = true;
+ return;
+ }
+ *rc->out++ = cpu_to_le16(rc->cache +
+ (u16)(rc->low >> 32));
+ --rc->num_le16_remaining;
+ } else {
+ rc->first = false;
+ }
+
+ rc->cache = 0xffff;
+ } while (--rc->cache_size != 0);
+
+ rc->cache = (rc->low >> 16) & 0xffff;
+ }
+ ++rc->cache_size;
+ rc->low = (rc->low & 0xffff) << 16;
+}
+
+static void
+lzms_range_encoder_raw_normalize(struct lzms_range_encoder_raw *rc)
+{
+ if (rc->range <= 0xffff) {
+ rc->range <<= 16;
+ lzms_range_encoder_raw_shift_low(rc);
+ }
+}
+
+static bool
+lzms_range_encoder_raw_flush(struct lzms_range_encoder_raw *rc)
+{
+ for (unsigned i = 0; i < 4; i++)
+ lzms_range_encoder_raw_shift_low(rc);
+ return !rc->overrun;
+}
+
+/* Encode the next bit using the range encoder (raw version).
+ *
+ * @prob is the chance out of LZMS_PROBABILITY_MAX that the next bit is 0. */
+static void
+lzms_range_encoder_raw_encode_bit(struct lzms_range_encoder_raw *rc, int bit,
+ u32 prob)
+{
+ lzms_range_encoder_raw_normalize(rc);
+
+ u32 bound = (rc->range >> LZMS_PROBABILITY_BITS) * prob;
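+ /* The current interval [low, low + range) is split at low + bound: a 0 bit
+ * selects the lower sub-interval of size bound, and a 1 bit selects the
+ * upper sub-interval of size range - bound. */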
+ if (bit == 0) {
+ rc->range = bound;
+ } else {
+ rc->low += bound;
+ rc->range -= bound;
+ }
+}
+
+/* Encode a bit using the specified range encoder. This wraps around
+ * lzms_range_encoder_raw_encode_bit() to handle using and updating the
+ * appropriate probability table. */
+static void
+lzms_range_encode_bit(struct lzms_range_encoder *enc, int bit)
+{
+ struct lzms_probability_entry *prob_entry;
+ u32 prob;
+
+ /* Load the probability entry corresponding to the current state. */
+ prob_entry = &enc->prob_entries[enc->state];
+
+ /* Treat the number of zero bits in the most recently encoded
+ * LZMS_PROBABILITY_MAX bits with this probability entry as the chance,
+ * out of LZMS_PROBABILITY_MAX, that the next bit will be a 0. However,
+ * don't allow 0% or 100% probabilities. */
+ prob = prob_entry->num_recent_zero_bits;
+ if (prob == 0)
+ prob = 1;
+ else if (prob == LZMS_PROBABILITY_MAX)
+ prob = LZMS_PROBABILITY_MAX - 1;
+
+ /* Encode the next bit. */
+ lzms_range_encoder_raw_encode_bit(enc->rc, bit, prob);
+
+ /* Update the state based on the newly encoded bit. */
+ enc->state = ((enc->state << 1) | bit) & enc->mask;
+
+ /* Update the recent bits, including the cached count of 0's. */
+ BUILD_BUG_ON(LZMS_PROBABILITY_MAX > sizeof(prob_entry->recent_bits) * 8);
+ if (bit == 0) {
+ if (prob_entry->recent_bits & (1ULL << (LZMS_PROBABILITY_MAX - 1))) {
+ /* Replacing 1 bit with 0 bit; increment the zero count.
+ */
+ prob_entry->num_recent_zero_bits++;
+ }
+ } else {
+ if (!(prob_entry->recent_bits & (1ULL << (LZMS_PROBABILITY_MAX - 1)))) {
+ /* Replacing 0 bit with 1 bit; decrement the zero count.
+ */
+ prob_entry->num_recent_zero_bits--;
+ }
+ }
+ prob_entry->recent_bits = (prob_entry->recent_bits << 1) | bit;
+}
+
+/* Encode a symbol using the specified Huffman encoder. */
+static void
+lzms_huffman_encode_symbol(struct lzms_huffman_encoder *enc, u32 sym)
+{
+ LZMS_ASSERT(sym < enc->num_syms);
+ if (enc->num_syms_written == enc->rebuild_freq) {
+ /* Adaptive code needs to be rebuilt. */
+ LZMS_DEBUG("Rebuilding code (num_syms=%u)", enc->num_syms);
+ make_canonical_huffman_code(enc->num_syms,
+ LZMS_MAX_CODEWORD_LEN,
+ enc->sym_freqs,
+ enc->lens,
+ enc->codewords);
+
+ /* Dilute the frequencies: halve each count, then add 1 so that no
+ * frequency becomes 0 and the code keeps adapting toward recently
+ * seen symbols. */
+ for (unsigned i = 0; i < enc->num_syms; i++) {
+ enc->sym_freqs[i] >>= 1;
+ enc->sym_freqs[i] += 1;
+ }
+ enc->num_syms_written = 0;
+ }
+ lzms_output_bitstream_put_bits(enc->os,
+ enc->codewords[sym],
+ enc->lens[sym]);
+ ++enc->num_syms_written;
+ ++enc->sym_freqs[sym];
+}
+
+/* Encode a number as a Huffman symbol specifying a slot, plus a number of
+ * slot-dependent extra bits. */
+static void
+lzms_encode_value(struct lzms_huffman_encoder *enc, u32 value)
+{
+ unsigned slot;
+ unsigned num_extra_bits;
+ u32 extra_bits;
+
+ LZMS_ASSERT(enc->slot_base_tab != NULL);
+
+ slot = lzms_get_slot(value, enc->slot_base_tab, enc->num_syms);
+
+ /* Get the number of extra bits needed to represent the range of values
+ * that share the slot. */
+ num_extra_bits = bsr32(enc->slot_base_tab[slot + 1] -
+ enc->slot_base_tab[slot]);
+
+ /* Calculate the extra bits as the offset from the slot base. */
+ extra_bits = value - enc->slot_base_tab[slot];
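+ /* Illustrative example: a slot covering the 64 values [128, 192) has
+ * bsr32(64) = 6 extra bits, and a value of 130 gives extra_bits = 2. */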
+
+ /* Output the slot (Huffman-encoded), then the extra bits (verbatim).
+ */
+ lzms_huffman_encode_symbol(enc, slot);
+ lzms_output_bitstream_put_bits(enc->os, extra_bits, num_extra_bits);
+}
+
+/* Encode a literal byte. */
+static void
+lzms_encode_literal(struct lzms_compressor *ctx, u8 literal)
+{
+ LZMS_DEBUG("Position %u: Encoding literal 0x%02x ('%c')",
+ ctx->cur_window_pos, literal, literal);
+
+ /* Main bit: 0 = a literal, not a match. */
+ lzms_range_encode_bit(&ctx->main_range_encoder, 0);
+
+ /* Encode the literal using the current literal Huffman code. */
+ lzms_huffman_encode_symbol(&ctx->literal_encoder, literal);
+}
+
+/* Encode a (length, offset) pair (LZ match). */
+static void
+lzms_encode_lz_match(struct lzms_compressor *ctx, u32 length, u32 offset)
+{
+ int recent_offset_idx;
+
+ LZMS_DEBUG("Position %u: Encoding LZ match {length=%u, offset=%u}",
+ ctx->cur_window_pos, length, offset);
+
+ /* Main bit: 1 = a match, not a literal. */
+ lzms_range_encode_bit(&ctx->main_range_encoder, 1);
+
+ /* Match bit: 0 = an LZ match, not a delta match. */
+ lzms_range_encode_bit(&ctx->match_range_encoder, 0);
+
+ /* Determine if the offset can be represented as a recent offset. */
+ for (recent_offset_idx = 0;
+ recent_offset_idx < LZMS_NUM_RECENT_OFFSETS;
+ recent_offset_idx++)
+ if (offset == ctx->recent_lz_offsets[recent_offset_idx])
+ break;
+
+ if (recent_offset_idx == LZMS_NUM_RECENT_OFFSETS) {
+ /* Explicit offset. */
+
+ /* LZ match bit: 0 = explicit offset, not a repeat offset. */
+ lzms_range_encode_bit(&ctx->lz_match_range_encoder, 0);
+
+ /* Encode the match offset. */
+ lzms_encode_value(&ctx->lz_offset_encoder, offset);
+ } else {
+ int i;
+
+ /* Repeat offset. */
+
+ /* LZ match bit: 1 = repeat offset, not an explicit offset. */
+ lzms_range_encode_bit(&ctx->lz_match_range_encoder, 1);
+
+ /* Encode the recent offset index. A 1 bit is encoded for each
+ * index passed up. This sequence of 1 bits is terminated by a
+ * 0 bit, or automatically when (LZMS_NUM_RECENT_OFFSETS - 1) 1
+ * bits have been encoded. */
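+ /* For example, with LZMS_NUM_RECENT_OFFSETS == 3: index 0 is encoded as a
+ * single 0 bit, index 1 as a 1 bit then a 0 bit, and index 2 as two 1 bits
+ * with no terminating 0 bit. */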
+ for (i = 0; i < recent_offset_idx; i++)
+ lzms_range_encode_bit(&ctx->lz_repeat_match_range_encoders[i], 1);
+
+ if (i < LZMS_NUM_RECENT_OFFSETS - 1)
+ lzms_range_encode_bit(&ctx->lz_repeat_match_range_encoders[i], 0);
+
+ /* Initial update of the LZ match offset LRU queue. */
+ for (i = recent_offset_idx; i < LZMS_NUM_RECENT_OFFSETS; i++)
+ ctx->recent_lz_offsets[i] = ctx->recent_lz_offsets[i + 1];
+ }
+
+ /* Encode the match length. */
+ lzms_encode_value(&ctx->length_encoder, length);
+
+ /* Save the match offset for later insertion at the front of the LZ
+ * match offset LRU queue. */
+ ctx->upcoming_lz_offset = offset;
+}
+
+static struct lzms_match
+lzms_get_best_match(struct lzms_compressor *ctx)
+{
+ struct lzms_match match;
+
+ /* TODO */
+
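+ /* A length of 0 indicates that no match was found. */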
+ match.length = 0;
+
+ return match;
+}
+
+static void
+lzms_init_range_encoder(struct lzms_range_encoder *enc,
+ struct lzms_range_encoder_raw *rc, u32 num_states)
+{
+ enc->rc = rc;
+ enc->state = 0;
+ enc->mask = num_states - 1;
+ for (u32 i = 0; i < num_states; i++) {
+ enc->prob_entries[i].num_recent_zero_bits = LZMS_INITIAL_PROBABILITY;
+ enc->prob_entries[i].recent_bits = LZMS_INITIAL_RECENT_BITS;
+ }
+}
+
+static void
+lzms_init_huffman_encoder(struct lzms_huffman_encoder *enc,
+ struct lzms_output_bitstream *os,
+ const u32 *slot_base_tab,
+ unsigned num_syms,
+ unsigned rebuild_freq)
+{
+ enc->os = os;
+ enc->slot_base_tab = slot_base_tab;
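+ /* Start with num_syms_written equal to rebuild_freq so that the first call
+ * to lzms_huffman_encode_symbol() builds the initial code from the all-ones
+ * frequencies before any symbol is output. */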
+ enc->num_syms_written = rebuild_freq;
+ enc->rebuild_freq = rebuild_freq;
+ enc->num_syms = num_syms;
+ for (unsigned i = 0; i < num_syms; i++)
+ enc->sym_freqs[i] = 1;
+}
+
+/* Initialize the LZMS compressor. */
+static void
+lzms_init_compressor(struct lzms_compressor *ctx, const u8 *udata, u32 ulen,
+ le16 *cdata, u32 clen16)
+{
+ unsigned num_position_slots;
+
+ /* Copy the uncompressed data into the @ctx->window buffer. */
+ memcpy(ctx->window, udata, ulen);
+ ctx->cur_window_pos = 0;
+ ctx->window_size = ulen;
+
+ /* Initialize the raw range encoder (writing forwards). */
+ lzms_range_encoder_raw_init(&ctx->rc, cdata, clen16);
+
+ /* Initialize the output bitstream for Huffman symbols and verbatim bits
+ * (writing backwards). */
+ lzms_output_bitstream_init(&ctx->os, cdata, clen16);
+
+ /* Initialize position and length slot bases if not done already. */
+ lzms_init_slot_bases();
+
+ /* Calculate the number of position slots needed for this compressed
+ * block. */
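+ /* The largest possible match offset in a block of @ulen bytes is ulen - 1,
+ * so slots 0 through lzms_get_position_slot(ulen - 1) may be needed. */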
+ num_position_slots = lzms_get_position_slot(ulen - 1) + 1;
+
+ LZMS_DEBUG("Using %u position slots", num_position_slots);
+
+ /* Initialize Huffman encoders for each alphabet used in the compressed
+ * representation. */
+ lzms_init_huffman_encoder(&ctx->literal_encoder, &ctx->os,
+ NULL, LZMS_NUM_LITERAL_SYMS,
+ LZMS_LITERAL_CODE_REBUILD_FREQ);
+
+ lzms_init_huffman_encoder(&ctx->lz_offset_encoder, &ctx->os,
+ lzms_position_slot_base, num_position_slots,
+ LZMS_LZ_OFFSET_CODE_REBUILD_FREQ);
+
+ lzms_init_huffman_encoder(&ctx->length_encoder, &ctx->os,
+ lzms_length_slot_base, LZMS_NUM_LEN_SYMS,
+ LZMS_LENGTH_CODE_REBUILD_FREQ);
+
+ lzms_init_huffman_encoder(&ctx->delta_offset_encoder, &ctx->os,
+ lzms_position_slot_base, num_position_slots,
+ LZMS_DELTA_OFFSET_CODE_REBUILD_FREQ);
+
+ lzms_init_huffman_encoder(&ctx->delta_power_encoder, &ctx->os,
+ NULL, LZMS_NUM_DELTA_POWER_SYMS,
+ LZMS_DELTA_POWER_CODE_REBUILD_FREQ);
+
+ /* Initialize range encoders, all of which wrap around the same
+ * lzms_range_encoder_raw. */
+ lzms_init_range_encoder(&ctx->main_range_encoder,
+ &ctx->rc, LZMS_NUM_MAIN_STATES);
+
+ lzms_init_range_encoder(&ctx->match_range_encoder,
+ &ctx->rc, LZMS_NUM_MATCH_STATES);
+
+ lzms_init_range_encoder(&ctx->lz_match_range_encoder,
+ &ctx->rc, LZMS_NUM_LZ_MATCH_STATES);
+
+ for (size_t i = 0; i < ARRAY_LEN(ctx->lz_repeat_match_range_encoders); i++)
+ lzms_init_range_encoder(&ctx->lz_repeat_match_range_encoders[i],
+ &ctx->rc, LZMS_NUM_LZ_REPEAT_MATCH_STATES);
+
+ lzms_init_range_encoder(&ctx->delta_match_range_encoder,
+ &ctx->rc, LZMS_NUM_DELTA_MATCH_STATES);
+
+ for (size_t i = 0; i < ARRAY_LEN(ctx->delta_repeat_match_range_encoders); i++)
+ lzms_init_range_encoder(&ctx->delta_repeat_match_range_encoders[i],
+ &ctx->rc, LZMS_NUM_DELTA_REPEAT_MATCH_STATES);
+
+ /* Initialize the LRU queue for recent match offsets. */
+ for (size_t i = 0; i < LZMS_NUM_RECENT_OFFSETS + 1; i++)
+ ctx->recent_lz_offsets[i] = i + 1;
+
+ for (size_t i = 0; i < LZMS_NUM_RECENT_OFFSETS + 1; i++) {
+ ctx->recent_delta_powers[i] = 0;
+ ctx->recent_delta_offsets[i] = i + 1;
+ }
+ ctx->prev_lz_offset = 0;
+ ctx->prev_delta_offset = 0;
+ ctx->prev_delta_power = 0;
+ ctx->upcoming_lz_offset = 0;
+ ctx->upcoming_delta_offset = 0;
+ ctx->upcoming_delta_power = 0;
+}
+
+/* Flush the output streams, prepare the final compressed data, and return its
+ * size in bytes.
+ *
+ * A return value of 0 indicates that the data could not be compressed to fit in
+ * the available space. */
+static size_t
+lzms_finalize(struct lzms_compressor *ctx, u8 *cdata, size_t csize_avail)
+{
+ size_t num_forwards_bytes;
+ size_t num_backwards_bytes;
+ size_t compressed_size;
+
+ /* Flush both the forwards and backwards streams, and make sure they
+ * didn't cross each other and start overwriting each other's data. */
+ if (!lzms_output_bitstream_flush(&ctx->os)) {
+ LZMS_DEBUG("Backwards bitstream overrun.");
+ return 0;
+ }
+
+ if (!lzms_range_encoder_raw_flush(&ctx->rc)) {
+ LZMS_DEBUG("Forwards bitstream overrun.");
+ return 0;
+ }
+
+ if (ctx->rc.out > ctx->os.out) {
+ LZMS_DEBUG("Two bitstreams crossed.");
+ return 0;
+ }
+
+ /* Now the compressed buffer contains the data output by the forwards
+ * bitstream, then empty space, then data output by the backwards
+ * bitstream. Move the data output by the backwards bitstream so that it is
+ * adjacent to the data output by the forwards bitstream, and calculate the
+ * resulting compressed size. */
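+ /* Layout before the move: [cdata, rc.out) holds the forwards data,
+ * [rc.out, os.out) is unused, and [os.out, cdata + csize_avail) holds the
+ * backwards data. */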
+ num_forwards_bytes = (u8*)ctx->rc.out - (u8*)cdata;
+ num_backwards_bytes = ((u8*)cdata + csize_avail) - (u8*)ctx->os.out;
+
+ memmove(cdata + num_forwards_bytes, ctx->os.out, num_backwards_bytes);
+
+ compressed_size = num_forwards_bytes + num_backwards_bytes;
+ LZMS_DEBUG("num_forwards_bytes=%zu, num_backwards_bytes=%zu, "
+ "compressed_size=%zu",
+ num_forwards_bytes, num_backwards_bytes, compressed_size);
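+ /* Both streams write whole 16-bit coding units, so the size must be even. */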
+ LZMS_ASSERT(!(compressed_size & 1));
+ return compressed_size;
+}
+