/*
 * Copyright (C) 2013, 2014 Eric Biggers
 *
 * This file is part of wimlib, a library for working with WIM files.
 *
 * wimlib is free software; you can redistribute it and/or modify it under the
 * terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version.
 *
 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with wimlib; if not, see http://www.gnu.org/licenses/.
 */
/* This is a compressor for the LZMS compression format.  More details about
 * this format can be found in lzms-decompress.c.
 *
 * Also see lzx-compress.c for general information about match-finding and
 * match-choosing that also applies to this LZMS compressor.
 *
 * NOTE: this compressor currently does not code any delta matches.
 */
#include "wimlib/assert.h"
#include "wimlib/compiler.h"
#include "wimlib/compressor_ops.h"
#include "wimlib/compress_common.h"
#include "wimlib/endianness.h"
#include "wimlib/error.h"
#include "wimlib/lz.h"
#include "wimlib/lz_bt.h"
#include "wimlib/lzms.h"
#include "wimlib/util.h"

#include <string.h>
#include <pthread.h>
/* Structure used for writing raw bits to the end of the LZMS-compressed data
 * as a series of 16-bit little endian coding units.  */
struct lzms_output_bitstream {
	/* Buffer variable containing zero or more bits that have been logically
	 * written to the bitstream but not yet written to memory.  This must be
	 * at least as large as the coding unit size.  */
	u32 bitbuf;

	/* Number of bits in @bitbuf that are currently free (unfilled); 16 when
	 * the buffer is empty.  */
	unsigned num_free_bits;

	/* Pointer to one past the next position in the compressed data buffer
	 * at which to output a 16-bit coding unit.  */
	le16 *out;

	/* Maximum number of 16-bit coding units that can still be output to
	 * the compressed data buffer.  */
	size_t num_le16_remaining;

	/* Set to %true if not all coding units could be output due to
	 * insufficient space.  */
	bool overrun;
};
/* Structure used for range encoding (raw version).  */
struct lzms_range_encoder_raw {

	/* A 33-bit variable that holds the low boundary of the current range.
	 * The 33rd bit is needed to catch carries.  */
	u64 low;

	/* Size of the current range.  */
	u32 range;

	/* Next 16-bit coding unit to output.  */
	u16 cache;

	/* Number of 16-bit coding units whose output has been delayed due to
	 * possible carrying.  The first such coding unit is @cache; all
	 * subsequent such coding units are 0xffff.  */
	u32 cache_size;

	/* Pointer to the next position in the compressed data buffer at which
	 * to output a 16-bit coding unit.  */
	le16 *out;

	/* Maximum number of 16-bit coding units that can still be output to
	 * the compressed data buffer.  */
	size_t num_le16_remaining;

	/* %true when the very first coding unit has not yet been output.  */
	bool first;

	/* Set to %true if not all coding units could be output due to
	 * insufficient space.  */
	bool overrun;
};
/* Structure used for range encoding.  This wraps around `struct
 * lzms_range_encoder_raw' to use and maintain probability entries.  */
struct lzms_range_encoder {
	/* Pointer to the raw range encoder, which has no persistent knowledge
	 * of probabilities.  Multiple lzms_range_encoder's share the same
	 * lzms_range_encoder_raw.  */
	struct lzms_range_encoder_raw *rc;

	/* Bits recently encoded by this range encoder.  These are used as an
	 * index into @prob_entries.  */
	u32 state;

	/* Bitmask for @state to prevent its value from exceeding the number of
	 * probability entries.  */
	u32 mask;

	/* Probability entries being used for this range encoder.  */
	struct lzms_probability_entry prob_entries[LZMS_MAX_NUM_STATES];
};
/* Structure used for Huffman encoding.  */
struct lzms_huffman_encoder {

	/* Bitstream to write Huffman-encoded symbols and verbatim bits to.
	 * Multiple lzms_huffman_encoder's share the same lzms_output_bitstream.
	 */
	struct lzms_output_bitstream *os;

	/* Number of symbols that have been written using this code so far.
	 * Reset to 0 whenever the code is rebuilt.  */
	u32 num_syms_written;

	/* When @num_syms_written reaches this number, the Huffman code must be
	 * rebuilt.  */
	u32 rebuild_freq;

	/* Number of symbols in the represented Huffman code.  */
	unsigned num_syms;

	/* Running totals of symbol frequencies.  These are diluted slightly
	 * whenever the code is rebuilt.  */
	u32 sym_freqs[LZMS_MAX_NUM_SYMS];

	/* The length, in bits, of each symbol in the Huffman code.  */
	u8 lens[LZMS_MAX_NUM_SYMS];

	/* The codeword of each symbol in the Huffman code.  */
	u32 codewords[LZMS_MAX_NUM_SYMS];
};
/* State of the LZMS compressor.  */
struct lzms_compressor {
	/* Parameters for this compressor instantiation.  */
	struct wimlib_lzms_compressor_params params;

	/* Pointer to a buffer holding the preprocessed data to compress.  */
	u8 *window;

	/* Current position in @window.  */
	u32 cur_window_pos;

	/* Size of the data in @window.  */
	u32 window_size;

	/* Binary tree match-finder.  */
	struct lz_bt mf;

	/* Temporary space to store found matches.  */
	struct raw_match *matches;

	/* Match-chooser data.  */
	struct lzms_mc_pos_data *optimum;
	unsigned optimum_cur_idx;
	unsigned optimum_end_idx;

	/* Maximum block size this compressor instantiation allows.  This is
	 * the allocated size of @window.  */
	u32 max_block_size;

	/* Raw range encoder which outputs to the beginning of the compressed
	 * data buffer, proceeding forwards.  */
	struct lzms_range_encoder_raw rc;

	/* Bitstream which outputs to the end of the compressed data buffer,
	 * proceeding backwards.  */
	struct lzms_output_bitstream os;

	/* Range encoders.  */
	struct lzms_range_encoder main_range_encoder;
	struct lzms_range_encoder match_range_encoder;
	struct lzms_range_encoder lz_match_range_encoder;
	struct lzms_range_encoder lz_repeat_match_range_encoders[LZMS_NUM_RECENT_OFFSETS - 1];
	struct lzms_range_encoder delta_match_range_encoder;
	struct lzms_range_encoder delta_repeat_match_range_encoders[LZMS_NUM_RECENT_OFFSETS - 1];

	/* Huffman encoders.  */
	struct lzms_huffman_encoder literal_encoder;
	struct lzms_huffman_encoder lz_offset_encoder;
	struct lzms_huffman_encoder length_encoder;
	struct lzms_huffman_encoder delta_power_encoder;
	struct lzms_huffman_encoder delta_offset_encoder;

	/* LRU (least-recently-used) queues for match information.  */
	struct lzms_lru_queues lru;

	/* Used for preprocessing.  */
	s32 last_target_usages[65536];
};
/* Match-chooser position data: cost and path-link information for one
 * position in the current optimization window.  */
struct lzms_mc_pos_data {
	/* The cheapest cost found to reach this position so far.  */
	u32 cost;
#define MC_INFINITE_COST ((u32)~0UL)

	/* Links to adjacent positions along the chosen path.  */
	union {
		struct { u32 link; u32 match_offset; } prev;
		struct { u32 link; u32 match_offset; } next;
	};

	/* Adaptive state (LRU queues and range encoder states) in effect at
	 * this position.  */
	struct lzms_adaptive_state {
		struct lzms_lz_lru_queues lru;
		u8 main_state;
		u8 match_state;
		u8 lz_match_state;
		u8 lz_repeat_match_state[LZMS_NUM_RECENT_OFFSETS - 1];
	} state;
};
/* Initialize the output bitstream @os to write backwards to the specified
 * compressed data buffer @out that is @out_limit 16-bit integers long.  */
static void
lzms_output_bitstream_init(struct lzms_output_bitstream *os,
			   le16 *out, size_t out_limit)
{
	os->bitbuf = 0;
	os->num_free_bits = 16;
	os->out = out + out_limit;
	os->num_le16_remaining = out_limit;
	os->overrun = false;
}
/* Write @num_bits bits, contained in the low @num_bits bits of @bits (ordered
 * from high-order to low-order), to the output bitstream @os.  */
static void
lzms_output_bitstream_put_bits(struct lzms_output_bitstream *os,
			       u32 bits, unsigned num_bits)
{
	bits &= (1U << num_bits) - 1;

	while (num_bits > os->num_free_bits) {

		if (unlikely(os->num_le16_remaining == 0)) {
			os->overrun = true;
			return;
		}

		unsigned num_fill_bits = os->num_free_bits;

		os->bitbuf <<= num_fill_bits;
		os->bitbuf |= bits >> (num_bits - num_fill_bits);

		*--os->out = cpu_to_le16(os->bitbuf);
		--os->num_le16_remaining;

		os->num_free_bits = 16;
		num_bits -= num_fill_bits;
		bits &= (1U << num_bits) - 1;
	}

	os->bitbuf <<= num_bits;
	os->bitbuf |= bits;
	os->num_free_bits -= num_bits;
}
/* Flush the output bitstream, ensuring that all bits written to it have been
 * written to memory.  Returns %true if all bits were output successfully, or
 * %false if an overrun occurred.  */
static bool
lzms_output_bitstream_flush(struct lzms_output_bitstream *os)
{
	if (os->num_free_bits != 16)
		lzms_output_bitstream_put_bits(os, 0, os->num_free_bits + 1);
	return !os->overrun;
}
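
/*
 * Example (illustrative only, not called anywhere): the intended usage
 * pattern of the output bitstream defined above.  Note that the stream
 * writes backwards, starting from the end of the buffer:
 *
 *	le16 buf[64];
 *	struct lzms_output_bitstream os;
 *
 *	lzms_output_bitstream_init(&os, buf, ARRAY_LEN(buf));
 *	lzms_output_bitstream_put_bits(&os, 0x2, 2);	// the bits "10"
 *	lzms_output_bitstream_put_bits(&os, 0x1f, 5);	// the bits "11111"
 *	if (!lzms_output_bitstream_flush(&os))
 *		;  // overrun: buf was too small
 */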
/* Initialize the range encoder @rc to write forwards to the specified
 * compressed data buffer @out that is @out_limit 16-bit integers long.  */
static void
lzms_range_encoder_raw_init(struct lzms_range_encoder_raw *rc,
			    le16 *out, size_t out_limit)
{
	rc->low = 0;
	rc->range = 0xffffffff;
	rc->cache = 0;
	rc->cache_size = 1;
	rc->out = out;
	rc->num_le16_remaining = out_limit;
	rc->first = true;
	rc->overrun = false;
}
/*
 * Attempt to flush bits from the range encoder.
 *
 * Note: this is based on the public domain code for LZMA written by Igor
 * Pavlov.  The only differences in this function are that in LZMS the bits
 * must be output in 16-bit coding units instead of 8-bit coding units, and
 * that in LZMS the first coding unit is not ignored by the decompressor, so
 * the encoder cannot output a dummy value to that position.
 *
 * The basic idea is that we're writing bits from @rc->low to the output.
 * However, due to carrying, the writing of coding units with value 0xffff, as
 * well as one prior coding unit, must be delayed until it is determined
 * whether a carry is needed.
 */
static void
lzms_range_encoder_raw_shift_low(struct lzms_range_encoder_raw *rc)
{
	LZMS_DEBUG("low=%"PRIx64", cache=%"PRIx64", cache_size=%u",
		   rc->low, rc->cache, rc->cache_size);
	if ((u32)(rc->low) < 0xffff0000 ||
	    (u32)(rc->low >> 32) != 0)
	{
		/* Carry not needed (rc->low < 0xffff0000), or carry occurred
		 * ((rc->low >> 32) != 0, a.k.a. the carry bit is 1).  */
		do {
			if (likely(!rc->first)) {
				if (rc->num_le16_remaining == 0) {
					rc->overrun = true;
					return;
				}
				*rc->out++ = cpu_to_le16(rc->cache +
							 (u16)(rc->low >> 32));
				--rc->num_le16_remaining;
			} else {
				rc->first = false;
			}

			rc->cache = 0xffff;
		} while (--rc->cache_size != 0);

		rc->cache = (rc->low >> 16) & 0xffff;
	}
	++rc->cache_size;
	rc->low = (rc->low & 0xffff) << 16;
}
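
/*
 * Worked example (illustrative): suppose cache == 0x1234 and cache_size == 2,
 * i.e. 0x1234 plus one 0xffff unit are pending.  If a later addition to
 * @rc->low sets the 33rd bit (a carry), the pending units are emitted as
 * 0x1235, 0x0000 (each incremented by the carry, with 0xffff wrapping to
 * 0x0000); if @rc->low instead drops below 0xffff0000 with no carry, they are
 * emitted unchanged as 0x1234, 0xffff.
 */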
static void
lzms_range_encoder_raw_normalize(struct lzms_range_encoder_raw *rc)
{
	if (rc->range <= 0xffff) {
		rc->range <<= 16;
		lzms_range_encoder_raw_shift_low(rc);
	}
}

static bool
lzms_range_encoder_raw_flush(struct lzms_range_encoder_raw *rc)
{
	for (unsigned i = 0; i < 4; i++)
		lzms_range_encoder_raw_shift_low(rc);
	return !rc->overrun;
}
/* Encode the next bit using the range encoder (raw version).
 *
 * @prob is the chance out of LZMS_PROBABILITY_MAX that the next bit is 0.  */
static void
lzms_range_encoder_raw_encode_bit(struct lzms_range_encoder_raw *rc, int bit,
				  u32 prob)
{
	lzms_range_encoder_raw_normalize(rc);

	u32 bound = (rc->range >> LZMS_PROBABILITY_BITS) * prob;
	if (bit == 0) {
		rc->range = bound;
	} else {
		rc->low += bound;
		rc->range -= bound;
	}
}
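
/*
 * Numeric example (illustrative): with rc->range == 0x40000000 and prob == 48
 * (a 48/64 chance of a 0 bit), bound == (0x40000000 >> 6) * 48 == 0x30000000.
 * Encoding a 0 shrinks the range to 0x30000000; encoding a 1 instead advances
 * rc->low by 0x30000000 and shrinks the range to 0x10000000.
 */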
/* Encode a bit using the specified range encoder.  This wraps around
 * lzms_range_encoder_raw_encode_bit() to handle using and updating the
 * appropriate probability table.  */
static void
lzms_range_encode_bit(struct lzms_range_encoder *enc, int bit)
{
	struct lzms_probability_entry *prob_entry;
	u32 prob;

	/* Load the probability entry corresponding to the current state.  */
	prob_entry = &enc->prob_entries[enc->state];

	/* Treat the number of zero bits in the most recently encoded
	 * LZMS_PROBABILITY_MAX bits with this probability entry as the chance,
	 * out of LZMS_PROBABILITY_MAX, that the next bit will be a 0.  However,
	 * don't allow 0% or 100% probabilities.  */
	prob = prob_entry->num_recent_zero_bits;
	if (prob == 0)
		prob = 1;
	else if (prob == LZMS_PROBABILITY_MAX)
		prob = LZMS_PROBABILITY_MAX - 1;

	/* Encode the next bit.  */
	lzms_range_encoder_raw_encode_bit(enc->rc, bit, prob);

	/* Update the state based on the newly encoded bit.  */
	enc->state = ((enc->state << 1) | bit) & enc->mask;

	/* Update the recent bits, including the cached count of 0's.  */
	BUILD_BUG_ON(LZMS_PROBABILITY_MAX > sizeof(prob_entry->recent_bits) * 8);
	if (bit == 0) {
		if (prob_entry->recent_bits & (1ULL << (LZMS_PROBABILITY_MAX - 1))) {
			/* Replacing 1 bit with 0 bit; increment the zero count.
			 */
			prob_entry->num_recent_zero_bits++;
		}
	} else {
		if (!(prob_entry->recent_bits & (1ULL << (LZMS_PROBABILITY_MAX - 1)))) {
			/* Replacing 0 bit with 1 bit; decrement the zero count.
			 */
			prob_entry->num_recent_zero_bits--;
		}
	}
	prob_entry->recent_bits = (prob_entry->recent_bits << 1) | bit;
}
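
/*
 * Example (illustrative): if a probability entry's 64 recent bits contain 48
 * zeroes, the next bit is encoded as a 0 with probability 48/64.  The update
 * above only touches @num_recent_zero_bits when the bit shifted out of
 * @recent_bits differs from the bit shifted in, so the cached zero count
 * stays consistent without a full recount.
 */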
/* Encode a symbol using the specified Huffman encoder.  */
static void
lzms_huffman_encode_symbol(struct lzms_huffman_encoder *enc, u32 sym)
{
	LZMS_ASSERT(sym < enc->num_syms);
	lzms_output_bitstream_put_bits(enc->os,
				       enc->codewords[sym],
				       enc->lens[sym]);
	++enc->sym_freqs[sym];
	if (++enc->num_syms_written == enc->rebuild_freq) {
		/* Adaptive code needs to be rebuilt.  */
		LZMS_DEBUG("Rebuilding code (num_syms=%u)", enc->num_syms);
		make_canonical_huffman_code(enc->num_syms,
					    LZMS_MAX_CODEWORD_LEN,
					    enc->sym_freqs,
					    enc->lens,
					    enc->codewords);

		/* Dilute the frequencies.  */
		for (unsigned i = 0; i < enc->num_syms; i++) {
			enc->sym_freqs[i] >>= 1;
			enc->sym_freqs[i] += 1;
		}
		enc->num_syms_written = 0;
	}
}
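
/*
 * Example (illustrative): at a rebuild, a symbol seen 10 times keeps
 * sym_freqs = 10/2 + 1 = 6, while an unseen symbol keeps 0/2 + 1 = 1.  The
 * "+ 1" keeps every symbol codeable, and the halving biases the rebuilt code
 * toward recent statistics.
 */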
static void
lzms_encode_length(struct lzms_huffman_encoder *enc, u32 length)
{
	unsigned slot;
	unsigned num_extra_bits;
	u32 extra_bits;

	slot = lzms_get_length_slot(length);

	num_extra_bits = lzms_extra_length_bits[slot];

	extra_bits = length - lzms_length_slot_base[slot];

	lzms_huffman_encode_symbol(enc, slot);
	lzms_output_bitstream_put_bits(enc->os, extra_bits, num_extra_bits);
}

static void
lzms_encode_offset(struct lzms_huffman_encoder *enc, u32 offset)
{
	unsigned slot;
	unsigned num_extra_bits;
	u32 extra_bits;

	slot = lzms_get_position_slot(offset);

	num_extra_bits = lzms_extra_position_bits[slot];

	extra_bits = offset - lzms_position_slot_base[slot];

	lzms_huffman_encode_symbol(enc, slot);
	lzms_output_bitstream_put_bits(enc->os, extra_bits, num_extra_bits);
}
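
/*
 * Example (illustrative, with hypothetical table values): if slot s had
 * lzms_position_slot_base[s] == 769 and lzms_extra_position_bits[s] == 8,
 * then offsets 769 through 1024 would map to slot s, and offset 1000 would be
 * encoded as the Huffman symbol for s followed by the 8 verbatim bits for
 * 1000 - 769 == 231.
 */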
static void
lzms_begin_encode_item(struct lzms_compressor *ctx)
{
	ctx->lru.lz.upcoming_offset = 0;
	ctx->lru.delta.upcoming_offset = 0;
	ctx->lru.delta.upcoming_power = 0;
}

static void
lzms_end_encode_item(struct lzms_compressor *ctx, u32 length)
{
	LZMS_ASSERT(ctx->window_size - ctx->cur_window_pos >= length);
	ctx->cur_window_pos += length;
	lzms_update_lru_queues(&ctx->lru);
}
/* Encode a literal byte.  */
static void
lzms_encode_literal(struct lzms_compressor *ctx, u8 literal)
{
	LZMS_DEBUG("Position %u: Encoding literal 0x%02x ('%c')",
		   ctx->cur_window_pos, literal, literal);

	lzms_begin_encode_item(ctx);

	/* Main bit: 0 = a literal, not a match.  */
	lzms_range_encode_bit(&ctx->main_range_encoder, 0);

	/* Encode the literal using the current literal Huffman code.  */
	lzms_huffman_encode_symbol(&ctx->literal_encoder, literal);

	lzms_end_encode_item(ctx, 1);
}
/* Encode a (length, offset) pair (LZ match).  */
static void
lzms_encode_lz_match(struct lzms_compressor *ctx, u32 length, u32 offset)
{
	int recent_offset_idx;

	LZMS_DEBUG("Position %u: Encoding LZ match {length=%u, offset=%u}",
		   ctx->cur_window_pos, length, offset);

	LZMS_ASSERT(length <= ctx->window_size - ctx->cur_window_pos);
	LZMS_ASSERT(offset <= ctx->cur_window_pos);
	LZMS_ASSERT(!memcmp(&ctx->window[ctx->cur_window_pos],
			    &ctx->window[ctx->cur_window_pos - offset],
			    length));

	lzms_begin_encode_item(ctx);

	/* Main bit: 1 = a match, not a literal.  */
	lzms_range_encode_bit(&ctx->main_range_encoder, 1);

	/* Match bit: 0 = an LZ match, not a delta match.  */
	lzms_range_encode_bit(&ctx->match_range_encoder, 0);

	/* Determine if the offset can be represented as a recent offset.  */
	for (recent_offset_idx = 0;
	     recent_offset_idx < LZMS_NUM_RECENT_OFFSETS;
	     recent_offset_idx++)
		if (offset == ctx->lru.lz.recent_offsets[recent_offset_idx])
			break;

	if (recent_offset_idx == LZMS_NUM_RECENT_OFFSETS) {
		/* Explicit offset.  */

		/* LZ match bit: 0 = explicit offset, not a recent offset.  */
		lzms_range_encode_bit(&ctx->lz_match_range_encoder, 0);

		/* Encode the match offset.  */
		lzms_encode_offset(&ctx->lz_offset_encoder, offset);
	} else {
		/* Recent offset.  */
		int i;

		/* LZ match bit: 1 = recent offset, not an explicit offset.  */
		lzms_range_encode_bit(&ctx->lz_match_range_encoder, 1);

		/* Encode the recent offset index.  A 1 bit is encoded for each
		 * index passed up.  This sequence of 1 bits is terminated by a
		 * 0 bit, or automatically when (LZMS_NUM_RECENT_OFFSETS - 1) 1
		 * bits have been encoded.  */
		for (i = 0; i < recent_offset_idx; i++)
			lzms_range_encode_bit(&ctx->lz_repeat_match_range_encoders[i], 1);

		if (i < LZMS_NUM_RECENT_OFFSETS - 1)
			lzms_range_encode_bit(&ctx->lz_repeat_match_range_encoders[i], 0);

		/* Initial update of the LZ match offset LRU queue.  */
		for (; i < LZMS_NUM_RECENT_OFFSETS; i++)
			ctx->lru.lz.recent_offsets[i] = ctx->lru.lz.recent_offsets[i + 1];
	}

	/* Encode the match length.  */
	lzms_encode_length(&ctx->length_encoder, length);

	/* Save the match offset for later insertion at the front of the LZ
	 * match offset LRU queue.  */
	ctx->lru.lz.upcoming_offset = offset;

	lzms_end_encode_item(ctx, length);
}
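
/*
 * Example (illustrative): with LZMS_NUM_RECENT_OFFSETS == 3, a match against
 * recent_offsets[2] is encoded as the two bits 1, 1 (no terminating 0 is
 * needed, since index 2 is the last possibility), whereas a match against
 * recent_offsets[0] is encoded as the single bit 0.
 */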
#define LZMS_COST_SHIFT 5

/*#define LZMS_RC_COSTS_USE_FLOATING_POINT*/

static u32
lzms_rc_costs[LZMS_PROBABILITY_MAX + 1];

#ifdef LZMS_RC_COSTS_USE_FLOATING_POINT
#  include <math.h>
#endif
static void
lzms_do_init_rc_costs(void)
{
	/* Fill in a table that maps range coding probabilities needed to code a
	 * bit X (0 or 1) to the number of bits (scaled by a constant factor, to
	 * handle fractional costs) needed to code that bit X.
	 *
	 * Consider the range of the range decoder.  To eliminate exactly half
	 * the range (logical probability of 0.5), we need exactly 1 bit.  For
	 * lower probabilities we need more bits and for higher probabilities we
	 * need fewer bits.  In general, a logical probability of N will
	 * eliminate the proportion 1 - N of the range; this information takes
	 * log2(1 / N) bits to encode.
	 *
	 * The below loop is simply calculating this number of bits for each
	 * possible probability allowed by the LZMS compression format, but
	 * without using real numbers.  To handle fractional probabilities, each
	 * cost is multiplied by (1 << LZMS_COST_SHIFT).  These techniques are
	 * based on those used by LZMA.
	 *
	 * Note that in LZMS, a probability x really means x / 64, and 0 / 64 is
	 * really interpreted as 1 / 64 and 64 / 64 is really interpreted as
	 * 63 / 64.  */
	for (u32 i = 0; i <= LZMS_PROBABILITY_MAX; i++) {
		u32 prob = i;

		if (prob == 0)
			prob = 1;
		else if (prob == LZMS_PROBABILITY_MAX)
			prob = LZMS_PROBABILITY_MAX - 1;

#ifdef LZMS_RC_COSTS_USE_FLOATING_POINT
		lzms_rc_costs[i] = log2((double)LZMS_PROBABILITY_MAX / prob) *
				   (1 << LZMS_COST_SHIFT);
#else
		/* Integer base-2 logarithm by repeated squaring.  */
		u32 w = prob;
		u32 bit_count = 0;

		for (u32 j = 0; j < LZMS_COST_SHIFT; j++) {
			w *= w;
			bit_count <<= 1;
			while (w >= (1U << 16)) {
				w >>= 1;
				++bit_count;
			}
		}
		lzms_rc_costs[i] = (LZMS_PROBABILITY_BITS << LZMS_COST_SHIFT) -
				   (15 + bit_count);
#endif
	}
}
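
/*
 * Example (illustrative): with LZMS_PROBABILITY_MAX == 64 and
 * LZMS_COST_SHIFT == 5, a probability of 32/64 costs log2(64/32) << 5 == 32
 * (exactly one bit), while a probability of 1/64 costs about
 * log2(64) << 5 == 192 (six bits).  The integer version above agrees with the
 * floating-point version to within rounding error.
 */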
static void
lzms_init_rc_costs(void)
{
	static bool done = false;
	static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

	if (unlikely(!done)) {
		pthread_mutex_lock(&mutex);
		if (!done) {
			lzms_do_init_rc_costs();
			done = true;
		}
		pthread_mutex_unlock(&mutex);
	}
}
/*
 * Return the cost to range-encode the specified bit when in the specified
 * state.
 *
 * @enc		The range encoder to use.
 * @cur_state	Current state, which indicates the probability entry to choose.
 *		Updated by this function.
 * @bit		The bit to encode (0 or 1).
 */
static u32
lzms_rc_bit_cost(const struct lzms_range_encoder *enc, u8 *cur_state, int bit)
{
	u32 prob_zero;
	u32 prob_correct;

	prob_zero = enc->prob_entries[*cur_state & enc->mask].num_recent_zero_bits;

	*cur_state = (*cur_state << 1) | bit;

	if (bit == 0)
		prob_correct = prob_zero;
	else
		prob_correct = LZMS_PROBABILITY_MAX - prob_zero;

	return lzms_rc_costs[prob_correct];
}
static u32
lzms_huffman_symbol_cost(const struct lzms_huffman_encoder *enc, u32 sym)
{
	return enc->lens[sym] << LZMS_COST_SHIFT;
}

static u32
lzms_offset_cost(const struct lzms_huffman_encoder *enc, u32 offset)
{
	unsigned slot;
	unsigned num_extra_bits;
	u32 cost = 0;

	slot = lzms_get_position_slot(offset);

	cost += lzms_huffman_symbol_cost(enc, slot);

	num_extra_bits = lzms_extra_position_bits[slot];

	cost += num_extra_bits << LZMS_COST_SHIFT;

	return cost;
}

static u32
lzms_length_cost(const struct lzms_huffman_encoder *enc, u32 length)
{
	unsigned slot;
	unsigned num_extra_bits;
	u32 cost = 0;

	slot = lzms_get_length_slot(length);

	cost += lzms_huffman_symbol_cost(enc, slot);

	num_extra_bits = lzms_extra_length_bits[slot];

	cost += num_extra_bits << LZMS_COST_SHIFT;

	return cost;
}
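
/*
 * Example (illustrative): under this cost model, an explicit-offset LZ match
 * costs the sum of the main bit, the match-type bit, the LZ-match bit, the
 * offset slot symbol plus its verbatim offset bits, and the length slot
 * symbol plus its verbatim length bits, all in the same fixed-point scale of
 * (1 << LZMS_COST_SHIFT) units per bit.
 */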
static u32
lzms_get_matches(struct lzms_compressor *ctx, struct raw_match **matches_ret)
{
	*matches_ret = ctx->matches;
	return lz_bt_get_matches(&ctx->mf, ctx->matches);
}

static void
lzms_skip_bytes(struct lzms_compressor *ctx, u32 n)
{
	lz_bt_skip_positions(&ctx->mf, n);
}
static u32
lzms_get_literal_cost(struct lzms_compressor *ctx,
		      struct lzms_adaptive_state *state, u8 literal)
{
	u32 cost = 0;

	state->lru.upcoming_offset = 0;
	lzms_update_lz_lru_queues(&state->lru);

	cost += lzms_rc_bit_cost(&ctx->main_range_encoder,
				 &state->main_state, 0);

	cost += lzms_huffman_symbol_cost(&ctx->literal_encoder, literal);

	return cost;
}
static u32
lzms_get_lz_match_cost(struct lzms_compressor *ctx,
		       struct lzms_adaptive_state *state,
		       u32 length, u32 offset)
{
	u32 cost = 0;
	int recent_offset_idx;

	cost += lzms_rc_bit_cost(&ctx->main_range_encoder,
				 &state->main_state, 1);
	cost += lzms_rc_bit_cost(&ctx->match_range_encoder,
				 &state->match_state, 0);

	for (recent_offset_idx = 0;
	     recent_offset_idx < LZMS_NUM_RECENT_OFFSETS;
	     recent_offset_idx++)
		if (offset == state->lru.recent_offsets[recent_offset_idx])
			break;

	if (recent_offset_idx == LZMS_NUM_RECENT_OFFSETS) {
		/* Explicit offset.  */
		cost += lzms_rc_bit_cost(&ctx->lz_match_range_encoder,
					 &state->lz_match_state, 0);

		cost += lzms_offset_cost(&ctx->lz_offset_encoder, offset);
	} else {
		/* Recent offset.  */
		int i;

		cost += lzms_rc_bit_cost(&ctx->lz_match_range_encoder,
					 &state->lz_match_state, 1);

		/* Cost the same 1 bits and terminating 0 bit that
		 * lzms_encode_lz_match() would encode for this index.  */
		for (i = 0; i < recent_offset_idx; i++)
			cost += lzms_rc_bit_cost(&ctx->lz_repeat_match_range_encoders[i],
						 &state->lz_repeat_match_state[i], 1);

		if (i < LZMS_NUM_RECENT_OFFSETS - 1)
			cost += lzms_rc_bit_cost(&ctx->lz_repeat_match_range_encoders[i],
						 &state->lz_repeat_match_state[i], 0);

		/* Initial update of the LZ match offset LRU queue.  */
		for (; i < LZMS_NUM_RECENT_OFFSETS; i++)
			state->lru.recent_offsets[i] = state->lru.recent_offsets[i + 1];
	}

	cost += lzms_length_cost(&ctx->length_encoder, length);

	state->lru.upcoming_offset = offset;
	lzms_update_lz_lru_queues(&state->lru);

	return cost;
}
static struct raw_match
lzms_match_chooser_reverse_list(struct lzms_compressor *ctx, unsigned cur_pos)
{
	unsigned prev_link, saved_prev_link;
	unsigned prev_match_offset, saved_prev_match_offset;

	ctx->optimum_end_idx = cur_pos;

	saved_prev_link = ctx->optimum[cur_pos].prev.link;
	saved_prev_match_offset = ctx->optimum[cur_pos].prev.match_offset;

	do {
		prev_link = saved_prev_link;
		prev_match_offset = saved_prev_match_offset;

		saved_prev_link = ctx->optimum[prev_link].prev.link;
		saved_prev_match_offset = ctx->optimum[prev_link].prev.match_offset;

		ctx->optimum[prev_link].next.link = cur_pos;
		ctx->optimum[prev_link].next.match_offset = prev_match_offset;

		cur_pos = prev_link;
	} while (cur_pos != 0);

	ctx->optimum_cur_idx = ctx->optimum[0].next.link;

	return (struct raw_match)
		{ .len = ctx->optimum_cur_idx,
		  .offset = ctx->optimum[0].next.match_offset,
		};
}
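
/*
 * Example (illustrative): if the minimum-cost path found was
 * 0 -> 5 -> 6 -> 9 (a 5-byte match, a literal, then a 3-byte match), the
 * @prev links stored by the forward pass are 9 -> 6 -> 5 -> 0.  This function
 * rewrites them as the @next links 0 -> 5 -> 6 -> 9 and returns the first
 * item of the path ({ .len = 5 } with its match offset).
 */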
/* This is similar to lzx_get_near_optimal_match() in lzx-compress.c.
 * Read that one if you want to understand it.  */
static struct raw_match
lzms_get_near_optimal_match(struct lzms_compressor *ctx)
{
	u32 num_matches;
	struct raw_match *matches;
	struct raw_match match;
	u32 longest_len;
	u32 longest_rep_len;
	u32 longest_rep_offset;
	struct raw_match *matchptr;
	unsigned cur_pos;
	unsigned end_pos;
	struct lzms_adaptive_state initial_state;

	if (ctx->optimum_cur_idx != ctx->optimum_end_idx) {
		/* Return the next match in the previously computed path.  */
		match.len = ctx->optimum[ctx->optimum_cur_idx].next.link -
			    ctx->optimum_cur_idx;
		match.offset = ctx->optimum[ctx->optimum_cur_idx].next.match_offset;

		ctx->optimum_cur_idx = ctx->optimum[ctx->optimum_cur_idx].next.link;
		return match;
	}

	ctx->optimum_cur_idx = 0;
	ctx->optimum_end_idx = 0;

	/* Search for the longest repeat-offset match at this position.  */
	longest_rep_len = ctx->params.min_match_length - 1;
	if (lz_bt_get_position(&ctx->mf) >= 1) {
		u32 limit = min(ctx->params.max_match_length,
				lz_bt_get_remaining_size(&ctx->mf));
		for (int i = 0; i < LZMS_NUM_RECENT_OFFSETS; i++) {
			u32 offset = ctx->lru.lz.recent_offsets[i];
			const u8 *strptr = lz_bt_get_window_ptr(&ctx->mf);
			const u8 *matchptr = strptr - offset;
			u32 len = 0;
			while (len < limit && strptr[len] == matchptr[len])
				len++;
			if (len > longest_rep_len) {
				longest_rep_len = len;
				longest_rep_offset = offset;
			}
		}
	}

	if (longest_rep_len >= ctx->params.nice_match_length) {
		lzms_skip_bytes(ctx, longest_rep_len);
		return (struct raw_match) {
			.len = longest_rep_len,
			.offset = longest_rep_offset,
		};
	}

	num_matches = lzms_get_matches(ctx, &matches);

	if (num_matches) {
		longest_len = matches[num_matches - 1].len;
		if (longest_len >= ctx->params.nice_match_length) {
			lzms_skip_bytes(ctx, longest_len - 1);
			return matches[num_matches - 1];
		}
	} else {
		longest_len = 1;
	}

	/* Capture the adaptive state at the current position.  */
	initial_state.lru = ctx->lru.lz;
	initial_state.main_state = ctx->main_range_encoder.state;
	initial_state.match_state = ctx->match_range_encoder.state;
	initial_state.lz_match_state = ctx->lz_match_range_encoder.state;
	for (int i = 0; i < LZMS_NUM_RECENT_OFFSETS - 1; i++)
		initial_state.lz_repeat_match_state[i] = ctx->lz_repeat_match_range_encoders[i].state;

	ctx->optimum[1].state = initial_state;
	ctx->optimum[1].cost = lzms_get_literal_cost(ctx,
						     &ctx->optimum[1].state,
						     *(lz_bt_get_window_ptr(&ctx->mf) - 1));
	ctx->optimum[1].prev.link = 0;

	matchptr = matches;
	for (u32 len = 2; len <= longest_len; len++) {
		u32 offset = matchptr->offset;

		ctx->optimum[len].state = initial_state;
		ctx->optimum[len].prev.link = 0;
		ctx->optimum[len].prev.match_offset = offset;
		ctx->optimum[len].cost = lzms_get_lz_match_cost(ctx,
								&ctx->optimum[len].state,
								len, offset);
		if (len == matchptr->len)
			matchptr++;
	}
	end_pos = longest_len;

	if (longest_rep_len >= ctx->params.min_match_length) {
		struct lzms_adaptive_state state;
		u32 cost;

		while (end_pos < longest_rep_len)
			ctx->optimum[++end_pos].cost = MC_INFINITE_COST;

		state = initial_state;
		cost = lzms_get_lz_match_cost(ctx,
					      &state,
					      longest_rep_len,
					      longest_rep_offset);
		if (cost <= ctx->optimum[longest_rep_len].cost) {
			ctx->optimum[longest_rep_len].state = state;
			ctx->optimum[longest_rep_len].prev.link = 0;
			ctx->optimum[longest_rep_len].prev.match_offset = longest_rep_offset;
			ctx->optimum[longest_rep_len].cost = cost;
		}
	}
	/* The main optimization loop.  */
	for (cur_pos = 1; ; cur_pos++) {
		u32 cost;
		struct lzms_adaptive_state state;

		if (cur_pos == end_pos || cur_pos == ctx->params.optim_array_length)
			return lzms_match_chooser_reverse_list(ctx, cur_pos);

		/* Search for the longest repeat-offset match at this position.  */
		longest_rep_len = ctx->params.min_match_length - 1;
		u32 limit = min(ctx->params.max_match_length,
				lz_bt_get_remaining_size(&ctx->mf));
		for (int i = 0; i < LZMS_NUM_RECENT_OFFSETS; i++) {
			u32 offset = ctx->optimum[cur_pos].state.lru.recent_offsets[i];
			const u8 *strptr = lz_bt_get_window_ptr(&ctx->mf);
			const u8 *matchptr = strptr - offset;
			u32 len = 0;
			while (len < limit && strptr[len] == matchptr[len])
				len++;
			if (len > longest_rep_len) {
				longest_rep_len = len;
				longest_rep_offset = offset;
			}
		}

		if (longest_rep_len >= ctx->params.nice_match_length) {
			match = lzms_match_chooser_reverse_list(ctx, cur_pos);

			ctx->optimum[cur_pos].next.match_offset = longest_rep_offset;
			ctx->optimum[cur_pos].next.link = cur_pos + longest_rep_len;
			ctx->optimum_end_idx = cur_pos + longest_rep_len;

			lzms_skip_bytes(ctx, longest_rep_len);

			return match;
		}

		num_matches = lzms_get_matches(ctx, &matches);

		if (num_matches) {
			longest_len = matches[num_matches - 1].len;
			if (longest_len >= ctx->params.nice_match_length) {
				match = lzms_match_chooser_reverse_list(ctx, cur_pos);

				ctx->optimum[cur_pos].next.match_offset =
					matches[num_matches - 1].offset;
				ctx->optimum[cur_pos].next.link = cur_pos + longest_len;
				ctx->optimum_end_idx = cur_pos + longest_len;

				lzms_skip_bytes(ctx, longest_len - 1);

				return match;
			}
		} else {
			longest_len = 1;
		}

		while (end_pos < cur_pos + longest_len)
			ctx->optimum[++end_pos].cost = MC_INFINITE_COST;

		/* Consider proceeding with a literal byte.  */
		state = ctx->optimum[cur_pos].state;
		cost = ctx->optimum[cur_pos].cost +
			lzms_get_literal_cost(ctx,
					      &state,
					      *(lz_bt_get_window_ptr(&ctx->mf) - 1));
		if (cost < ctx->optimum[cur_pos + 1].cost) {
			ctx->optimum[cur_pos + 1].state = state;
			ctx->optimum[cur_pos + 1].cost = cost;
			ctx->optimum[cur_pos + 1].prev.link = cur_pos;
		}

		/* Consider proceeding with a match of each possible length.  */
		matchptr = matches;
		for (u32 len = 2; len <= longest_len; len++) {
			u32 offset;

			offset = matchptr->offset;
			state = ctx->optimum[cur_pos].state;

			cost = ctx->optimum[cur_pos].cost +
				lzms_get_lz_match_cost(ctx, &state, len, offset);
			if (cost < ctx->optimum[cur_pos + len].cost) {
				ctx->optimum[cur_pos + len].state = state;
				ctx->optimum[cur_pos + len].prev.link = cur_pos;
				ctx->optimum[cur_pos + len].prev.match_offset = offset;
				ctx->optimum[cur_pos + len].cost = cost;
			}
			if (len == matchptr->len)
				matchptr++;
		}

		/* Consider proceeding with the longest repeat-offset match.  */
		if (longest_rep_len >= ctx->params.min_match_length) {

			while (end_pos < cur_pos + longest_rep_len)
				ctx->optimum[++end_pos].cost = MC_INFINITE_COST;

			state = ctx->optimum[cur_pos].state;

			cost = ctx->optimum[cur_pos].cost +
				lzms_get_lz_match_cost(ctx,
						       &state,
						       longest_rep_len,
						       longest_rep_offset);
			if (cost <= ctx->optimum[cur_pos + longest_rep_len].cost) {
				ctx->optimum[cur_pos + longest_rep_len].state =
					state;
				ctx->optimum[cur_pos + longest_rep_len].prev.link =
					cur_pos;
				ctx->optimum[cur_pos + longest_rep_len].prev.match_offset =
					longest_rep_offset;
				ctx->optimum[cur_pos + longest_rep_len].cost =
					cost;
			}
		}
	}
}
/*
 * The main loop for the LZMS compressor.
 *
 * Notes:
 *
 * - This uses near-optimal LZ parsing backed by a binary tree match-finder.
 *
 * - This does not output any delta matches.
 *
 * - The costs of literals and matches are estimated using the range encoder
 *   states and the semi-adaptive Huffman codes.  Except for range encoding
 *   states, costs are assumed to be constant throughout a single run of the
 *   parsing algorithm, which can parse up to @optim_array_length (from the
 *   `struct wimlib_lzms_compressor_params') bytes of data.  This introduces a
 *   source of inaccuracy because the probabilities and Huffman codes can
 *   change over this part of the data.
 */
static void
lzms_encode(struct lzms_compressor *ctx)
{
	struct raw_match match;

	/* Load the window into the binary tree match-finder.  */
	lz_bt_load_window(&ctx->mf, ctx->window, ctx->window_size);

	/* Reset the match-chooser.  */
	ctx->optimum_cur_idx = 0;
	ctx->optimum_end_idx = 0;

	while (ctx->cur_window_pos != ctx->window_size) {
		match = lzms_get_near_optimal_match(ctx);
		if (match.len <= 1)
			lzms_encode_literal(ctx, ctx->window[ctx->cur_window_pos]);
		else
			lzms_encode_lz_match(ctx, match.len, match.offset);
	}
}
static void
lzms_init_range_encoder(struct lzms_range_encoder *enc,
			struct lzms_range_encoder_raw *rc, u32 num_states)
{
	enc->rc = rc;
	enc->state = 0;
	enc->mask = num_states - 1;
	for (u32 i = 0; i < num_states; i++) {
		enc->prob_entries[i].num_recent_zero_bits = LZMS_INITIAL_PROBABILITY;
		enc->prob_entries[i].recent_bits = LZMS_INITIAL_RECENT_BITS;
	}
}

static void
lzms_init_huffman_encoder(struct lzms_huffman_encoder *enc,
			  struct lzms_output_bitstream *os,
			  unsigned num_syms,
			  unsigned rebuild_freq)
{
	enc->os = os;
	enc->num_syms_written = 0;
	enc->rebuild_freq = rebuild_freq;
	enc->num_syms = num_syms;
	for (unsigned i = 0; i < num_syms; i++)
		enc->sym_freqs[i] = 1;

	make_canonical_huffman_code(enc->num_syms,
				    LZMS_MAX_CODEWORD_LEN,
				    enc->sym_freqs,
				    enc->lens,
				    enc->codewords);
}
/* Initialize the LZMS compressor.  */
static void
lzms_init_compressor(struct lzms_compressor *ctx, const u8 *udata, u32 ulen,
		     le16 *cdata, u32 clen16)
{
	unsigned num_position_slots;

	/* Copy the uncompressed data into the @ctx->window buffer.  */
	memcpy(ctx->window, udata, ulen);
	ctx->cur_window_pos = 0;
	ctx->window_size = ulen;

	/* Initialize the raw range encoder (writing forwards).  */
	lzms_range_encoder_raw_init(&ctx->rc, cdata, clen16);

	/* Initialize the output bitstream for Huffman symbols and verbatim bits
	 * (writing backwards).  */
	lzms_output_bitstream_init(&ctx->os, cdata, clen16);

	/* Calculate the number of position slots needed for this compressed
	 * block.  */
	num_position_slots = lzms_get_position_slot(ulen - 1) + 1;

	LZMS_DEBUG("Using %u position slots", num_position_slots);

	/* Initialize Huffman encoders for each alphabet used in the compressed
	 * representation.  */
	lzms_init_huffman_encoder(&ctx->literal_encoder, &ctx->os,
				  LZMS_NUM_LITERAL_SYMS,
				  LZMS_LITERAL_CODE_REBUILD_FREQ);

	lzms_init_huffman_encoder(&ctx->lz_offset_encoder, &ctx->os,
				  num_position_slots,
				  LZMS_LZ_OFFSET_CODE_REBUILD_FREQ);

	lzms_init_huffman_encoder(&ctx->length_encoder, &ctx->os,
				  LZMS_NUM_LEN_SYMS,
				  LZMS_LENGTH_CODE_REBUILD_FREQ);

	lzms_init_huffman_encoder(&ctx->delta_offset_encoder, &ctx->os,
				  num_position_slots,
				  LZMS_DELTA_OFFSET_CODE_REBUILD_FREQ);

	lzms_init_huffman_encoder(&ctx->delta_power_encoder, &ctx->os,
				  LZMS_NUM_DELTA_POWER_SYMS,
				  LZMS_DELTA_POWER_CODE_REBUILD_FREQ);

	/* Initialize range encoders, all of which wrap around the same
	 * lzms_range_encoder_raw.  */
	lzms_init_range_encoder(&ctx->main_range_encoder,
				&ctx->rc, LZMS_NUM_MAIN_STATES);

	lzms_init_range_encoder(&ctx->match_range_encoder,
				&ctx->rc, LZMS_NUM_MATCH_STATES);

	lzms_init_range_encoder(&ctx->lz_match_range_encoder,
				&ctx->rc, LZMS_NUM_LZ_MATCH_STATES);

	for (size_t i = 0; i < ARRAY_LEN(ctx->lz_repeat_match_range_encoders); i++)
		lzms_init_range_encoder(&ctx->lz_repeat_match_range_encoders[i],
					&ctx->rc, LZMS_NUM_LZ_REPEAT_MATCH_STATES);

	lzms_init_range_encoder(&ctx->delta_match_range_encoder,
				&ctx->rc, LZMS_NUM_DELTA_MATCH_STATES);

	for (size_t i = 0; i < ARRAY_LEN(ctx->delta_repeat_match_range_encoders); i++)
		lzms_init_range_encoder(&ctx->delta_repeat_match_range_encoders[i],
					&ctx->rc, LZMS_NUM_DELTA_REPEAT_MATCH_STATES);

	/* Initialize LRU match information.  */
	lzms_init_lru_queues(&ctx->lru);
}
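
/*
 * Layout note (illustrative): both streams above are handed the same buffer
 * of @clen16 coding units.  The raw range encoder fills cdata[0], cdata[1],
 * ... moving forwards, while the output bitstream fills cdata[clen16 - 1],
 * cdata[clen16 - 2], ... moving backwards; lzms_finalize() later checks that
 * the two write pointers never crossed.
 */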
/* Flush the output streams, prepare the final compressed data, and return its
 * size in bytes.
 *
 * A return value of 0 indicates that the data could not be compressed to fit in
 * the available space.  */
static size_t
lzms_finalize(struct lzms_compressor *ctx, u8 *cdata, size_t csize_avail)
{
	size_t num_forwards_bytes;
	size_t num_backwards_bytes;
	size_t compressed_size;

	/* Flush both the forwards and backwards streams, and make sure they
	 * didn't cross each other and start overwriting each other's data.  */
	if (!lzms_output_bitstream_flush(&ctx->os)) {
		LZMS_DEBUG("Backwards bitstream overrun.");
		return 0;
	}

	if (!lzms_range_encoder_raw_flush(&ctx->rc)) {
		LZMS_DEBUG("Forwards bitstream overrun.");
		return 0;
	}

	if (ctx->rc.out > ctx->os.out) {
		LZMS_DEBUG("Two bitstreams crossed.");
		return 0;
	}

	/* Now the compressed buffer contains the data output by the forwards
	 * bitstream, then empty space, then data output by the backwards
	 * bitstream.  Move the data output by the backwards bitstream to be
	 * adjacent to the data output by the forward bitstream, and calculate
	 * the compressed size that this results in.  */
	num_forwards_bytes = (u8*)ctx->rc.out - (u8*)cdata;
	num_backwards_bytes = ((u8*)cdata + csize_avail) - (u8*)ctx->os.out;

	memmove(cdata + num_forwards_bytes, ctx->os.out, num_backwards_bytes);

	compressed_size = num_forwards_bytes + num_backwards_bytes;
	LZMS_DEBUG("num_forwards_bytes=%zu, num_backwards_bytes=%zu, "
		   "compressed_size=%zu",
		   num_forwards_bytes, num_backwards_bytes, compressed_size);
	LZMS_ASSERT(compressed_size % 2 == 0);
	return compressed_size;
}
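
/*
 * Example (illustrative): with an 8-byte output buffer in which the forwards
 * stream produced 4 bytes (F) and the backwards stream produced 2 bytes (B),
 * the buffer holds F F F F . . B B before the memmove and F F F F B B
 * afterwards, giving compressed_size == 6.
 */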
static size_t
lzms_compress(const void *uncompressed_data, size_t uncompressed_size,
	      void *compressed_data, size_t compressed_size_avail, void *_ctx)
{
	struct lzms_compressor *ctx = _ctx;
	size_t compressed_size;

	LZMS_DEBUG("uncompressed_size=%zu, compressed_size_avail=%zu",
		   uncompressed_size, compressed_size_avail);

	/* Make sure the uncompressed size is compatible with this compressor.
	 */
	if (uncompressed_size > ctx->max_block_size) {
		LZMS_DEBUG("Can't compress %zu bytes: LZMS context "
			   "only supports %u bytes",
			   uncompressed_size, ctx->max_block_size);
		return 0;
	}

	/* Don't bother compressing extremely small inputs.  */
	if (uncompressed_size < 4) {
		LZMS_DEBUG("Input too small to bother compressing.");
		return 0;
	}

	/* Cap the available compressed size to a 32-bit integer and round it
	 * down to the nearest multiple of 2.  */
	if (compressed_size_avail > UINT32_MAX)
		compressed_size_avail = UINT32_MAX;
	if (compressed_size_avail & 1)
		compressed_size_avail--;

	/* Initialize the compressor structures.  */
	lzms_init_compressor(ctx, uncompressed_data, uncompressed_size,
			     compressed_data, compressed_size_avail / 2);

	/* Preprocess the uncompressed data.  */
	lzms_x86_filter(ctx->window, ctx->window_size,
			ctx->last_target_usages, false);

	/* Compute and encode a literal/match sequence that decompresses to the
	 * preprocessed data.  */
	lzms_encode(ctx);

	/* Get and return the compressed data size.  */
	compressed_size = lzms_finalize(ctx, compressed_data,
					compressed_size_avail);
	if (compressed_size == 0) {
		LZMS_DEBUG("Data did not compress to requested size or less.");
		return 0;
	}

	LZMS_DEBUG("Compressed %zu => %zu bytes",
		   uncompressed_size, compressed_size);

#if defined(ENABLE_VERIFY_COMPRESSION) || defined(ENABLE_LZMS_DEBUG)
	/* Verify that we really get the same thing back when decompressing.  */
	{
		struct wimlib_decompressor *decompressor;

		LZMS_DEBUG("Verifying LZMS compression.");

		if (0 == wimlib_create_decompressor(WIMLIB_COMPRESSION_TYPE_LZMS,
						    ctx->max_block_size,
						    NULL,
						    &decompressor))
		{
			int ret;
			ret = wimlib_decompress(compressed_data,
						compressed_size,
						ctx->window,
						uncompressed_size,
						decompressor);
			wimlib_free_decompressor(decompressor);

			if (ret) {
				ERROR("Failed to decompress data we "
				      "compressed using LZMS algorithm");
				return 0;
			}
			if (memcmp(uncompressed_data, ctx->window,
				   uncompressed_size))
			{
				ERROR("Data we compressed using LZMS algorithm "
				      "didn't decompress to original");
				return 0;
			}
		} else {
			WARNING("Failed to create decompressor for "
				"data verification!");
		}
	}
#endif /* ENABLE_LZMS_DEBUG || ENABLE_VERIFY_COMPRESSION */

	return compressed_size;
}
static void
lzms_free_compressor(void *_ctx)
{
	struct lzms_compressor *ctx = _ctx;

	if (ctx) {
		FREE(ctx->window);
		FREE(ctx->matches);
		lz_bt_destroy(&ctx->mf);
		FREE(ctx->optimum);
		FREE(ctx);
	}
}

static const struct wimlib_lzms_compressor_params lzms_default = {
	.hdr = {
		.size = sizeof(struct wimlib_lzms_compressor_params),
	},
	.min_match_length = 2,
	.max_match_length = UINT32_MAX,
	.nice_match_length = 32,
	.max_search_depth = 50,
	.optim_array_length = 1024,
};
static bool
lzms_params_valid(const struct wimlib_compressor_params_header *);

static const struct wimlib_lzms_compressor_params *
lzms_get_params(const struct wimlib_compressor_params_header *_params)
{
	const struct wimlib_lzms_compressor_params *params =
		(const struct wimlib_lzms_compressor_params*)_params;

	if (params == NULL)
		params = &lzms_default;

	LZMS_ASSERT(lzms_params_valid(&params->hdr));

	return params;
}
static int
lzms_create_compressor(size_t max_block_size,
		       const struct wimlib_compressor_params_header *_params,
		       void **ctx_ret)
{
	struct lzms_compressor *ctx;
	const struct wimlib_lzms_compressor_params *params = lzms_get_params(_params);

	if (max_block_size == 0 || max_block_size >= INT32_MAX) {
		LZMS_DEBUG("Invalid max_block_size (%u)", max_block_size);
		return WIMLIB_ERR_INVALID_PARAM;
	}

	ctx = CALLOC(1, sizeof(struct lzms_compressor));
	if (ctx == NULL)
		goto oom;

	ctx->window = MALLOC(max_block_size);
	if (ctx->window == NULL)
		goto oom;

	ctx->matches = MALLOC(min(params->max_match_length -
				  params->min_match_length + 1,
				  params->max_search_depth + 2) *
			      sizeof(ctx->matches[0]));
	if (ctx->matches == NULL)
		goto oom;

	if (!lz_bt_init(&ctx->mf,
			max_block_size,
			params->min_match_length,
			params->max_match_length,
			params->nice_match_length,
			params->max_search_depth))
		goto oom;

	ctx->optimum = MALLOC((params->optim_array_length +
			       min(params->nice_match_length,
				   params->max_match_length)) *
			      sizeof(ctx->optimum[0]));
	if (ctx->optimum == NULL)
		goto oom;

	/* Initialize position and length slot data if not done already.  */
	lzms_init_slots();

	/* Initialize the range encoding cost table if not done already.  */
	lzms_init_rc_costs();

	ctx->max_block_size = max_block_size;
	memcpy(&ctx->params, params, sizeof(*params));

	*ctx_ret = ctx;
	return 0;

oom:
	lzms_free_compressor(ctx);
	return WIMLIB_ERR_NOMEM;
}
static u64
lzms_get_needed_memory(size_t max_block_size,
		       const struct wimlib_compressor_params_header *_params)
{
	const struct wimlib_lzms_compressor_params *params = lzms_get_params(_params);

	u64 size = 0;

	size += max_block_size;
	size += sizeof(struct lzms_compressor);
	size += lz_bt_get_needed_memory(max_block_size);
	size += (params->optim_array_length +
		 min(params->nice_match_length,
		     params->max_match_length)) *
		sizeof(((struct lzms_compressor *)0)->optimum[0]);
	size += min(params->max_match_length - params->min_match_length + 1,
		    params->max_search_depth + 2) *
		sizeof(((struct lzms_compressor*)0)->matches[0]);

	return size;
}
static bool
lzms_params_valid(const struct wimlib_compressor_params_header *_params)
{
	const struct wimlib_lzms_compressor_params *params =
		(const struct wimlib_lzms_compressor_params*)_params;

	if (params->hdr.size != sizeof(*params) ||
	    params->max_match_length < params->min_match_length ||
	    params->min_match_length < 2 ||
	    params->optim_array_length == 0 ||
	    min(params->max_match_length, params->nice_match_length) > 65536)
		return false;

	return true;
}

const struct compressor_ops lzms_compressor_ops = {
	.params_valid = lzms_params_valid,
	.get_needed_memory = lzms_get_needed_memory,
	.create_compressor = lzms_create_compressor,
	.compress = lzms_compress,
	.free_compressor = lzms_free_compressor,
};