/*
 * Copyright (C) 2013 Eric Biggers
 *
 * This file is part of wimlib, a library for working with WIM files.
 *
 * wimlib is free software; you can redistribute it and/or modify it under the
 * terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version.
 *
 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with wimlib; if not, see http://www.gnu.org/licenses/.
 */
/* This is a compressor for the LZMS compression format.  More details about
 * this format can be found in lzms-decompress.c.
 *
 * This is currently an unsophisticated implementation that is fast but does
 * not attain the best compression ratios allowed by the format.  */
#include "wimlib/assert.h"
#include "wimlib/compiler.h"
#include "wimlib/compressor_ops.h"
#include "wimlib/compress_common.h"
#include "wimlib/endianness.h"
#include "wimlib/error.h"
#include "wimlib/lz_hash.h"
#include "wimlib/lz_sarray.h"
#include "wimlib/lzms.h"
#include "wimlib/util.h"
#define LZMS_OPTIM_ARRAY_SIZE 1024

struct lzms_compressor;

/* Per-position adaptive state carried through the match-chooser, so that costs
 * can be evaluated without disturbing the compressor's real encoder state.  */
struct lzms_adaptive_state {
        struct lzms_lz_lru_queues lru;
        u8 main_state;
        u8 match_state;
        u8 lz_match_state;
        u8 lz_repeat_match_state[LZMS_NUM_RECENT_OFFSETS - 1];
};
#define LZ_ADAPTIVE_STATE struct lzms_adaptive_state
#define LZ_COMPRESSOR struct lzms_compressor
#include "wimlib/lz_optimal.h"
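
/* Note: lz_optimal.h is a "template" header parameterized by the two macros
 * above; including it here instantiates lz_get_near_optimal_match() for this
 * compressor, with `struct lzms_adaptive_state' carried through the cost
 * functions as per-position adaptive state (see lzms_slow_encode() below).  */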
/* Structure used for writing raw bits to the end of the LZMS-compressed data
 * as a series of 16-bit little-endian coding units.  */
struct lzms_output_bitstream {
        /* Buffer variable containing zero or more bits that have been logically
         * written to the bitstream but not yet written to memory.  This must be
         * at least as large as the coding unit size.  */
        u16 bitbuf;

        /* Number of free (unfilled) bits currently in @bitbuf; initialized to
         * the coding unit size (16).  */
        unsigned num_free_bits;

        /* Pointer to one past the next position in the compressed data buffer
         * at which to output a 16-bit coding unit.  */
        le16 *out;

        /* Maximum number of 16-bit coding units that can still be output to
         * the compressed data buffer.  */
        size_t num_le16_remaining;

        /* Set to %true if not all coding units could be output due to
         * insufficient space.  */
        bool overrun;
};
/* Structure used for range encoding (raw version).  */
struct lzms_range_encoder_raw {

        /* A 33-bit variable that holds the low boundary of the current range.
         * The 33rd bit is needed to catch carries.  */
        u64 low;

        /* Size of the current range.  */
        u32 range;

        /* Next 16-bit coding unit to output.  */
        u64 cache;

        /* Number of 16-bit coding units whose output has been delayed due to
         * possible carrying.  The first such coding unit is @cache; all
         * subsequent such coding units are 0xffff.  */
        u32 cache_size;

        /* Pointer to the next position in the compressed data buffer at which
         * to output a 16-bit coding unit.  */
        le16 *out;

        /* Maximum number of 16-bit coding units that can still be output to
         * the compressed data buffer.  */
        size_t num_le16_remaining;

        /* %true when the very first coding unit has not yet been output.  */
        bool first;

        /* Set to %true if not all coding units could be output due to
         * insufficient space.  */
        bool overrun;
};
/* Structure used for range encoding.  This wraps around `struct
 * lzms_range_encoder_raw' to use and maintain probability entries.  */
struct lzms_range_encoder {
        /* Pointer to the raw range encoder, which has no persistent knowledge
         * of probabilities.  Multiple lzms_range_encoder's share the same
         * lzms_range_encoder_raw.  */
        struct lzms_range_encoder_raw *rc;

        /* Bits recently encoded by this range encoder.  These are used as an
         * index into @prob_entries.  */
        u32 state;

        /* Bitmask for @state to prevent its value from exceeding the number of
         * probability entries.  */
        u32 mask;

        /* Probability entries being used for this range encoder.  */
        struct lzms_probability_entry prob_entries[LZMS_MAX_NUM_STATES];
};
/* Structure used for Huffman encoding, optionally encoding larger "values" as
 * a Huffman symbol specifying a slot and a slot-dependent number of extra
 * bits.  */
struct lzms_huffman_encoder {

        /* Bitstream to write Huffman-encoded symbols and verbatim bits to.
         * Multiple lzms_huffman_encoder's share the same
         * lzms_output_bitstream.  */
        struct lzms_output_bitstream *os;

        /* Pointer to the slot base table to use.  */
        const u32 *slot_base_tab;

        /* Number of symbols that have been written using this code so far.
         * Reset to 0 whenever the code is rebuilt.  */
        u32 num_syms_written;

        /* When @num_syms_written reaches this number, the Huffman code must be
         * rebuilt.  */
        u32 rebuild_freq;

        /* Number of symbols in the represented Huffman code.  */
        unsigned num_syms;

        /* Running totals of symbol frequencies.  These are diluted slightly
         * whenever the code is rebuilt.  */
        u32 sym_freqs[LZMS_MAX_NUM_SYMS];

        /* The length, in bits, of each symbol in the Huffman code.  */
        u8 lens[LZMS_MAX_NUM_SYMS];

        /* The codeword of each symbol in the Huffman code.  */
        u16 codewords[LZMS_MAX_NUM_SYMS];
};
/* State of the LZMS compressor.  */
struct lzms_compressor {
        /* Pointer to a buffer holding the preprocessed data to compress.  */
        u8 *window;

        /* Current position in @window.  */
        u32 cur_window_pos;

        /* Size of the data in @window.  */
        u32 window_size;

        /* Temporary array used by lz_analyze_block(); must be at least as long
         * as the window.  */
        input_idx_t *prev_tab;

        /* Suffix array match-finder.  */
        struct lz_sarray lz_sarray;

        /* Temporary space for matches found by the match-finder.  */
        struct raw_match matches[64];

        /* Match-chooser for near-optimal parsing.  */
        struct lz_match_chooser mc;

        /* Maximum block size this compressor instantiation allows.  This is
         * the allocated size of @window.  */
        u32 max_block_size;

        /* Raw range encoder which outputs to the beginning of the compressed
         * data buffer, proceeding forwards.  */
        struct lzms_range_encoder_raw rc;

        /* Bitstream which outputs to the end of the compressed data buffer,
         * proceeding backwards.  */
        struct lzms_output_bitstream os;

        /* Range encoders.  */
        struct lzms_range_encoder main_range_encoder;
        struct lzms_range_encoder match_range_encoder;
        struct lzms_range_encoder lz_match_range_encoder;
        struct lzms_range_encoder lz_repeat_match_range_encoders[LZMS_NUM_RECENT_OFFSETS - 1];
        struct lzms_range_encoder delta_match_range_encoder;
        struct lzms_range_encoder delta_repeat_match_range_encoders[LZMS_NUM_RECENT_OFFSETS - 1];

        /* Huffman encoders.  */
        struct lzms_huffman_encoder literal_encoder;
        struct lzms_huffman_encoder lz_offset_encoder;
        struct lzms_huffman_encoder length_encoder;
        struct lzms_huffman_encoder delta_power_encoder;
        struct lzms_huffman_encoder delta_offset_encoder;

        /* LRU (least-recently-used) queues for match information.  */
        struct lzms_lru_queues lru;

        /* Used for preprocessing.  */
        s32 last_target_usages[65536];
};
/* Initialize the output bitstream @os to write backwards to the specified
 * compressed data buffer @out that is @out_limit 16-bit integers long.  */
static void
lzms_output_bitstream_init(struct lzms_output_bitstream *os,
                           le16 *out, size_t out_limit)
{
        os->bitbuf = 0;
        os->num_free_bits = 16;
        os->out = out + out_limit;
        os->num_le16_remaining = out_limit;
        os->overrun = false;
}
/* Write @num_bits bits, contained in the low @num_bits bits of @bits (ordered
 * from high-order to low-order), to the output bitstream @os.  */
static void
lzms_output_bitstream_put_bits(struct lzms_output_bitstream *os,
                               u32 bits, unsigned num_bits)
{
        bits &= (1U << num_bits) - 1;

        while (num_bits > os->num_free_bits) {
                /* Fill up the bit buffer and write out a full coding unit.  */
                if (unlikely(os->num_le16_remaining == 0)) {
                        os->overrun = true;
                        return;
                }

                unsigned num_fill_bits = os->num_free_bits;

                os->bitbuf <<= num_fill_bits;
                os->bitbuf |= bits >> (num_bits - num_fill_bits);

                *--os->out = cpu_to_le16(os->bitbuf);
                --os->num_le16_remaining;

                os->num_free_bits = 16;
                num_bits -= num_fill_bits;
                bits &= (1U << num_bits) - 1;
        }

        /* Buffer any remaining bits.  */
        os->bitbuf <<= num_bits;
        os->bitbuf |= bits;
        os->num_free_bits -= num_bits;
}
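
/* Worked example (illustrative): starting from an empty bitstream
 * (num_free_bits == 16), putting the 5 bits 10111 and then the 12 bits
 * 101010101010 fills the 16-bit buffer with 1011110101010101, which is
 * written as a single little-endian coding unit at *--os->out (the stream
 * grows backwards); the one leftover bit waits in @bitbuf for later calls.  */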
/* Flush the output bitstream, ensuring that all bits written to it have been
 * written to memory.  Returns %true if all bits were output successfully, or
 * %false if an overrun occurred.  */
static bool
lzms_output_bitstream_flush(struct lzms_output_bitstream *os)
{
        if (os->num_free_bits != 16)
                lzms_output_bitstream_put_bits(os, 0, os->num_free_bits + 1);
        return !os->overrun;
}
/* Initialize the range encoder @rc to write forwards to the specified
 * compressed data buffer @out that is @out_limit 16-bit integers long.  */
static void
lzms_range_encoder_raw_init(struct lzms_range_encoder_raw *rc,
                            le16 *out, size_t out_limit)
{
        rc->low = 0;
        rc->range = 0xffffffff;
        rc->cache = 0;
        rc->cache_size = 1;
        rc->out = out;
        rc->num_le16_remaining = out_limit;
        rc->first = true;
        rc->overrun = false;
}
/*
 * Attempt to flush bits from the range encoder.
 *
 * Note: this is based on the public domain code for LZMA written by Igor
 * Pavlov.  The only differences in this function are that in LZMS the bits
 * must be output in 16-bit coding units instead of 8-bit coding units, and
 * that in LZMS the first coding unit is not ignored by the decompressor, so
 * the encoder cannot output a dummy value to that position.
 *
 * The basic idea is that we're writing bits from @rc->low to the output.
 * However, due to carrying, the writing of coding units with the value 0xffff,
 * as well as one prior coding unit, must be delayed until it is determined
 * whether a carry is needed.
 */
static void
lzms_range_encoder_raw_shift_low(struct lzms_range_encoder_raw *rc)
{
        LZMS_DEBUG("low=%"PRIx64", cache=%"PRIx64", cache_size=%u",
                   rc->low, rc->cache, rc->cache_size);
        if ((u32)(rc->low) < 0xffff0000 ||
            (u32)(rc->low >> 32) != 0)
        {
                /* Carry not needed (rc->low < 0xffff0000), or carry occurred
                 * ((rc->low >> 32) != 0, a.k.a. the carry bit is 1).  */
                do {
                        if (likely(!rc->first)) {
                                if (rc->num_le16_remaining == 0) {
                                        rc->overrun = true;
                                        return;
                                }
                                *rc->out++ = cpu_to_le16(rc->cache +
                                                         (u16)(rc->low >> 32));
                                --rc->num_le16_remaining;
                        } else {
                                /* The first (dummy) coding unit must not be
                                 * output in LZMS.  */
                                rc->first = false;
                        }
                        rc->cache = 0xffff;
                } while (--rc->cache_size != 0);

                rc->cache = (rc->low >> 16) & 0xffff;
        }
        ++rc->cache_size;
        rc->low = (rc->low & 0xffff) << 16;
}
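
/* Carry example (illustrative): suppose the pending units are cache == 0x1234
 * followed by two 0xffff units (cache_size == 3).  If an addition to @rc->low
 * sets the 33rd bit, the carry ripples through and 0x1235, 0x0000, 0x0000 are
 * emitted; if instead low settles below 0xffff0000, no carry can occur and
 * 0x1234, 0xffff, 0xffff are emitted unchanged.  */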
static void
lzms_range_encoder_raw_normalize(struct lzms_range_encoder_raw *rc)
{
        if (rc->range <= 0xffff) {
                rc->range <<= 16;
                lzms_range_encoder_raw_shift_low(rc);
        }
}
static bool
lzms_range_encoder_raw_flush(struct lzms_range_encoder_raw *rc)
{
        for (unsigned i = 0; i < 4; i++)
                lzms_range_encoder_raw_shift_low(rc);
        return !rc->overrun;
}
/* Encode the next bit using the range encoder (raw version).
 *
 * @prob is the chance out of LZMS_PROBABILITY_MAX that the next bit is 0.  */
static void
lzms_range_encoder_raw_encode_bit(struct lzms_range_encoder_raw *rc, int bit,
                                  u32 prob)
{
        lzms_range_encoder_raw_normalize(rc);

        u32 bound = (rc->range >> LZMS_PROBABILITY_BITS) * prob;
        if (bit == 0) {
                rc->range = bound;
        } else {
                rc->low += bound;
                rc->range -= bound;
        }
}
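
/* Worked example (illustrative, assuming LZMS_PROBABILITY_MAX == 64 and hence
 * LZMS_PROBABILITY_BITS == 6): if range == 0x40000000 and prob == 48, then
 * bound == (range >> 6) * 48 == 0x30000000.  Encoding a 0 narrows the range
 * to 0x30000000 (3/4 of it); encoding a 1 instead advances low by 0x30000000
 * and narrows the range to 0x10000000 (the remaining 1/4).  */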
/* Encode a bit using the specified range encoder.  This wraps around
 * lzms_range_encoder_raw_encode_bit() to handle using and updating the
 * appropriate probability table.  */
static void
lzms_range_encode_bit(struct lzms_range_encoder *enc, int bit)
{
        struct lzms_probability_entry *prob_entry;
        u32 prob;

        /* Load the probability entry corresponding to the current state.  */
        prob_entry = &enc->prob_entries[enc->state];

        /* Treat the number of zero bits in the most recently encoded
         * LZMS_PROBABILITY_MAX bits with this probability entry as the chance,
         * out of LZMS_PROBABILITY_MAX, that the next bit will be a 0.  However,
         * don't allow 0% or 100% probabilities.  */
        prob = prob_entry->num_recent_zero_bits;
        if (prob == 0)
                prob = 1;
        else if (prob == LZMS_PROBABILITY_MAX)
                prob = LZMS_PROBABILITY_MAX - 1;

        /* Encode the next bit.  */
        lzms_range_encoder_raw_encode_bit(enc->rc, bit, prob);

        /* Update the state based on the newly encoded bit.  */
        enc->state = ((enc->state << 1) | bit) & enc->mask;

        /* Update the recent bits, including the cached count of 0's.  */
        BUILD_BUG_ON(LZMS_PROBABILITY_MAX > sizeof(prob_entry->recent_bits) * 8);
        if (bit == 0) {
                if (prob_entry->recent_bits & (1ULL << (LZMS_PROBABILITY_MAX - 1))) {
                        /* Replacing 1 bit with 0 bit; increment the zero
                         * count.  */
                        prob_entry->num_recent_zero_bits++;
                }
        } else {
                if (!(prob_entry->recent_bits & (1ULL << (LZMS_PROBABILITY_MAX - 1)))) {
                        /* Replacing 0 bit with 1 bit; decrement the zero
                         * count.  */
                        prob_entry->num_recent_zero_bits--;
                }
        }
        prob_entry->recent_bits = (prob_entry->recent_bits << 1) | bit;
}
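
/* Update example (illustrative): @recent_bits is a sliding window of the last
 * LZMS_PROBABILITY_MAX bits encoded.  If the bit shifted out of the window was
 * a 1 and the incoming bit is a 0, num_recent_zero_bits gains one; if the two
 * bits are equal, the count is unchanged.  Either way the cached zero count
 * stays consistent with the window without ever rescanning it.  */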
/* Encode a symbol using the specified Huffman encoder.  */
static void
lzms_huffman_encode_symbol(struct lzms_huffman_encoder *enc, u32 sym)
{
        LZMS_ASSERT(sym < enc->num_syms);
        if (enc->num_syms_written == enc->rebuild_freq) {
                /* Adaptive code needs to be rebuilt.  */
                LZMS_DEBUG("Rebuilding code (num_syms=%u)", enc->num_syms);
                make_canonical_huffman_code(enc->num_syms,
                                            LZMS_MAX_CODEWORD_LEN,
                                            enc->sym_freqs,
                                            enc->lens,
                                            enc->codewords);

                /* Dilute the frequencies.  */
                for (unsigned i = 0; i < enc->num_syms; i++) {
                        enc->sym_freqs[i] >>= 1;
                        enc->sym_freqs[i] += 1;
                }
                enc->num_syms_written = 0;
        }
        lzms_output_bitstream_put_bits(enc->os,
                                       enc->codewords[sym],
                                       enc->lens[sym]);
        ++enc->num_syms_written;
        ++enc->sym_freqs[sym];
}
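
/* Dilution example (illustrative): a symbol counted 10 times in the previous
 * rebuild interval carries (10 >> 1) + 1 == 6 into the next interval, so old
 * statistics decay geometrically while every symbol keeps a nonzero
 * frequency.  */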
/* Encode a number as a Huffman symbol specifying a slot, plus a number of
 * slot-dependent extra bits.  */
static void
lzms_encode_value(struct lzms_huffman_encoder *enc, u32 value)
{
        unsigned slot;
        unsigned num_extra_bits;
        u32 extra_bits;

        LZMS_ASSERT(enc->slot_base_tab != NULL);

        slot = lzms_get_slot(value, enc->slot_base_tab, enc->num_syms);

        /* Get the number of extra bits needed to represent the range of values
         * that share the slot.  */
        num_extra_bits = bsr32(enc->slot_base_tab[slot + 1] -
                               enc->slot_base_tab[slot]);

        /* Calculate the extra bits as the offset from the slot base.  */
        extra_bits = value - enc->slot_base_tab[slot];

        /* Output the slot (Huffman-encoded), then the extra bits
         * (verbatim).  */
        lzms_huffman_encode_symbol(enc, slot);
        lzms_output_bitstream_put_bits(enc->os, extra_bits, num_extra_bits);
}
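
/* Worked example (illustrative slot bases): if consecutive slot bases are 96
 * and 128, then a value of 100 falls in the slot based at 96, which covers
 * 128 - 96 == 32 values.  So num_extra_bits == bsr32(32) == 5 and
 * extra_bits == 100 - 96 == 4: the slot symbol is Huffman-encoded, then the
 * five bits 00100 are written verbatim.  */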
/* Reset the LRU queues' pending updates before encoding an item.  */
static void
lzms_begin_encode_item(struct lzms_compressor *ctx)
{
        ctx->lru.lz.upcoming_offset = 0;
        ctx->lru.delta.upcoming_offset = 0;
        ctx->lru.delta.upcoming_power = 0;
}

/* Advance the window position and commit the pending LRU updates after
 * encoding an item.  */
static void
lzms_end_encode_item(struct lzms_compressor *ctx, u32 length)
{
        LZMS_ASSERT(ctx->window_size - ctx->cur_window_pos >= length);
        ctx->cur_window_pos += length;
        lzms_update_lru_queues(&ctx->lru);
}
/* Encode a literal byte.  */
static void
lzms_encode_literal(struct lzms_compressor *ctx, u8 literal)
{
        LZMS_DEBUG("Position %u: Encoding literal 0x%02x ('%c')",
                   ctx->cur_window_pos, literal, literal);

        lzms_begin_encode_item(ctx);

        /* Main bit: 0 = a literal, not a match.  */
        lzms_range_encode_bit(&ctx->main_range_encoder, 0);

        /* Encode the literal using the current literal Huffman code.  */
        lzms_huffman_encode_symbol(&ctx->literal_encoder, literal);

        lzms_end_encode_item(ctx, 1);
}
/* Encode a (length, offset) pair (LZ match).  */
static void
lzms_encode_lz_match(struct lzms_compressor *ctx, u32 length, u32 offset)
{
        int recent_offset_idx;

        LZMS_ASSERT(!memcmp(&ctx->window[ctx->cur_window_pos],
                            &ctx->window[ctx->cur_window_pos - offset],
                            length));

        lzms_begin_encode_item(ctx);

        LZMS_DEBUG("Position %u: Encoding LZ match {length=%u, offset=%u}",
                   ctx->cur_window_pos, length, offset);

        /* Main bit: 1 = a match, not a literal.  */
        lzms_range_encode_bit(&ctx->main_range_encoder, 1);

        /* Match bit: 0 = an LZ match, not a delta match.  */
        lzms_range_encode_bit(&ctx->match_range_encoder, 0);

        /* Determine if the offset can be represented as a recent offset.  */
        for (recent_offset_idx = 0;
             recent_offset_idx < LZMS_NUM_RECENT_OFFSETS;
             recent_offset_idx++)
                if (offset == ctx->lru.lz.recent_offsets[recent_offset_idx])
                        break;

        if (recent_offset_idx == LZMS_NUM_RECENT_OFFSETS) {
                /* Explicit offset.  */

                /* LZ match bit: 0 = explicit offset, not a repeat offset.  */
                lzms_range_encode_bit(&ctx->lz_match_range_encoder, 0);

                /* Encode the match offset.  */
                lzms_encode_value(&ctx->lz_offset_encoder, offset);
        } else {
                int i;

                /* Repeat offset.  */

                /* LZ match bit: 1 = repeat offset, not an explicit offset.  */
                lzms_range_encode_bit(&ctx->lz_match_range_encoder, 1);

                /* Encode the recent offset index.  A 1 bit is encoded for each
                 * index passed up.  This sequence of 1 bits is terminated by a
                 * 0 bit, or automatically when (LZMS_NUM_RECENT_OFFSETS - 1) 1
                 * bits have been encoded.  */
                for (i = 0; i < recent_offset_idx; i++)
                        lzms_range_encode_bit(&ctx->lz_repeat_match_range_encoders[i], 1);

                if (i < LZMS_NUM_RECENT_OFFSETS - 1)
                        lzms_range_encode_bit(&ctx->lz_repeat_match_range_encoders[i], 0);

                /* Initial update of the LZ match offset LRU queue.  */
                for (; i < LZMS_NUM_RECENT_OFFSETS; i++)
                        ctx->lru.lz.recent_offsets[i] = ctx->lru.lz.recent_offsets[i + 1];
        }

        /* Encode the match length.  */
        lzms_encode_value(&ctx->length_encoder, length);

        /* Save the match offset for later insertion at the front of the LZ
         * match offset LRU queue.  */
        ctx->lru.lz.upcoming_offset = offset;

        lzms_end_encode_item(ctx, length);
}
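
/* Repeat-offset example (illustrative, with LZMS_NUM_RECENT_OFFSETS == 3): an
 * offset equal to recent_offsets[0] is coded as a single 0 bit, while one
 * equal to recent_offsets[2] is coded as two 1 bits with no terminating 0,
 * since (LZMS_NUM_RECENT_OFFSETS - 1) 1 bits already identify it uniquely.  */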
static void
lzms_record_literal(u8 literal, void *_ctx)
{
        struct lzms_compressor *ctx = _ctx;

        lzms_encode_literal(ctx, literal);
}

static void
lzms_record_match(unsigned length, unsigned offset, void *_ctx)
{
        struct lzms_compressor *ctx = _ctx;

        lzms_encode_lz_match(ctx, length, offset);
}
/* Fast but lower-quality parse: greedy/lazy matching via lz_analyze_block().
 * (The remaining lz_params fields and exact argument tuning are not critical
 * to the structure shown here.)  */
static void
lzms_fast_encode(struct lzms_compressor *ctx)
{
        static const struct lz_params lzms_lz_params = {
                .max_match      = UINT_MAX,
                .max_offset     = UINT_MAX,
                .max_lazy_match = 258,
        };

        lz_analyze_block(ctx->window,
                         ctx->window_size,
                         lzms_record_match,
                         lzms_record_literal,
                         ctx,
                         &lzms_lz_params,
                         ctx->prev_tab);
}
/* Fast heuristic cost evaluation to use in the inner loop of the match-finder.
 * Unlike lzms_get_match_cost(), which does a true cost evaluation, this simply
 * prioritizes matches based on their offset.  */
static input_idx_t
lzms_match_cost_fast(input_idx_t length, input_idx_t offset, const void *_lru)
{
        const struct lzms_lz_lru_queues *lru = _lru;

        for (input_idx_t i = 0; i < LZMS_NUM_RECENT_OFFSETS; i++)
                if (offset == lru->recent_offsets[i])
                        return i;

        return offset;
}
/* Return the cost to range-encode the specified bit in the specified state,
 * and advance the state as if the bit had been encoded.  */
static u32
lzms_rc_bit_cost(const struct lzms_range_encoder *enc, u8 *cur_state, int bit)
{
        u32 prob;
        u32 cost;

        prob = enc->prob_entries[*cur_state & enc->mask].num_recent_zero_bits;
        if (prob == 0)
                prob = 1;
        else if (prob == LZMS_PROBABILITY_MAX)
                prob = LZMS_PROBABILITY_MAX - 1;

        /* Make @prob the probability of the bit NOT taking its actual value,
         * so that less likely bits receive higher costs.  */
        if (bit == 0)
                prob = LZMS_PROBABILITY_MAX - prob;

        cost = prob * 2; /* TODO: replace this rough heuristic with a true
                          * -log2(probability)-based cost.  */

        *cur_state = (*cur_state << 1) | bit;

        return cost;
}
/* Scale factor for costs: number of cost units per bit of output.  */
#define LZMS_COST_SCALE 64

/* Return the cost to Huffman-encode the specified symbol, based on the
 * codeword lengths of the current code.  */
static u32
lzms_huffman_symbol_cost(const struct lzms_huffman_encoder *enc, u32 sym)
{
        return enc->lens[sym] * LZMS_COST_SCALE;
}
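
/* Example (illustrative): with LZMS_COST_SCALE == 64, one cost unit is 1/64
 * of a bit, so a symbol whose current codeword is 5 bits long costs
 * 5 * 64 == 320 units.  This puts Huffman costs and range-encoder bit costs
 * on the same scale, so the match-chooser can sum them freely.  */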
/* Return the cost to encode the specified value as a slot symbol plus extra
 * bits.  */
static u32
lzms_value_cost(const struct lzms_huffman_encoder *enc, u32 value)
{
        u32 cost = 0;
        unsigned slot;
        unsigned num_extra_bits;

        slot = lzms_get_slot(value, enc->slot_base_tab, enc->num_syms);

        cost += lzms_huffman_symbol_cost(enc, slot);

        num_extra_bits = bsr32(enc->slot_base_tab[slot + 1] -
                               enc->slot_base_tab[slot]);

        cost += num_extra_bits * LZMS_COST_SCALE;

        return cost;
}
/* Find matches at the current window position and return them to the
 * match-chooser, prioritized by the fast heuristic cost function.  */
static u32
lzms_get_matches(struct lzms_compressor *ctx,
                 const struct lzms_adaptive_state *cost_state,
                 struct raw_match **matches_ret)
{
        u32 num_matches;
        struct raw_match *matches = ctx->matches;

        num_matches = lz_sarray_get_matches(&ctx->lz_sarray,
                                            matches,
                                            lzms_match_cost_fast,
                                            &cost_state->lru);
#ifdef ENABLE_LZMS_DEBUG
        u32 curpos = lz_sarray_get_pos(&ctx->lz_sarray) - 1;
        LZMS_ASSERT(curpos >= 0);
        for (u32 i = 0; i < num_matches; i++) {
                LZMS_ASSERT(matches[i].len <= ctx->window_size - curpos);
                LZMS_ASSERT(matches[i].offset > 0);
                LZMS_ASSERT(matches[i].offset <= curpos);
                LZMS_ASSERT(!memcmp(&ctx->window[curpos],
                                    &ctx->window[curpos - matches[i].offset],
                                    matches[i].len));
                if (i > 0)
                        LZMS_ASSERT(matches[i - 1].len > matches[i].len);
        }
#endif
        *matches_ret = matches;
        return num_matches;
}
/* Skip the next @n bytes, advancing the match-finder without choosing
 * matches.  */
static void
lzms_skip_bytes(struct lzms_compressor *ctx, input_idx_t n)
{
        while (n--)
                lz_sarray_skip_position(&ctx->lz_sarray);
}
/* Return the cost to encode a literal at the previous window position, also
 * updating the adaptive cost state accordingly.  */
static u32
lzms_get_prev_literal_cost(struct lzms_compressor *ctx,
                           struct lzms_adaptive_state *cost_state)
{
        u8 literal = ctx->window[lz_sarray_get_pos(&ctx->lz_sarray) - 1];
        u32 cost = 0;

        cost_state->lru.upcoming_offset = 0;
        lzms_update_lz_lru_queues(&cost_state->lru);

        cost += lzms_rc_bit_cost(&ctx->main_range_encoder,
                                 &cost_state->main_state, 0);
        cost += lzms_huffman_symbol_cost(&ctx->literal_encoder, literal);

        return cost;
}
/* Return the cost to encode an LZ match of the specified length and offset,
 * also updating the adaptive cost state accordingly.  */
static u32
lzms_get_match_cost(struct lzms_compressor *ctx,
                    struct lzms_adaptive_state *cost_state,
                    input_idx_t length, input_idx_t offset)
{
        u32 cost = 0;
        int recent_offset_idx;

        cost += lzms_rc_bit_cost(&ctx->main_range_encoder,
                                 &cost_state->main_state, 1);
        cost += lzms_rc_bit_cost(&ctx->match_range_encoder,
                                 &cost_state->match_state, 0);

        for (recent_offset_idx = 0;
             recent_offset_idx < LZMS_NUM_RECENT_OFFSETS;
             recent_offset_idx++)
                if (offset == cost_state->lru.recent_offsets[recent_offset_idx])
                        break;

        if (recent_offset_idx == LZMS_NUM_RECENT_OFFSETS) {
                /* Explicit offset.  */
                cost += lzms_rc_bit_cost(&ctx->lz_match_range_encoder,
                                         &cost_state->lz_match_state, 0);

                cost += lzms_value_cost(&ctx->lz_offset_encoder, offset);
        } else {
                int i;

                /* Repeat offset.  */
                cost += lzms_rc_bit_cost(&ctx->lz_match_range_encoder,
                                         &cost_state->lz_match_state, 1);

                for (i = 0; i < recent_offset_idx; i++)
                        cost += lzms_rc_bit_cost(&ctx->lz_repeat_match_range_encoders[i],
                                                 &cost_state->lz_repeat_match_state[i], 1);

                if (i < LZMS_NUM_RECENT_OFFSETS - 1)
                        cost += lzms_rc_bit_cost(&ctx->lz_repeat_match_range_encoders[i],
                                                 &cost_state->lz_repeat_match_state[i], 0);

                /* Initial update of the LZ match offset LRU queue.  */
                for (; i < LZMS_NUM_RECENT_OFFSETS; i++)
                        cost_state->lru.recent_offsets[i] = cost_state->lru.recent_offsets[i + 1];
        }

        cost += lzms_value_cost(&ctx->length_encoder, length);

        cost_state->lru.upcoming_offset = offset;
        lzms_update_lz_lru_queues(&cost_state->lru);

        return cost;
}
static struct raw_match
lzms_get_near_optimal_match(struct lzms_compressor *ctx)
{
        struct lzms_adaptive_state initial_state = {
                .lru = ctx->lru.lz,
                .main_state = ctx->main_range_encoder.state,
                .match_state = ctx->match_range_encoder.state,
                .lz_match_state = ctx->lz_match_range_encoder.state,
        };
        return lz_get_near_optimal_match(&ctx->mc,
                                         lzms_get_matches,
                                         lzms_skip_bytes,
                                         lzms_get_prev_literal_cost,
                                         lzms_get_match_cost,
                                         ctx,
                                         &initial_state);
}
/* Slow but higher-quality parse: near-optimal matching via the suffix array
 * match-finder and the match-chooser from lz_optimal.h.  */
static void
lzms_slow_encode(struct lzms_compressor *ctx)
{
        struct raw_match match;

        /* Load window into suffix array match-finder.  */
        lz_sarray_load_window(&ctx->lz_sarray, ctx->window, ctx->window_size);

        /* Reset the match-chooser.  */
        lz_match_chooser_begin(&ctx->mc);

        while (ctx->cur_window_pos != ctx->window_size) {
                match = lzms_get_near_optimal_match(ctx);
                if (match.len <= 1) {
                        /* Literal  */
                        lzms_encode_literal(ctx, ctx->window[ctx->cur_window_pos]);
                } else {
                        /* LZ match  */
                        lzms_encode_lz_match(ctx, match.len, match.offset);
                }
        }
}
static void
lzms_init_range_encoder(struct lzms_range_encoder *enc,
                        struct lzms_range_encoder_raw *rc, u32 num_states)
{
        enc->rc = rc;
        enc->state = 0;
        enc->mask = num_states - 1;
        for (u32 i = 0; i < num_states; i++) {
                enc->prob_entries[i].num_recent_zero_bits = LZMS_INITIAL_PROBABILITY;
                enc->prob_entries[i].recent_bits = LZMS_INITIAL_RECENT_BITS;
        }
}
static void
lzms_init_huffman_encoder(struct lzms_huffman_encoder *enc,
                          struct lzms_output_bitstream *os,
                          const u32 *slot_base_tab,
                          unsigned num_syms,
                          unsigned rebuild_freq)
{
        enc->os = os;
        enc->slot_base_tab = slot_base_tab;
        enc->num_syms_written = rebuild_freq;
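        /* Note: starting @num_syms_written at the rebuild frequency forces
         * lzms_huffman_encode_symbol() to build the initial code, from the
         * all-ones frequencies set below, before the first symbol is output.  */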
        enc->rebuild_freq = rebuild_freq;
        enc->num_syms = num_syms;
        for (unsigned i = 0; i < num_syms; i++)
                enc->sym_freqs[i] = 1;
}
/* Initialize the LZMS compressor.  */
static void
lzms_init_compressor(struct lzms_compressor *ctx, const u8 *udata, u32 ulen,
                     le16 *cdata, u32 clen16)
{
        unsigned num_position_slots;

        /* Copy the uncompressed data into the @ctx->window buffer.  */
        memcpy(ctx->window, udata, ulen);
        memset(&ctx->window[ulen], 0, 8);
        ctx->cur_window_pos = 0;
        ctx->window_size = ulen;

        /* Initialize the raw range encoder (writing forwards).  */
        lzms_range_encoder_raw_init(&ctx->rc, cdata, clen16);

        /* Initialize the output bitstream for Huffman symbols and verbatim
         * bits (writing backwards).  */
        lzms_output_bitstream_init(&ctx->os, cdata, clen16);

        /* Initialize position and length slot bases if not done already.  */
        lzms_init_slot_bases();

        /* Calculate the number of position slots needed for this compressed
         * data.  */
        num_position_slots = lzms_get_position_slot(ulen - 1) + 1;

        LZMS_DEBUG("Using %u position slots", num_position_slots);

        /* Initialize Huffman encoders for each alphabet used in the compressed
         * data.  */
        lzms_init_huffman_encoder(&ctx->literal_encoder, &ctx->os,
                                  NULL, LZMS_NUM_LITERAL_SYMS,
                                  LZMS_LITERAL_CODE_REBUILD_FREQ);

        lzms_init_huffman_encoder(&ctx->lz_offset_encoder, &ctx->os,
                                  lzms_position_slot_base, num_position_slots,
                                  LZMS_LZ_OFFSET_CODE_REBUILD_FREQ);

        lzms_init_huffman_encoder(&ctx->length_encoder, &ctx->os,
                                  lzms_length_slot_base, LZMS_NUM_LEN_SYMS,
                                  LZMS_LENGTH_CODE_REBUILD_FREQ);

        lzms_init_huffman_encoder(&ctx->delta_offset_encoder, &ctx->os,
                                  lzms_position_slot_base, num_position_slots,
                                  LZMS_DELTA_OFFSET_CODE_REBUILD_FREQ);

        lzms_init_huffman_encoder(&ctx->delta_power_encoder, &ctx->os,
                                  NULL, LZMS_NUM_DELTA_POWER_SYMS,
                                  LZMS_DELTA_POWER_CODE_REBUILD_FREQ);

        /* Initialize range encoders, all of which wrap around the same
         * lzms_range_encoder_raw.  */
        lzms_init_range_encoder(&ctx->main_range_encoder,
                                &ctx->rc, LZMS_NUM_MAIN_STATES);

        lzms_init_range_encoder(&ctx->match_range_encoder,
                                &ctx->rc, LZMS_NUM_MATCH_STATES);

        lzms_init_range_encoder(&ctx->lz_match_range_encoder,
                                &ctx->rc, LZMS_NUM_LZ_MATCH_STATES);

        for (size_t i = 0; i < ARRAY_LEN(ctx->lz_repeat_match_range_encoders); i++)
                lzms_init_range_encoder(&ctx->lz_repeat_match_range_encoders[i],
                                        &ctx->rc, LZMS_NUM_LZ_REPEAT_MATCH_STATES);

        lzms_init_range_encoder(&ctx->delta_match_range_encoder,
                                &ctx->rc, LZMS_NUM_DELTA_MATCH_STATES);

        for (size_t i = 0; i < ARRAY_LEN(ctx->delta_repeat_match_range_encoders); i++)
                lzms_init_range_encoder(&ctx->delta_repeat_match_range_encoders[i],
                                        &ctx->rc, LZMS_NUM_DELTA_REPEAT_MATCH_STATES);

        /* Initialize LRU match information.  */
        lzms_init_lru_queues(&ctx->lru);
}
/* Flush the output streams, prepare the final compressed data, and return its
 * size in bytes.
 *
 * A return value of 0 indicates that the data could not be compressed to fit
 * in the available space.  */
static size_t
lzms_finalize(struct lzms_compressor *ctx, u8 *cdata, size_t csize_avail)
{
        size_t num_forwards_bytes;
        size_t num_backwards_bytes;
        size_t compressed_size;

        /* Flush both the forwards and backwards streams, and make sure they
         * didn't cross each other and start overwriting each other's data.  */
        if (!lzms_output_bitstream_flush(&ctx->os)) {
                LZMS_DEBUG("Backwards bitstream overrun.");
                return 0;
        }

        if (!lzms_range_encoder_raw_flush(&ctx->rc)) {
                LZMS_DEBUG("Forwards bitstream overrun.");
                return 0;
        }

        if (ctx->rc.out > ctx->os.out) {
                LZMS_DEBUG("Two bitstreams crossed.");
                return 0;
        }

        /* Now the compressed buffer contains the data output by the forwards
         * bitstream, then empty space, then data output by the backwards
         * bitstream.  Move the data output by the backwards bitstream to be
         * adjacent to the data output by the forwards bitstream, and calculate
         * the compressed size that this results in.  */
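        /*
         * Illustration (not to scale):
         *
         *   cdata           rc.out             os.out       cdata+csize_avail
         *     | forwards data  |   unused space   | backwards data |
         *
         * After the memmove, the backwards data immediately follows the
         * forwards data, and compressed_size == num_forwards_bytes +
         * num_backwards_bytes.
         */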
        num_forwards_bytes = (u8*)ctx->rc.out - (u8*)cdata;
        num_backwards_bytes = ((u8*)cdata + csize_avail) - (u8*)ctx->os.out;

        memmove(cdata + num_forwards_bytes, ctx->os.out, num_backwards_bytes);

        compressed_size = num_forwards_bytes + num_backwards_bytes;
        LZMS_DEBUG("num_forwards_bytes=%zu, num_backwards_bytes=%zu, "
                   "compressed_size=%zu",
                   num_forwards_bytes, num_backwards_bytes, compressed_size);
        LZMS_ASSERT(!(compressed_size & 1));
        return compressed_size;
}
static size_t
lzms_compress(const void *uncompressed_data, size_t uncompressed_size,
              void *compressed_data, size_t compressed_size_avail, void *_ctx)
{
        struct lzms_compressor *ctx = _ctx;
        size_t compressed_size;

        LZMS_DEBUG("uncompressed_size=%zu, compressed_size_avail=%zu",
                   uncompressed_size, compressed_size_avail);

        /* Make sure the uncompressed size is compatible with this
         * compressor.  */
        if (uncompressed_size > ctx->max_block_size) {
                LZMS_DEBUG("Can't compress %zu bytes: LZMS context "
                           "only supports %u bytes",
                           uncompressed_size, ctx->max_block_size);
                return 0;
        }

        /* Don't bother compressing extremely small inputs.  */
        if (uncompressed_size < 4)
                return 0;

        /* Cap the available compressed size to a 32-bit integer and round it
         * down to the nearest multiple of 2, since the compressed output is a
         * whole number of 16-bit coding units.  */
        if (compressed_size_avail > UINT32_MAX)
                compressed_size_avail = UINT32_MAX;
        if (compressed_size_avail & 1)
                compressed_size_avail--;

        /* Initialize the compressor structures.  */
        lzms_init_compressor(ctx, uncompressed_data, uncompressed_size,
                             compressed_data, compressed_size_avail / 2);

        /* Preprocess the uncompressed data with the x86 machine code
         * filter.  */
        lzms_x86_filter(ctx->window, ctx->window_size,
                        ctx->last_target_usages, false);
        /* Determine and output a literal/match sequence that decompresses to
         * the preprocessed data.  */
#if 1
        lzms_slow_encode(ctx);
#else
        lzms_fast_encode(ctx);
#endif
        /* Get and return the compressed data size.  */
        compressed_size = lzms_finalize(ctx, compressed_data,
                                        compressed_size_avail);
        if (compressed_size == 0) {
                LZMS_DEBUG("Data did not compress to requested size or less.");
                return 0;
        }

        LZMS_DEBUG("Compressed %zu => %zu bytes",
                   uncompressed_size, compressed_size);

#if defined(ENABLE_VERIFY_COMPRESSION) || defined(ENABLE_LZMS_DEBUG)
        /* Verify that we really get the same thing back when decompressing.  */
        {
                struct wimlib_decompressor *decompressor;
                int ret;

                LZMS_DEBUG("Verifying LZMS compression.");

                if (0 == wimlib_create_decompressor(WIMLIB_COMPRESSION_TYPE_LZMS,
                                                    ctx->max_block_size,
                                                    NULL,
                                                    &decompressor))
                {
                        ret = wimlib_decompress(compressed_data,
                                                compressed_size,
                                                ctx->window,
                                                uncompressed_size,
                                                decompressor);
                        wimlib_free_decompressor(decompressor);

                        if (ret) {
                                ERROR("Failed to decompress data we "
                                      "compressed using LZMS algorithm");
                                return 0;
                        }
                        if (memcmp(uncompressed_data, ctx->window,
                                   uncompressed_size))
                        {
                                ERROR("Data we compressed using LZMS algorithm "
                                      "didn't decompress to original");
                                return 0;
                        }
                } else {
                        WARNING("Failed to create decompressor for "
                                "data verification!");
                }
        }
#endif /* ENABLE_LZMS_DEBUG || ENABLE_VERIFY_COMPRESSION */

        return compressed_size;
}
static void
lzms_free_compressor(void *_ctx)
{
        struct lzms_compressor *ctx = _ctx;

        if (ctx) {
                FREE(ctx->window);
                FREE(ctx->prev_tab);
                lz_sarray_destroy(&ctx->lz_sarray);
                lz_match_chooser_destroy(&ctx->mc);
                FREE(ctx);
        }
}
static int
lzms_create_compressor(size_t max_block_size,
                       const struct wimlib_compressor_params_header *params,
                       void **ctx_ret)
{
        struct lzms_compressor *ctx;

        if (max_block_size == 0 || max_block_size >= INT32_MAX) {
                LZMS_DEBUG("Invalid max_block_size (%zu)", max_block_size);
                return WIMLIB_ERR_INVALID_PARAM;
        }

        ctx = CALLOC(1, sizeof(struct lzms_compressor));
        if (ctx == NULL)
                goto oom;

        ctx->window = MALLOC(max_block_size + 8);
        if (ctx->window == NULL)
                goto oom;

        ctx->prev_tab = MALLOC(max_block_size * sizeof(ctx->prev_tab[0]));
        if (ctx->prev_tab == NULL)
                goto oom;

        /* Note: the numeric match-finding parameters below (min/max match
         * lengths and match count limits) are representative values; the
         * exact tuning is not essential to the structure shown here.  */
        if (!lz_sarray_init(&ctx->lz_sarray,
                            max_block_size,
                            2,
                            max_block_size,
                            32,
                            ARRAY_LEN(ctx->matches)))
                goto oom;

        if (!lz_match_chooser_init(&ctx->mc,
                                   LZMS_OPTIM_ARRAY_SIZE,
                                   32,
                                   max_block_size))
                goto oom;

        ctx->max_block_size = max_block_size;

        *ctx_ret = ctx;
        return 0;

oom:
        lzms_free_compressor(ctx);
        return WIMLIB_ERR_NOMEM;
}
const struct compressor_ops lzms_compressor_ops = {
        .create_compressor = lzms_create_compressor,
        .compress          = lzms_compress,
        .free_compressor   = lzms_free_compressor,
};