/*
 * A compressor that produces output compatible with the LZMS compression
 * format.
 *
 * Copyright (C) 2013, 2014 Eric Biggers
 *
 * This file is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
 * License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this file; if not, see http://www.gnu.org/licenses/.
 */

#include "wimlib/compress_common.h"
#include "wimlib/compressor_ops.h"
#include "wimlib/endianness.h"
#include "wimlib/error.h"
#include "wimlib/lz_mf.h"
#include "wimlib/lz_repsearch.h"
#include "wimlib/lzms.h"
#include "wimlib/util.h"

#include <limits.h>
#include <pthread.h>
#include <string.h>
/* Structure used for writing raw bits as a series of 16-bit little endian
 * coding units.  This starts at the *end* of the compressed data buffer and
 * proceeds backwards.  */
struct lzms_output_bitstream {

        /* Bits that haven't yet been written to the output buffer.  */
        u64 bitbuf;

        /* Number of bits currently held in @bitbuf.  */
        unsigned bitcount;

        /* Pointer to one past the next position in the compressed data buffer
         * at which to output a 16-bit coding unit.  */
        le16 *next;

        /* Pointer to the beginning of the output buffer.  (The "end" when
         * writing backwards!)  */
        le16 *begin;
};
/* Structure used for range encoding (raw version).  This starts at the
 * *beginning* of the compressed data buffer and proceeds forward.  */
struct lzms_range_encoder_raw {

        /* A 33-bit variable that holds the low boundary of the current range.
         * The 33rd bit is needed to catch carries.  */
        u64 low;

        /* Size of the current range.  */
        u32 range;

        /* Next 16-bit coding unit to output.  */
        u16 cache;

        /* Number of 16-bit coding units whose output has been delayed due to
         * possible carrying.  The first such coding unit is @cache; all
         * subsequent such coding units are 0xffff.  */
        u32 cache_size;

        /* Pointer to the beginning of the output buffer.  */
        le16 *begin;

        /* Pointer to the position in the output buffer at which the next coding
         * unit must be written.  */
        le16 *next;

        /* Pointer just past the end of the output buffer.  */
        le16 *end;
};
/* Structure used for range encoding.  This wraps around `struct
 * lzms_range_encoder_raw' to use and maintain probability entries.  */
struct lzms_range_encoder {

        /* Pointer to the raw range encoder, which has no persistent knowledge
         * of probabilities.  Multiple lzms_range_encoder's share the same
         * lzms_range_encoder_raw.  */
        struct lzms_range_encoder_raw *rc;

        /* Bits recently encoded by this range encoder.  This is used as an
         * index into @prob_entries.  */
        u32 state;

        /* Bitmask for @state to prevent its value from exceeding the number of
         * probability entries.  */
        u32 mask;

        /* Probability entries being used for this range encoder.  */
        struct lzms_probability_entry prob_entries[LZMS_MAX_NUM_STATES];
};
/* Structure used for Huffman encoding.  */
struct lzms_huffman_encoder {

        /* Bitstream to write Huffman-encoded symbols and verbatim bits to.
         * Multiple lzms_huffman_encoder's share the same lzms_output_bitstream.
         */
        struct lzms_output_bitstream *os;

        /* Number of symbols that have been written using this code so far.
         * Reset to 0 whenever the code is rebuilt.  */
        u32 num_syms_written;

        /* When @num_syms_written reaches this number, the Huffman code must be
         * rebuilt.  */
        u32 rebuild_freq;

        /* Number of symbols in the represented Huffman code.  */
        unsigned num_syms;

        /* Running totals of symbol frequencies.  These are diluted slightly
         * whenever the code is rebuilt.  */
        u32 sym_freqs[LZMS_MAX_NUM_SYMS];

        /* The length, in bits, of each symbol in the Huffman code.  */
        u8 lens[LZMS_MAX_NUM_SYMS];

        /* The codeword of each symbol in the Huffman code.  */
        u32 codewords[LZMS_MAX_NUM_SYMS];
};
/* Internal compression parameters  */
struct lzms_compressor_params {
        u32 min_match_length;
        u32 nice_match_length;
        u32 max_search_depth;
        u32 optim_array_length;
};
/* State of the LZMS compressor  */
struct lzms_compressor {

        /* Internal compression parameters  */
        struct lzms_compressor_params params;

        /* Data currently being compressed  */
        u8 *cur_window;
        u32 cur_window_size;

        /* Lempel-Ziv match-finder  */
        struct lz_mf *mf;

        /* Temporary space to store found matches  */
        struct lz_match *matches;

        /* Per-position data for near-optimal parsing  */
        struct lzms_mc_pos_data *optimum;
        struct lzms_mc_pos_data *optimum_end;

        /* Raw range encoder which outputs to the beginning of the compressed
         * data buffer, proceeding forwards  */
        struct lzms_range_encoder_raw rc;

        /* Bitstream which outputs to the end of the compressed data buffer,
         * proceeding backwards  */
        struct lzms_output_bitstream os;

        /* Range encoders  */
        struct lzms_range_encoder main_range_encoder;
        struct lzms_range_encoder match_range_encoder;
        struct lzms_range_encoder lz_match_range_encoder;
        struct lzms_range_encoder lz_repeat_match_range_encoders[LZMS_NUM_RECENT_OFFSETS - 1];
        struct lzms_range_encoder delta_match_range_encoder;
        struct lzms_range_encoder delta_repeat_match_range_encoders[LZMS_NUM_RECENT_OFFSETS - 1];

        /* Huffman encoders  */
        struct lzms_huffman_encoder literal_encoder;
        struct lzms_huffman_encoder lz_offset_encoder;
        struct lzms_huffman_encoder length_encoder;
        struct lzms_huffman_encoder delta_power_encoder;
        struct lzms_huffman_encoder delta_offset_encoder;

        /* Used for preprocessing  */
        s32 last_target_usages[65536];

#define LZMS_NUM_FAST_LENGTHS 256
        /* Table: length => length slot for small lengths  */
        u8 length_slot_fast[LZMS_NUM_FAST_LENGTHS];

        /* Table: length => current cost for small match lengths  */
        u32 length_cost_fast[LZMS_NUM_FAST_LENGTHS];

#define LZMS_NUM_FAST_OFFSETS 32768
        /* Table: offset => offset slot for small offsets  */
        u8 offset_slot_fast[LZMS_NUM_FAST_OFFSETS];
};
/*
 * Match chooser position data:
 *
 * An array of these structures is used during the near-optimal match-choosing
 * algorithm.  They correspond to consecutive positions in the window and are
 * used to keep track of the cost to reach each position, and the match/literal
 * choices that need to be chosen to reach that position.
 */
struct lzms_mc_pos_data {

        /* The cost, in bits, of the lowest-cost path that has been found to
         * reach this position.  This can change as progressively lower cost
         * paths are found to reach this position.  */
        u32 cost;
#define MC_INFINITE_COST UINT32_MAX

        /* The match or literal that was taken to reach this position.  This can
         * change as progressively lower cost paths are found to reach this
         * position.
         *
         * This variable is divided into two bitfields.
         *
         * Literals:
         *      Low bits are 1, high bits are the literal.
         *
         * Explicit offset matches:
         *      Low bits are the match length, high bits are the offset plus 2.
         *
         * Repeat offset matches:
         *      Low bits are the match length, high bits are the queue index.
         */
        u64 mc_item_data;
#define MC_OFFSET_SHIFT 32
#define MC_LEN_MASK (((u64)1 << MC_OFFSET_SHIFT) - 1)

        /* The LZMS adaptive state that exists at this position.  This is filled
         * in lazily, only after the minimum-cost path to this position is
         * found.
         *
         * Note: the way we handle this adaptive state in the "minimum-cost"
         * parse is actually only an approximation.  It's possible for the
         * globally optimal, minimum cost path to contain a prefix, ending at a
         * position, where that path prefix is *not* the minimum cost path to
         * that position.  This can happen if such a path prefix results in a
         * different adaptive state which results in lower costs later.  We do
         * not solve this problem; we only consider the lowest cost to reach
         * each position, which seems to be an acceptable approximation.
         *
         * Note: this adaptive state also does not include the probability
         * entries or current Huffman codewords.  Those aren't maintained
         * per-position and are only updated occasionally.  */
        struct lzms_adaptive_state {
                struct lzms_lz_lru_queues lru;
                u8 main_state;
                u8 match_state;
                u8 lz_match_state;
                u8 lz_repeat_match_state[LZMS_NUM_RECENT_OFFSETS - 1];
        } state;
};
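/*
 * Illustrative sketch (added for exposition; these helpers are hypothetical
 * and do not appear in the original source): packing and unpacking the
 * 64-bit item format described above.
 */
static inline u64
mc_item_pack(u32 offset_data, u32 len)
{
        /* Literals use len == 1 and put the literal byte in the high bits.  */
        return ((u64)offset_data << MC_OFFSET_SHIFT) | len;
}

static inline void
mc_item_unpack(u64 item, u32 *offset_data, u32 *len)
{
        *len = item & MC_LEN_MASK;
        *offset_data = item >> MC_OFFSET_SHIFT;
}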
static void
lzms_init_fast_slots(struct lzms_compressor *c)
{
        /* Create table mapping small lengths to length slots.  */
        for (unsigned slot = 0, i = 0; i < LZMS_NUM_FAST_LENGTHS; i++) {
                while (i >= lzms_length_slot_base[slot + 1])
                        slot++;
                c->length_slot_fast[i] = slot;
        }

        /* Create table mapping small offsets to offset slots.  */
        for (unsigned slot = 0, i = 0; i < LZMS_NUM_FAST_OFFSETS; i++) {
                while (i >= lzms_offset_slot_base[slot + 1])
                        slot++;
                c->offset_slot_fast[i] = slot;
        }
}
static inline unsigned
lzms_get_length_slot_fast(const struct lzms_compressor *c, u32 length)
{
        if (likely(length < LZMS_NUM_FAST_LENGTHS))
                return c->length_slot_fast[length];
        else
                return lzms_get_length_slot(length);
}

static inline unsigned
lzms_get_offset_slot_fast(const struct lzms_compressor *c, u32 offset)
{
        if (offset < LZMS_NUM_FAST_OFFSETS)
                return c->offset_slot_fast[offset];
        else
                return lzms_get_offset_slot(offset);
}
/* Initialize the output bitstream @os to write backwards to the specified
 * compressed data buffer @out that is @out_limit 16-bit integers long.  */
static void
lzms_output_bitstream_init(struct lzms_output_bitstream *os,
                           le16 *out, size_t out_limit)
{
        os->bitbuf = 0;
        os->bitcount = 0;
        os->next = out + out_limit;
        os->begin = out;
}
/*
 * Write some bits, contained in the low @num_bits bits of @bits (ordered from
 * high-order to low-order), to the output bitstream @os.
 *
 * @max_num_bits is a compile-time constant that specifies the maximum number of
 * bits that can ever be written at this call site.
 */
static inline void
lzms_output_bitstream_put_varbits(struct lzms_output_bitstream *os,
                                  u32 bits, unsigned num_bits,
                                  unsigned max_num_bits)
{
        LZMS_ASSERT(num_bits <= 48);

        /* Add the bits to the bit buffer variable.  */
        os->bitcount += num_bits;
        os->bitbuf = (os->bitbuf << num_bits) | bits;

        /* Check whether any coding units need to be written.  */
        while (os->bitcount >= 16) {

                os->bitcount -= 16;

                /* Write a coding unit, unless it would underflow the buffer.  */
                if (os->next != os->begin)
                        *--os->next = cpu_to_le16(os->bitbuf >> os->bitcount);

                /* Optimization for call sites that never write more than 16
                 * bits at once.  */
                if (max_num_bits <= 16)
                        break;
        }
}
/* Flush the output bitstream, ensuring that all bits written to it have been
 * written to memory.  Returns %true if all bits have been output successfully,
 * or %false if an overrun occurred.  */
static bool
lzms_output_bitstream_flush(struct lzms_output_bitstream *os)
{
        if (os->next == os->begin)
                return false;

        if (os->bitcount != 0)
                *--os->next = cpu_to_le16(os->bitbuf << (16 - os->bitcount));

        return true;
}
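/*
 * Usage sketch (added for exposition; hypothetical code, not part of the
 * original source): write a couple of variable-length codes backwards into a
 * small buffer, then flush.  The coding units end up at the *end* of @buf,
 * moving toward its beginning.
 */
static bool
lzms_output_bitstream_example(void)
{
        le16 buf[4];
        struct lzms_output_bitstream os;

        lzms_output_bitstream_init(&os, buf, ARRAY_LEN(buf));
        lzms_output_bitstream_put_varbits(&os, 0x5, 3, 3);      /* 3 bits   */
        lzms_output_bitstream_put_varbits(&os, 0x1234, 20, 20); /* 20 bits  */
        /* The first full coding unit was stored at buf[3]; the remaining
         * 7 buffered bits are stored at buf[2] by the flush below.  */
        return lzms_output_bitstream_flush(&os);
}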
/* Initialize the range encoder @rc to write forwards to the specified
 * compressed data buffer @out that is @out_limit 16-bit integers long.  */
static void
lzms_range_encoder_raw_init(struct lzms_range_encoder_raw *rc,
                            le16 *out, size_t out_limit)
{
        rc->low = 0;
        rc->range = 0xffffffff;
        rc->cache = 0;
        rc->cache_size = 1;
        rc->begin = out;
        rc->next = out - 1;
        rc->end = out + out_limit;
}
/*
 * Attempt to flush bits from the range encoder.
 *
 * Note: this is based on the public domain code for LZMA written by Igor
 * Pavlov.  The only differences in this function are that in LZMS the bits must
 * be output in 16-bit coding units instead of 8-bit coding units, and that in
 * LZMS the first coding unit is not ignored by the decompressor, so the encoder
 * cannot output a dummy value to that position.
 *
 * The basic idea is that we're writing bits from @rc->low to the output.
 * However, due to carrying, the writing of coding units with value 0xffff, as
 * well as one prior coding unit, must be delayed until it is determined whether
 * a carry is needed.
 */
static void
lzms_range_encoder_raw_shift_low(struct lzms_range_encoder_raw *rc)
{
        if ((u32)(rc->low) < 0xffff0000 ||
            (u32)(rc->low >> 32) != 0)
        {
                /* Carry not needed (rc->low < 0xffff0000), or carry occurred
                 * ((rc->low >> 32) != 0, a.k.a. the carry bit is 1).  */
                do {
                        if (likely(rc->next >= rc->begin)) {
                                if (rc->next != rc->end)
                                        *rc->next++ = cpu_to_le16(rc->cache +
                                                                  (u16)(rc->low >> 32));
                        } else {
                                rc->next++;
                        }
                        rc->cache = 0xffff;
                } while (--rc->cache_size != 0);

                rc->cache = (rc->low >> 16) & 0xffff;
        }
        ++rc->cache_size;
        rc->low = (rc->low & 0xffff) << 16;
}
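/*
 * Worked example (added for exposition; hypothetical values): suppose
 * cache = 0x1234 and cache_size = 3, i.e. the delayed coding units are
 * <0x1234, 0xffff, 0xffff>.  If a carry out of bit 32 of @low then occurs,
 * (u16)(rc->low >> 32) is 1 and the units are finally emitted as
 * <0x1235, 0x0000, 0x0000>; the 16-bit addition wraps each pending 0xffff
 * to 0x0000.  Without a carry they are emitted unchanged.
 */
static void
lzms_carry_example(le16 out[3])
{
        u16 cache = 0x1234;     /* first delayed coding unit            */
        u32 cache_size = 3;     /* <0x1234, 0xffff, 0xffff> pending     */
        u16 carry = 1;          /* (u16)(rc->low >> 32) after the carry */

        for (u32 i = 0; i < cache_size; i++) {
                out[i] = cpu_to_le16(cache + carry); /* 0x1235, 0x0000, 0x0000 */
                cache = 0xffff;
        }
}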
static void
lzms_range_encoder_raw_normalize(struct lzms_range_encoder_raw *rc)
{
        if (rc->range <= 0xffff) {
                rc->range <<= 16;
                lzms_range_encoder_raw_shift_low(rc);
        }
}
static bool
lzms_range_encoder_raw_flush(struct lzms_range_encoder_raw *rc)
{
        for (unsigned i = 0; i < 4; i++)
                lzms_range_encoder_raw_shift_low(rc);
        return rc->next != rc->end;
}
/* Encode the next bit using the range encoder (raw version).
 *
 * @prob is the chance out of LZMS_PROBABILITY_MAX that the next bit is 0.  */
static void
lzms_range_encoder_raw_encode_bit(struct lzms_range_encoder_raw *rc,
                                  int bit, u32 prob)
{
        lzms_range_encoder_raw_normalize(rc);

        u32 bound = (rc->range >> LZMS_PROBABILITY_BITS) * prob;

        if (bit == 0) {
                rc->range = bound;
        } else {
                rc->low += bound;
                rc->range -= bound;
        }
}
/* Encode a bit using the specified range encoder.  This wraps around
 * lzms_range_encoder_raw_encode_bit() to handle using and updating the
 * appropriate state and probability entry.  */
static void
lzms_range_encode_bit(struct lzms_range_encoder *enc, int bit)
{
        struct lzms_probability_entry *prob_entry;
        u32 prob;

        /* Load the probability entry corresponding to the current state.  */
        prob_entry = &enc->prob_entries[enc->state];

        /* Update the state based on the next bit.  */
        enc->state = ((enc->state << 1) | bit) & enc->mask;

        /* Get the probability that the bit is 0.  */
        prob = lzms_get_probability(prob_entry);

        /* Update the probability entry.  */
        lzms_update_probability_entry(prob_entry, bit);

        /* Encode the bit.  */
        lzms_range_encoder_raw_encode_bit(enc->rc, bit, prob);
}
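/*
 * Example (added for exposition): with 16 states (mask == 0xf), encoding the
 * bit sequence 1, 0, 1, 1 starting from state 0 leaves the encoder in state
 * 0b1011 == 11.  That is, @state is simply a sliding window of the most
 * recently encoded bits, so each probability entry conditions on recent
 * history.
 */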
/* Called when an adaptive Huffman code needs to be rebuilt.  */
static void
lzms_rebuild_huffman_code(struct lzms_huffman_encoder *enc)
{
        make_canonical_huffman_code(enc->num_syms,
                                    LZMS_MAX_CODEWORD_LEN,
                                    enc->sym_freqs,
                                    enc->lens,
                                    enc->codewords);

        /* Dilute the frequencies.  */
        for (unsigned i = 0; i < enc->num_syms; i++) {
                enc->sym_freqs[i] >>= 1;
                enc->sym_freqs[i] += 1;
        }
        enc->num_syms_written = 0;
}
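/*
 * Example (added for exposition): a symbol counted 100 times carries weight
 * (100 >> 1) + 1 == 51 into the next rebuild interval.  Older statistics
 * therefore decay geometrically, while every symbol keeps a nonzero
 * frequency and remains encodable.
 */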
/* Encode a symbol using the specified Huffman encoder.  */
static void
lzms_huffman_encode_symbol(struct lzms_huffman_encoder *enc, unsigned sym)
{
        lzms_output_bitstream_put_varbits(enc->os,
                                          enc->codewords[sym],
                                          enc->lens[sym],
                                          LZMS_MAX_CODEWORD_LEN);
        ++enc->sym_freqs[sym];
        if (++enc->num_syms_written == enc->rebuild_freq)
                lzms_rebuild_huffman_code(enc);
}
static void
lzms_update_fast_length_costs(struct lzms_compressor *c);
/* Encode a match length.  */
static void
lzms_encode_length(struct lzms_compressor *c, u32 length)
{
        unsigned slot;
        unsigned num_extra_bits;
        u32 extra_bits;

        slot = lzms_get_length_slot_fast(c, length);

        extra_bits = length - lzms_length_slot_base[slot];
        num_extra_bits = lzms_extra_length_bits[slot];

        lzms_huffman_encode_symbol(&c->length_encoder, slot);
        if (c->length_encoder.num_syms_written == 0)
                lzms_update_fast_length_costs(c);

        lzms_output_bitstream_put_varbits(c->length_encoder.os,
                                          extra_bits, num_extra_bits, 30);
}
/* Encode an LZ match offset.  */
static void
lzms_encode_lz_offset(struct lzms_compressor *c, u32 offset)
{
        unsigned slot;
        unsigned num_extra_bits;
        u32 extra_bits;

        slot = lzms_get_offset_slot_fast(c, offset);

        extra_bits = offset - lzms_offset_slot_base[slot];
        num_extra_bits = lzms_extra_offset_bits[slot];

        lzms_huffman_encode_symbol(&c->lz_offset_encoder, slot);
        lzms_output_bitstream_put_varbits(c->lz_offset_encoder.os,
                                          extra_bits, num_extra_bits, 30);
}
/* Encode a literal byte.  */
static void
lzms_encode_literal(struct lzms_compressor *c, unsigned literal)
{
        /* Main bit: 0 = a literal, not a match.  */
        lzms_range_encode_bit(&c->main_range_encoder, 0);

        /* Encode the literal using the current literal Huffman code.  */
        lzms_huffman_encode_symbol(&c->literal_encoder, literal);
}
/* Encode an LZ repeat offset match.  */
static void
lzms_encode_lz_repeat_offset_match(struct lzms_compressor *c,
                                   u32 length, unsigned rep_index)
{
        unsigned i;

        /* Main bit: 1 = a match, not a literal.  */
        lzms_range_encode_bit(&c->main_range_encoder, 1);

        /* Match bit: 0 = an LZ match, not a delta match.  */
        lzms_range_encode_bit(&c->match_range_encoder, 0);

        /* LZ match bit: 1 = repeat offset, not an explicit offset.  */
        lzms_range_encode_bit(&c->lz_match_range_encoder, 1);

        /* Encode the repeat offset index.  A 1 bit is encoded for each index
         * passed up.  This sequence of 1 bits is terminated by a 0 bit, or
         * automatically when (LZMS_NUM_RECENT_OFFSETS - 1) 1 bits have been
         * encoded.  */
        for (i = 0; i < rep_index; i++)
                lzms_range_encode_bit(&c->lz_repeat_match_range_encoders[i], 1);

        if (i < LZMS_NUM_RECENT_OFFSETS - 1)
                lzms_range_encode_bit(&c->lz_repeat_match_range_encoders[i], 0);

        /* Encode the match length.  */
        lzms_encode_length(c, length);
}
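/*
 * Example (added for exposition): with LZMS_NUM_RECENT_OFFSETS == 3,
 * rep_index 0 is coded as the single bit 0, rep_index 1 as the bits 1 0, and
 * rep_index 2 as the bits 1 1 with no terminating 0, since the decoder then
 * knows no further index is possible.
 */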
/* Encode an LZ explicit offset match.  */
static void
lzms_encode_lz_explicit_offset_match(struct lzms_compressor *c,
                                     u32 length, u32 offset)
{
        /* Main bit: 1 = a match, not a literal.  */
        lzms_range_encode_bit(&c->main_range_encoder, 1);

        /* Match bit: 0 = an LZ match, not a delta match.  */
        lzms_range_encode_bit(&c->match_range_encoder, 0);

        /* LZ match bit: 0 = explicit offset, not a repeat offset.  */
        lzms_range_encode_bit(&c->lz_match_range_encoder, 0);

        /* Encode the match offset.  */
        lzms_encode_lz_offset(c, offset);

        /* Encode the match length.  */
        lzms_encode_length(c, length);
}
static void
lzms_encode_item(struct lzms_compressor *c, u64 mc_item_data)
{
        u32 len = mc_item_data & MC_LEN_MASK;
        u32 offset_data = mc_item_data >> MC_OFFSET_SHIFT;

        if (len == 1)
                lzms_encode_literal(c, offset_data);
        else if (offset_data < LZMS_NUM_RECENT_OFFSETS)
                lzms_encode_lz_repeat_offset_match(c, len, offset_data);
        else
                lzms_encode_lz_explicit_offset_match(c, len, offset_data - LZMS_OFFSET_OFFSET);
}
/* Encode a list of matches and literals chosen by the parsing algorithm.  */
static void
lzms_encode_item_list(struct lzms_compressor *c,
                      struct lzms_mc_pos_data *cur_optimum_ptr)
{
        struct lzms_mc_pos_data *end_optimum_ptr;
        u64 saved_item;
        u64 item;

        /* The list is currently in reverse order (last item to first item).
         * Reverse it.  */
        end_optimum_ptr = cur_optimum_ptr;
        saved_item = cur_optimum_ptr->mc_item_data;
        do {
                item = saved_item;
                cur_optimum_ptr -= item & MC_LEN_MASK;
                saved_item = cur_optimum_ptr->mc_item_data;
                cur_optimum_ptr->mc_item_data = item;
        } while (cur_optimum_ptr != c->optimum);

        /* Walk the list of items from beginning to end, encoding each item.  */
        do {
                lzms_encode_item(c, cur_optimum_ptr->mc_item_data);
                cur_optimum_ptr += (cur_optimum_ptr->mc_item_data) & MC_LEN_MASK;
        } while (cur_optimum_ptr != end_optimum_ptr);
}
/* Each bit costs 1 << LZMS_COST_SHIFT units.  */
#define LZMS_COST_SHIFT 6
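/*
 * Worked example (added for exposition): with LZMS_COST_SHIFT == 6, costs are
 * expressed in units of 1/64 bit.  A bit predicted to be 0 with probability
 * 48/64 costs log2(64/48) ~= 0.415 bits when it is in fact 0, which is stored
 * as about 0.415 * 64 ~= 27 units in the table below.
 */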
/*#define LZMS_RC_COSTS_USE_FLOATING_POINT*/

static u32
lzms_rc_costs[LZMS_PROBABILITY_MAX + 1];

#ifdef LZMS_RC_COSTS_USE_FLOATING_POINT
#  include <math.h>
#endif
static void
lzms_do_init_rc_costs(void)
{
        /* Fill in a table that maps range coding probabilities needed to code a
         * bit X (0 or 1) to the number of bits (scaled by a constant factor, to
         * handle fractional costs) needed to code that bit X.
         *
         * Consider the range of the range decoder.  To eliminate exactly half
         * the range (logical probability of 0.5), we need exactly 1 bit.  For
         * lower probabilities we need more bits and for higher probabilities we
         * need fewer bits.  In general, a logical probability of N will
         * eliminate the proportion 1 - N of the range; this information takes
         * log2(1 / N) bits to encode.
         *
         * The below loop is simply calculating this number of bits for each
         * possible probability allowed by the LZMS compression format, but
         * without using real numbers.  To handle fractional probabilities, each
         * cost is multiplied by (1 << LZMS_COST_SHIFT).  These techniques are
         * based on those used by LZMA.
         *
         * Note that in LZMS, a probability x really means x / 64, and 0 / 64 is
         * really interpreted as 1 / 64 and 64 / 64 is really interpreted as
         * 63 / 64.  */
        for (u32 i = 0; i <= LZMS_PROBABILITY_MAX; i++) {
                u32 prob = i;

                if (prob == 0)
                        prob = 1;
                else if (prob == LZMS_PROBABILITY_MAX)
                        prob = LZMS_PROBABILITY_MAX - 1;

        #ifdef LZMS_RC_COSTS_USE_FLOATING_POINT
                lzms_rc_costs[i] = log2((double)LZMS_PROBABILITY_MAX / prob) *
                                   (1 << LZMS_COST_SHIFT);
        #else
                u32 w = prob;
                u32 bit_count = 0;

                for (u32 j = 0; j < LZMS_COST_SHIFT; j++) {
                        w *= w;
                        bit_count <<= 1;
                        while (w >= ((u32)1 << 16)) {
                                w >>= 1;
                                ++bit_count;
                        }
                }
                lzms_rc_costs[i] = (LZMS_PROBABILITY_BITS << LZMS_COST_SHIFT) -
                                   (15 + bit_count);
        #endif
        }
}
static void
lzms_init_rc_costs(void)
{
        static pthread_once_t once = PTHREAD_ONCE_INIT;

        pthread_once(&once, lzms_do_init_rc_costs);
}
/* Return the cost to range-encode the specified bit from the specified state.  */
static u32
lzms_rc_bit_cost(const struct lzms_range_encoder *enc, u8 cur_state, int bit)
{
        u32 prob_zero;
        u32 prob_correct;

        prob_zero = enc->prob_entries[cur_state].num_recent_zero_bits;

        if (bit == 0)
                prob_correct = prob_zero;
        else
                prob_correct = LZMS_PROBABILITY_MAX - prob_zero;

        return lzms_rc_costs[prob_correct];
}
/* Return the cost to Huffman-encode the specified symbol.  */
static u32
lzms_huffman_symbol_cost(const struct lzms_huffman_encoder *enc, unsigned sym)
{
        return (u32)enc->lens[sym] << LZMS_COST_SHIFT;
}
/* Return the cost to encode the specified literal byte.  */
static u32
lzms_literal_cost(const struct lzms_compressor *c, unsigned literal,
                  const struct lzms_adaptive_state *state)
{
        return lzms_rc_bit_cost(&c->main_range_encoder, state->main_state, 0) +
               lzms_huffman_symbol_cost(&c->literal_encoder, literal);
}
/* Update the table that directly provides the costs for small lengths.  */
static void
lzms_update_fast_length_costs(struct lzms_compressor *c)
{
        u32 len;
        int slot = -1;
        u32 cost = 0;

        for (len = 1; len < LZMS_NUM_FAST_LENGTHS; len++) {

                while (len >= lzms_length_slot_base[slot + 1]) {
                        slot++;
                        cost = (u32)(c->length_encoder.lens[slot] +
                                     lzms_extra_length_bits[slot]) << LZMS_COST_SHIFT;
                }

                c->length_cost_fast[len] = cost;
        }
}
/* Return the cost to encode the specified match length, which must be less than
 * LZMS_NUM_FAST_LENGTHS.  */
static u32
lzms_fast_length_cost(const struct lzms_compressor *c, u32 length)
{
        LZMS_ASSERT(length < LZMS_NUM_FAST_LENGTHS);
        return c->length_cost_fast[length];
}
/* Return the cost to encode the specified LZ match offset.  */
static u32
lzms_lz_offset_cost(const struct lzms_compressor *c, u32 offset)
{
        unsigned slot = lzms_get_offset_slot_fast(c, offset);

        return (u32)(c->lz_offset_encoder.lens[slot] +
                     lzms_extra_offset_bits[slot]) << LZMS_COST_SHIFT;
}
/*
 * Consider coding the match at repeat offset index @rep_idx.  Consider each
 * length from the minimum (2) to the full match length (@rep_len).
 */
static void
lzms_consider_lz_repeat_offset_match(const struct lzms_compressor *c,
                                     struct lzms_mc_pos_data *cur_optimum_ptr,
                                     u32 rep_len, unsigned rep_idx)
{
        u32 base_cost;
        u32 cost;
        u32 len;
        unsigned i;

        base_cost = cur_optimum_ptr->cost;

        base_cost += lzms_rc_bit_cost(&c->main_range_encoder,
                                      cur_optimum_ptr->state.main_state, 1);

        base_cost += lzms_rc_bit_cost(&c->match_range_encoder,
                                      cur_optimum_ptr->state.match_state, 0);

        base_cost += lzms_rc_bit_cost(&c->lz_match_range_encoder,
                                      cur_optimum_ptr->state.lz_match_state, 1);

        for (i = 0; i < rep_idx; i++)
                base_cost += lzms_rc_bit_cost(&c->lz_repeat_match_range_encoders[i],
                                              cur_optimum_ptr->state.lz_repeat_match_state[i], 1);

        if (i < LZMS_NUM_RECENT_OFFSETS - 1)
                base_cost += lzms_rc_bit_cost(&c->lz_repeat_match_range_encoders[i],
                                              cur_optimum_ptr->state.lz_repeat_match_state[i], 0);

        len = 2;
        do {
                cost = base_cost + lzms_fast_length_cost(c, len);
                if (cost < (cur_optimum_ptr + len)->cost) {
                        (cur_optimum_ptr + len)->mc_item_data =
                                ((u64)rep_idx << MC_OFFSET_SHIFT) | len;
                        (cur_optimum_ptr + len)->cost = cost;
                }
        } while (++len <= rep_len);
}
/*
 * Consider coding each match in @matches as an explicit offset match.
 *
 * @matches must be sorted by strictly increasing length and strictly increasing
 * offset.  This is guaranteed by the match-finder.
 *
 * We consider each length from the minimum (2) to the longest
 * (matches[num_matches - 1].len).  For each length, we consider only the
 * smallest offset for which that length is available.  Although this is not
 * guaranteed to be optimal due to the possibility of a larger offset costing
 * less than a smaller offset to code, this is a very useful heuristic.
 */
static void
lzms_consider_lz_explicit_offset_matches(const struct lzms_compressor *c,
                                         struct lzms_mc_pos_data *cur_optimum_ptr,
                                         const struct lz_match matches[],
                                         u32 num_matches)
{
        u32 base_cost;
        u32 position_cost;
        u32 cost;
        u32 len;
        u32 i;

        base_cost = cur_optimum_ptr->cost;

        base_cost += lzms_rc_bit_cost(&c->main_range_encoder,
                                      cur_optimum_ptr->state.main_state, 1);

        base_cost += lzms_rc_bit_cost(&c->match_range_encoder,
                                      cur_optimum_ptr->state.match_state, 0);

        base_cost += lzms_rc_bit_cost(&c->lz_match_range_encoder,
                                      cur_optimum_ptr->state.lz_match_state, 0);

        len = 2;
        i = 0;
        do {
                position_cost = base_cost + lzms_lz_offset_cost(c, matches[i].offset);
                do {
                        cost = position_cost + lzms_fast_length_cost(c, len);
                        if (cost < (cur_optimum_ptr + len)->cost) {
                                (cur_optimum_ptr + len)->mc_item_data =
                                        ((u64)(matches[i].offset + LZMS_OFFSET_OFFSET)
                                         << MC_OFFSET_SHIFT) | len;
                                (cur_optimum_ptr + len)->cost = cost;
                        }
                } while (++len <= matches[i].len);
        } while (++i != num_matches);
}
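/*
 * Example (added for exposition; hypothetical match list): if the match-finder
 * returned { len 3, offset 10 } and { len 5, offset 100 }, then lengths 2 and
 * 3 are costed with offset 10, while lengths 4 and 5 are costed with offset
 * 100 -- for each length, the smallest offset at which that length is
 * available.
 */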
static void
lzms_init_adaptive_state(struct lzms_adaptive_state *state)
{
        unsigned i;

        lzms_init_lz_lru_queues(&state->lru);
        state->main_state = 0;
        state->match_state = 0;
        state->lz_match_state = 0;
        for (i = 0; i < LZMS_NUM_RECENT_OFFSETS - 1; i++)
                state->lz_repeat_match_state[i] = 0;
}
static inline void
lzms_update_main_state(struct lzms_adaptive_state *state, int is_match)
{
        state->main_state = ((state->main_state << 1) | is_match) % LZMS_NUM_MAIN_STATES;
}

static inline void
lzms_update_match_state(struct lzms_adaptive_state *state, int is_delta)
{
        state->match_state = ((state->match_state << 1) | is_delta) % LZMS_NUM_MATCH_STATES;
}

static inline void
lzms_update_lz_match_state(struct lzms_adaptive_state *state, int is_repeat_offset)
{
        state->lz_match_state = ((state->lz_match_state << 1) | is_repeat_offset) % LZMS_NUM_LZ_MATCH_STATES;
}
static inline void
lzms_update_lz_repeat_match_state(struct lzms_adaptive_state *state, int rep_idx)
{
        int i;

        for (i = 0; i < rep_idx; i++)
                state->lz_repeat_match_state[i] =
                        ((state->lz_repeat_match_state[i] << 1) | 1) %
                        LZMS_NUM_LZ_REPEAT_MATCH_STATES;

        if (i < LZMS_NUM_RECENT_OFFSETS - 1)
                state->lz_repeat_match_state[i] =
                        ((state->lz_repeat_match_state[i] << 1) | 0) %
                        LZMS_NUM_LZ_REPEAT_MATCH_STATES;
}
/*
 * The main near-optimal parsing routine.
 *
 * Briefly, the algorithm does an approximate minimum-cost path search to find a
 * "near-optimal" sequence of matches and literals to output, based on the
 * current cost model.  The algorithm steps forward, position by position (byte
 * by byte), and updates the minimum cost path to reach each later position that
 * can be reached using a match or literal from the current position.  This is
 * essentially Dijkstra's algorithm in disguise: the graph nodes are positions,
 * the graph edges are possible matches/literals to code, and the cost of each
 * edge is the estimated number of bits that will be required to output the
 * corresponding match or literal.  But one difference is that we actually
 * compute the lowest-cost path in pieces, where each piece is terminated when
 * there are no choices to be made.  (A toy sketch of the path-search idea
 * appears just below, before the real routine.)
 *
 * Notes:
 *
 * - This does not output any delta matches.
 *
 * - The costs of literals and matches are estimated using the range encoder
 *   states and the semi-adaptive Huffman codes.  Except for range encoding
 *   states, costs are assumed to be constant throughout a single run of the
 *   parsing algorithm, which can parse up to @optim_array_length bytes of data.
 *   This introduces a source of inaccuracy because the probabilities and
 *   Huffman codes can change over this part of the data.
 */
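/*
 * Toy sketch (added for exposition; this helper is hypothetical and not part
 * of the original source) of the forward minimum-cost path idea: each
 * position i carries a running cost, a literal relaxes the edge i -> i + 1,
 * and a match of length match_len[i] relaxes the edge i -> i + match_len[i].
 */
static u32
toy_min_cost_parse(const u32 lit_cost[], const u32 match_len[],
                   const u32 match_cost[], u32 n, u32 cost[/* n + 1 */])
{
        cost[0] = 0;
        for (u32 i = 1; i <= n; i++)
                cost[i] = MC_INFINITE_COST;

        for (u32 i = 0; i < n; i++) {
                /* Literal edge: i -> i + 1  */
                if (cost[i] + lit_cost[i] < cost[i + 1])
                        cost[i + 1] = cost[i] + lit_cost[i];

                /* Match edge: i -> i + match_len[i], if a match exists here  */
                if (match_len[i] >= 2 && i + match_len[i] <= n &&
                    cost[i] + match_cost[i] < cost[i + match_len[i]])
                        cost[i + match_len[i]] = cost[i] + match_cost[i];
        }
        return cost[n];  /* cost of the cheapest parse of all n bytes  */
}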
static void
lzms_near_optimal_parse(struct lzms_compressor *c)
{
        const u8 *window_ptr;
        const u8 *window_end;
        struct lzms_mc_pos_data *cur_optimum_ptr;
        struct lzms_mc_pos_data *end_optimum_ptr;
        u32 num_matches;
        u32 rep_max_len;
        unsigned rep_max_idx;
        u32 longest_len;
        unsigned literal;
        unsigned i;
        u32 cost;
        u32 len;
        u32 offset_data;

        window_ptr = c->cur_window;
        window_end = window_ptr + c->cur_window_size;

        lzms_init_adaptive_state(&c->optimum[0].state);

begin:
        /* Start building a new list of items, which will correspond to the next
         * piece of the overall minimum-cost path.  */

        cur_optimum_ptr = c->optimum;
        cur_optimum_ptr->cost = 0;
        end_optimum_ptr = cur_optimum_ptr;

        /* States should currently be consistent with the encoders.  */
        LZMS_ASSERT(cur_optimum_ptr->state.main_state == c->main_range_encoder.state);
        LZMS_ASSERT(cur_optimum_ptr->state.match_state == c->match_range_encoder.state);
        LZMS_ASSERT(cur_optimum_ptr->state.lz_match_state == c->lz_match_range_encoder.state);
        for (i = 0; i < LZMS_NUM_RECENT_OFFSETS - 1; i++)
                LZMS_ASSERT(cur_optimum_ptr->state.lz_repeat_match_state[i] ==
                            c->lz_repeat_match_range_encoders[i].state);

        if (window_ptr == window_end)
                return;
        /* The following loop runs once per byte in the window, except in a
         * couple of shortcut cases.  */
        for (;;) {

                /* Find explicit offset matches with the current position.  */
                num_matches = lz_mf_get_matches(c->mf, c->matches);

                if (num_matches) {
                        /*
                         * Find the longest repeat offset match with the current
                         * position.
                         *
                         * Heuristics:
                         *
                         * - Only search for repeat offset matches if the
                         *   match-finder already found at least one match.
                         *
                         * - Only consider the longest repeat offset match.  It
                         *   seems to be rare for the optimal parse to include a
                         *   repeat offset match that doesn't have the longest
                         *   length (allowing for the possibility that not all
                         *   of that length is actually used).
                         */
                        if (likely(window_ptr - c->cur_window >= LZMS_MAX_INIT_RECENT_OFFSET)) {
                                BUILD_BUG_ON(LZMS_NUM_RECENT_OFFSETS != 3);
                                rep_max_len = lz_repsearch3(window_ptr,
                                                            window_end - window_ptr,
                                                            cur_optimum_ptr->state.lru.recent_offsets,
                                                            &rep_max_idx);
                        } else {
                                rep_max_len = 0;
                        }

                        if (rep_max_len >= c->params.min_match_length) {

                                /* If there's a very long repeat offset match,
                                 * choose it immediately.  */
                                if (rep_max_len >= c->params.nice_match_length) {

                                        lz_mf_skip_positions(c->mf, rep_max_len - 1);
                                        window_ptr += rep_max_len;

                                        if (cur_optimum_ptr != c->optimum)
                                                lzms_encode_item_list(c, cur_optimum_ptr);

                                        lzms_encode_lz_repeat_offset_match(c, rep_max_len,
                                                                           rep_max_idx);

                                        c->optimum[0].state = cur_optimum_ptr->state;

                                        lzms_update_main_state(&c->optimum[0].state, 1);
                                        lzms_update_match_state(&c->optimum[0].state, 0);
                                        lzms_update_lz_match_state(&c->optimum[0].state, 1);
                                        lzms_update_lz_repeat_match_state(&c->optimum[0].state,
                                                                          rep_max_idx);

                                        c->optimum[0].state.lru.upcoming_offset =
                                                c->optimum[0].state.lru.recent_offsets[rep_max_idx];

                                        for (i = rep_max_idx; i < LZMS_NUM_RECENT_OFFSETS; i++)
                                                c->optimum[0].state.lru.recent_offsets[i] =
                                                        c->optimum[0].state.lru.recent_offsets[i + 1];

                                        lzms_update_lz_lru_queue(&c->optimum[0].state.lru);
                                        goto begin;
                                }

                                /* If reaching any positions for the first time,
                                 * initialize their costs to "infinity".  */
                                while (end_optimum_ptr < cur_optimum_ptr + rep_max_len)
                                        (++end_optimum_ptr)->cost = MC_INFINITE_COST;

                                /* Consider coding a repeat offset match.  */
                                lzms_consider_lz_repeat_offset_match(c, cur_optimum_ptr,
                                                                     rep_max_len, rep_max_idx);
                        }
                        longest_len = c->matches[num_matches - 1].len;

                        /* If there's a very long explicit offset match, choose
                         * it immediately.  */
                        if (longest_len >= c->params.nice_match_length) {

                                lz_mf_skip_positions(c->mf, longest_len - 1);
                                window_ptr += longest_len;

                                if (cur_optimum_ptr != c->optimum)
                                        lzms_encode_item_list(c, cur_optimum_ptr);

                                lzms_encode_lz_explicit_offset_match(c, longest_len,
                                                                     c->matches[num_matches - 1].offset);

                                c->optimum[0].state = cur_optimum_ptr->state;

                                lzms_update_main_state(&c->optimum[0].state, 1);
                                lzms_update_match_state(&c->optimum[0].state, 0);
                                lzms_update_lz_match_state(&c->optimum[0].state, 0);

                                c->optimum[0].state.lru.upcoming_offset =
                                        c->matches[num_matches - 1].offset;

                                lzms_update_lz_lru_queue(&c->optimum[0].state.lru);
                                goto begin;
                        }

                        /* If reaching any positions for the first time,
                         * initialize their costs to "infinity".  */
                        while (end_optimum_ptr < cur_optimum_ptr + longest_len)
                                (++end_optimum_ptr)->cost = MC_INFINITE_COST;

                        /* Consider coding an explicit offset match.  */
                        lzms_consider_lz_explicit_offset_matches(c, cur_optimum_ptr,
                                                                 c->matches, num_matches);
                } else {
                        /* No matches found.  The only choice at this position
                         * is to code a literal.  */

                        if (end_optimum_ptr == cur_optimum_ptr)
                                (++end_optimum_ptr)->cost = MC_INFINITE_COST;
                }

                /* Consider coding a literal.
                 *
                 * To avoid an extra unpredictable branch, actually checking the
                 * preferability of coding a literal is integrated into the
                 * adaptive state update code below.  */
                literal = *window_ptr++;
                cost = cur_optimum_ptr->cost +
                       lzms_literal_cost(c, literal, &cur_optimum_ptr->state);
                /* Advance to the next position.  */
                cur_optimum_ptr++;

                /* The lowest-cost path to the current position is now known.
                 * Finalize the adaptive state that results from taking this
                 * lowest-cost path.  */

                if (cost < cur_optimum_ptr->cost) {
                        /* Literal  */
                        cur_optimum_ptr->cost = cost;
                        cur_optimum_ptr->mc_item_data = ((u64)literal << MC_OFFSET_SHIFT) | 1;

                        cur_optimum_ptr->state = (cur_optimum_ptr - 1)->state;

                        lzms_update_main_state(&cur_optimum_ptr->state, 0);

                        cur_optimum_ptr->state.lru.upcoming_offset = 0;
                } else {
                        /* Match  */
                        len = cur_optimum_ptr->mc_item_data & MC_LEN_MASK;
                        offset_data = cur_optimum_ptr->mc_item_data >> MC_OFFSET_SHIFT;

                        cur_optimum_ptr->state = (cur_optimum_ptr - len)->state;

                        lzms_update_main_state(&cur_optimum_ptr->state, 1);
                        lzms_update_match_state(&cur_optimum_ptr->state, 0);

                        if (offset_data >= LZMS_NUM_RECENT_OFFSETS) {

                                /* Explicit offset LZ match  */

                                lzms_update_lz_match_state(&cur_optimum_ptr->state, 0);

                                cur_optimum_ptr->state.lru.upcoming_offset =
                                        offset_data - LZMS_OFFSET_OFFSET;
                        } else {
                                /* Repeat offset LZ match  */

                                lzms_update_lz_match_state(&cur_optimum_ptr->state, 1);
                                lzms_update_lz_repeat_match_state(&cur_optimum_ptr->state,
                                                                  offset_data);

                                cur_optimum_ptr->state.lru.upcoming_offset =
                                        cur_optimum_ptr->state.lru.recent_offsets[offset_data];

                                for (i = offset_data; i < LZMS_NUM_RECENT_OFFSETS; i++)
                                        cur_optimum_ptr->state.lru.recent_offsets[i] =
                                                cur_optimum_ptr->state.lru.recent_offsets[i + 1];
                        }
                }

                lzms_update_lz_lru_queue(&cur_optimum_ptr->state.lru);
                /*
                 * This loop will terminate when either of the following
                 * conditions is true:
                 *
                 * (1) cur_optimum_ptr == end_optimum_ptr
                 *
                 *      There are no paths that extend beyond the current
                 *      position.  In this case, any path to a later position
                 *      must pass through the current position, so we can go
                 *      ahead and choose the list of items that led to this
                 *      position.
                 *
                 * (2) cur_optimum_ptr == c->optimum_end
                 *
                 *      This bounds the number of times the algorithm can step
                 *      forward before it is guaranteed to start choosing items.
                 *      This limits the memory usage.  It also guarantees that
                 *      the parser will not go too long without updating the
                 *      probability tables.
                 *
                 * Note: no check for end-of-window is needed because
                 * end-of-window will trigger condition (1).
                 */
                if (cur_optimum_ptr == end_optimum_ptr ||
                    cur_optimum_ptr == c->optimum_end)
                {
                        c->optimum[0].state = cur_optimum_ptr->state;
                        break;
                }
        }

        /* Output the current list of items that constitute the minimum-cost
         * path to the current position.  */
        lzms_encode_item_list(c, cur_optimum_ptr);
        goto begin;
}
static void
lzms_init_range_encoder(struct lzms_range_encoder *enc,
                        struct lzms_range_encoder_raw *rc, u32 num_states)
{
        enc->rc = rc;
        enc->state = 0;
        LZMS_ASSERT(is_power_of_2(num_states));
        enc->mask = num_states - 1;
        for (u32 i = 0; i < num_states; i++) {
                enc->prob_entries[i].num_recent_zero_bits = LZMS_INITIAL_PROBABILITY;
                enc->prob_entries[i].recent_bits = LZMS_INITIAL_RECENT_BITS;
        }
}
static void
lzms_init_huffman_encoder(struct lzms_huffman_encoder *enc,
                          struct lzms_output_bitstream *os,
                          unsigned num_syms,
                          unsigned rebuild_freq)
{
        enc->os = os;
        enc->num_syms_written = 0;
        enc->rebuild_freq = rebuild_freq;
        enc->num_syms = num_syms;
        for (unsigned i = 0; i < num_syms; i++)
                enc->sym_freqs[i] = 1;

        make_canonical_huffman_code(enc->num_syms,
                                    LZMS_MAX_CODEWORD_LEN,
                                    enc->sym_freqs,
                                    enc->lens,
                                    enc->codewords);
}
/* Prepare the LZMS compressor for compressing a block of data.  */
static void
lzms_prepare_compressor(struct lzms_compressor *c, const u8 *udata, u32 ulen,
                        le16 *cdata, u32 clen16)
{
        unsigned num_offset_slots;

        /* Copy the uncompressed data into the @c->cur_window buffer.  */
        memcpy(c->cur_window, udata, ulen);
        c->cur_window_size = ulen;

        /* Initialize the raw range encoder (writing forwards).  */
        lzms_range_encoder_raw_init(&c->rc, cdata, clen16);

        /* Initialize the output bitstream for Huffman symbols and verbatim bits
         * (writing backwards).  */
        lzms_output_bitstream_init(&c->os, cdata, clen16);

        /* Calculate the number of offset slots required.  */
        num_offset_slots = lzms_get_offset_slot(ulen - 1) + 1;

        /* Initialize a Huffman encoder for each alphabet.  */
        lzms_init_huffman_encoder(&c->literal_encoder, &c->os,
                                  LZMS_NUM_LITERAL_SYMS,
                                  LZMS_LITERAL_CODE_REBUILD_FREQ);

        lzms_init_huffman_encoder(&c->lz_offset_encoder, &c->os,
                                  num_offset_slots,
                                  LZMS_LZ_OFFSET_CODE_REBUILD_FREQ);

        lzms_init_huffman_encoder(&c->length_encoder, &c->os,
                                  LZMS_NUM_LEN_SYMS,
                                  LZMS_LENGTH_CODE_REBUILD_FREQ);

        lzms_init_huffman_encoder(&c->delta_offset_encoder, &c->os,
                                  num_offset_slots,
                                  LZMS_DELTA_OFFSET_CODE_REBUILD_FREQ);

        lzms_init_huffman_encoder(&c->delta_power_encoder, &c->os,
                                  LZMS_NUM_DELTA_POWER_SYMS,
                                  LZMS_DELTA_POWER_CODE_REBUILD_FREQ);

        /* Initialize range encoders, all of which wrap around the same
         * lzms_range_encoder_raw.  */
        lzms_init_range_encoder(&c->main_range_encoder,
                                &c->rc, LZMS_NUM_MAIN_STATES);

        lzms_init_range_encoder(&c->match_range_encoder,
                                &c->rc, LZMS_NUM_MATCH_STATES);

        lzms_init_range_encoder(&c->lz_match_range_encoder,
                                &c->rc, LZMS_NUM_LZ_MATCH_STATES);

        for (unsigned i = 0; i < ARRAY_LEN(c->lz_repeat_match_range_encoders); i++)
                lzms_init_range_encoder(&c->lz_repeat_match_range_encoders[i],
                                        &c->rc, LZMS_NUM_LZ_REPEAT_MATCH_STATES);

        lzms_init_range_encoder(&c->delta_match_range_encoder,
                                &c->rc, LZMS_NUM_DELTA_MATCH_STATES);

        for (unsigned i = 0; i < ARRAY_LEN(c->delta_repeat_match_range_encoders); i++)
                lzms_init_range_encoder(&c->delta_repeat_match_range_encoders[i],
                                        &c->rc, LZMS_NUM_DELTA_REPEAT_MATCH_STATES);

        /* Set initial length costs for lengths < LZMS_NUM_FAST_LENGTHS.  */
        lzms_update_fast_length_costs(c);
}
/* Flush the output streams, prepare the final compressed data, and return its
 * size in bytes.
 *
 * A return value of 0 indicates that the data could not be compressed to fit in
 * the available space.  */
static size_t
lzms_finalize(struct lzms_compressor *c, u8 *cdata, size_t csize_avail)
{
        size_t num_forwards_bytes;
        size_t num_backwards_bytes;

        /* Flush both the forwards and backwards streams, and make sure they
         * didn't cross each other and start overwriting each other's data.  */
        if (!lzms_output_bitstream_flush(&c->os))
                return 0;

        if (!lzms_range_encoder_raw_flush(&c->rc))
                return 0;

        if (c->rc.next > c->os.next)
                return 0;

        /* Now the compressed buffer contains the data output by the forwards
         * bitstream, then empty space, then data output by the backwards
         * bitstream.  Move the data output by the backwards bitstream to be
         * adjacent to the data output by the forward bitstream, and calculate
         * the compressed size that this results in.  */
        num_forwards_bytes = (u8*)c->rc.next - (u8*)cdata;
        num_backwards_bytes = ((u8*)cdata + csize_avail) - (u8*)c->os.next;

        memmove(cdata + num_forwards_bytes, c->os.next, num_backwards_bytes);

        return num_forwards_bytes + num_backwards_bytes;
}
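/*
 * Worked example (added for exposition; the numbers are hypothetical): with
 * csize_avail = 1000 bytes, if the range encoder wrote 300 bytes forwards
 * from the start of the buffer and the bitstream wrote 200 bytes backwards
 * from the end, the backwards data occupies bytes 800..999.  The memmove()
 * above shifts it down to bytes 300..499, giving a final compressed size of
 * 300 + 200 = 500 bytes.
 */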
/* Set internal compression parameters for the specified compression level and
 * maximum window size.  */
static void
lzms_build_params(unsigned int compression_level,
                  struct lzms_compressor_params *params)
{
        /* Allow length 2 matches if the compression level is sufficiently
         * high.  */
        if (compression_level >= 45)
                params->min_match_length = 2;
        else
                params->min_match_length = 3;

        /* Scale nice_match_length and max_search_depth with the compression
         * level.  But to allow an optimization on length cost calculations,
         * don't allow nice_match_length to exceed LZMS_NUM_FAST_LENGTHS.  */
        params->nice_match_length = ((u64)compression_level * 32) / 50;
        if (params->nice_match_length < params->min_match_length)
                params->nice_match_length = params->min_match_length;
        if (params->nice_match_length > LZMS_NUM_FAST_LENGTHS)
                params->nice_match_length = LZMS_NUM_FAST_LENGTHS;
        params->max_search_depth = compression_level;

        params->optim_array_length = 1024;
}
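/*
 * Example (added for exposition): at compression level 50 this gives
 * min_match_length = 2, nice_match_length = (50 * 32) / 50 = 32, and
 * max_search_depth = 50.
 */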
/* Given the internal compression parameters and maximum window size, build the
 * Lempel-Ziv match-finder parameters.  */
static void
lzms_build_mf_params(const struct lzms_compressor_params *lzms_params,
                     u32 max_window_size, struct lz_mf_params *mf_params)
{
        memset(mf_params, 0, sizeof(*mf_params));

        /* Choose an appropriate match-finding algorithm.  */
        if (max_window_size <= 2097152)
                mf_params->algorithm = LZ_MF_BINARY_TREES;
        else if (max_window_size <= 33554432)
                mf_params->algorithm = LZ_MF_LCP_INTERVAL_TREE;
        else
                mf_params->algorithm = LZ_MF_LINKED_SUFFIX_ARRAY;

        mf_params->max_window_size = max_window_size;
        mf_params->min_match_len = lzms_params->min_match_length;
        mf_params->max_search_depth = lzms_params->max_search_depth;
        mf_params->nice_match_len = lzms_params->nice_match_length;
}
static void
lzms_free_compressor(void *_c);
static u64
lzms_get_needed_memory(size_t max_block_size, unsigned int compression_level)
{
        struct lzms_compressor_params params;
        struct lz_mf_params mf_params;
        u64 size = 0;

        if (max_block_size >= INT32_MAX)
                return 0;

        lzms_build_params(compression_level, &params);
        lzms_build_mf_params(&params, max_block_size, &mf_params);

        size += sizeof(struct lzms_compressor);

        /* cur_window  */
        size += max_block_size;

        /* mf  */
        size += lz_mf_get_needed_memory(mf_params.algorithm, max_block_size);

        /* matches  */
        size += min(params.max_search_depth, params.nice_match_length) *
                sizeof(struct lz_match);

        /* optimum  */
        size += (params.optim_array_length + params.nice_match_length) *
                sizeof(struct lzms_mc_pos_data);

        return size;
}
static int
lzms_create_compressor(size_t max_block_size, unsigned int compression_level,
                       void **ctx_ret)
{
        struct lzms_compressor *c;
        struct lzms_compressor_params params;
        struct lz_mf_params mf_params;

        if (max_block_size >= INT32_MAX)
                return WIMLIB_ERR_INVALID_PARAM;

        lzms_build_params(compression_level, &params);
        lzms_build_mf_params(&params, max_block_size, &mf_params);
        if (!lz_mf_params_valid(&mf_params))
                return WIMLIB_ERR_INVALID_PARAM;

        c = CALLOC(1, sizeof(struct lzms_compressor));
        if (!c)
                goto oom;

        c->params = params;

        c->cur_window = MALLOC(max_block_size);
        if (!c->cur_window)
                goto oom;

        c->mf = lz_mf_alloc(&mf_params);
        if (!c->mf)
                goto oom;

        c->matches = MALLOC(min(params.max_search_depth,
                                params.nice_match_length) *
                            sizeof(struct lz_match));
        if (!c->matches)
                goto oom;

        c->optimum = MALLOC((params.optim_array_length +
                             params.nice_match_length) *
                            sizeof(struct lzms_mc_pos_data));
        if (!c->optimum)
                goto oom;
        c->optimum_end = &c->optimum[params.optim_array_length];

        lzms_init_rc_costs();

        lzms_init_fast_slots(c);

        *ctx_ret = c;
        return 0;

oom:
        lzms_free_compressor(c);
        return WIMLIB_ERR_NOMEM;
}
static size_t
lzms_compress(const void *uncompressed_data, size_t uncompressed_size,
              void *compressed_data, size_t compressed_size_avail, void *_c)
{
        struct lzms_compressor *c = _c;

        /* Don't bother compressing extremely small inputs.  */
        if (uncompressed_size < 4)
                return 0;

        /* Cap the available compressed size to a 32-bit integer and round it
         * down to the nearest multiple of 2.  */
        if (compressed_size_avail > UINT32_MAX)
                compressed_size_avail = UINT32_MAX;
        if (compressed_size_avail & 1)
                compressed_size_avail--;

        /* Initialize the compressor structures.  */
        lzms_prepare_compressor(c, uncompressed_data, uncompressed_size,
                                compressed_data, compressed_size_avail / 2);

        /* Preprocess the uncompressed data.  */
        lzms_x86_filter(c->cur_window, c->cur_window_size,
                        c->last_target_usages, false);

        /* Load the window into the match-finder.  */
        lz_mf_load_window(c->mf, c->cur_window, c->cur_window_size);

        /* Compute and encode a literal/match sequence that decompresses to the
         * preprocessed data.  */
        lzms_near_optimal_parse(c);

        /* Return the compressed data size or 0.  */
        return lzms_finalize(c, compressed_data, compressed_size_avail);
}
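/*
 * Usage sketch (added for exposition; hypothetical driver code, not part of
 * wimlib's API surface shown here): compress one block through the functions
 * above.  A return value of 0 from lzms_compress() means the data did not fit
 * in the output buffer.
 */
static size_t
lzms_compress_example(const void *in, size_t in_size, void *out, size_t out_avail)
{
        void *ctx;
        size_t csize;

        if (lzms_create_compressor(in_size, 50, &ctx) != 0)  /* nonzero = error  */
                return 0;
        csize = lzms_compress(in, in_size, out, out_avail, ctx);
        lzms_free_compressor(ctx);
        return csize;
}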
static void
lzms_free_compressor(void *_c)
{
        struct lzms_compressor *c = _c;

        if (c) {
                FREE(c->cur_window);
                lz_mf_free(c->mf);
                FREE(c->matches);
                FREE(c->optimum);
                FREE(c);
        }
}

const struct compressor_ops lzms_compressor_ops = {
        .get_needed_memory = lzms_get_needed_memory,
        .create_compressor = lzms_create_compressor,
        .compress          = lzms_compress,
        .free_compressor   = lzms_free_compressor,
};