/*
* lzx-compress.c
+ *
+ * A compressor that produces output compatible with the LZX compression format.
*/
/*
* that position at previous positions in the window. With LZX, the minimum
* match length is 2 and the maximum match length is 257. The only restriction
* on offsets is that LZX does not allow the last 2 bytes of the window to match
- * the the beginning of the window.
- *
- * Depending on how good a compression ratio we want (see the "Match-choosing"
- * section), we may want to find: (a) all matches, or (b) just the longest
- * match, or (c) just some "promising" matches that we are able to find quickly,
- * or (d) just the longest match that we're able to find quickly. Below we
- * introduce the match-finding methods that the code currently uses or has
- * previously used:
- *
- * - Hash chains. Maintain a table that maps hash codes, computed from
- * fixed-length byte sequences, to linked lists containing previous window
- * positions. To search for matches, compute the hash for the current
- * position in the window and search the appropriate hash chain. When
- * advancing to the next position, prepend the current position to the
- * appropriate hash list. This is a good approach for producing matches with
- * stategy (d) and is useful for fast compression. Therefore, we provide an
- * option to use this method for LZX compression. See lz_hash.c for the
- * implementation.
- *
- * - Binary trees. Similar to hash chains, but each hash bucket contains a
- * binary tree of previous window positions rather than a linked list. This
- * is a good approach for producing matches with stategy (c) and is useful for
- * achieving a good compression ratio. Therefore, we provide an option to use
- * this method; see lz_bt.c for the implementation.
- *
- * - Suffix arrays. This code previously used this method to produce matches
- * with stategy (c), but I've dropped it because it was slower than the binary
- * trees approach, used more memory, and did not improve the compression ratio
- * enough to compensate. Download wimlib v1.6.2 if you want the code.
- * However, the suffix array method was basically as follows. Build the
- * suffix array for the entire window. The suffix array contains each
- * possible window position, sorted by the lexicographic order of the strings
- * that begin at those positions. Find the matches at a given position by
- * searching the suffix array outwards, in both directions, from the suffix
- * array slot for that position. This produces the longest matches first, but
- * "matches" that actually occur at later positions in the window must be
- * skipped. To do this skipping, use an auxiliary array with dynamically
- * constructed linked lists. Also, use the inverse suffix array to quickly
- * find the suffix array slot for a given position without doing a binary
- * search.
+ * the beginning of the window.
+ *
+ * There are a number of algorithms that can be used for this, including hash
+ * chains, binary trees, and suffix arrays. Binary trees generally work well
+ * for LZX compression since it uses medium-size windows (2^15 to 2^21 bytes).
+ * However, when compressing in a fast mode where many positions are skipped
+ * (not searched for matches), hash chains are faster.
+ *
+ * Since the match-finders are not specific to LZX, I will not explain them in
+ * detail here. Instead, see lz_hash_chains.c and lz_binary_trees.c.
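+ *
+ * That said, as a one-glance sketch of the hash chain idea (hypothetical
+ * pseudocode only; hash_3_bytes(), head[] and prev[] are illustrative names,
+ * not the real implementation):
+ *
+ *	hash = hash_3_bytes(&window[pos]);
+ *	for (i = head[hash]; i != NONE; i = prev[i])
+ *		try_to_extend_match(&window[pos], &window[i]);
+ *	prev[pos] = head[hash];	(then prepend 'pos' to its chain)
+ *	head[hash] = pos;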
*
* ----------------------------------------------------------------------------
*
* for example. Therefore, for fast compression we combine lazy parsing with
 * the hash chain match-finder.  For normal/high compression we combine
* near-optimal parsing with the binary tree match-finder.
- *
- * Anyway, if you've read through this comment, you hopefully should have a
- * better idea of why things are done in a certain way in this LZX compressor,
- * as well as in other compressors for LZ77-based formats (including third-party
- * ones). In my opinion, the phrase "compression algorithm" is often mis-used
- * in place of "compression format", since there can be many different
- * algorithms that all generate compressed data in the same format. The
- * challenge is to design an algorithm that is efficient but still gives a good
- * compression ratio.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
-#include "wimlib.h"
#include "wimlib/compressor_ops.h"
#include "wimlib/compress_common.h"
#include "wimlib/endianness.h"
#include "wimlib/error.h"
-#include "wimlib/lz.h"
-#include "wimlib/lz_hash.h"
-#include "wimlib/lz_bt.h"
+#include "wimlib/lz_mf.h"
+#include "wimlib/lz_repsearch.h"
#include "wimlib/lzx.h"
#include "wimlib/util.h"
#include <string.h>
-#ifdef ENABLE_LZX_DEBUG
-# include "wimlib/decompress_common.h"
-#endif
-
-#define LZX_OPTIM_ARRAY_SIZE 4096
+#define LZX_OPTIM_ARRAY_LENGTH 4096
#define LZX_DIV_BLOCK_SIZE 32768
-#define LZX_CACHE_PER_POS 10
+#define LZX_CACHE_PER_POS 8
-#define LZX_CACHE_LEN (LZX_DIV_BLOCK_SIZE * (LZX_CACHE_PER_POS + 1))
-#define LZX_CACHE_SIZE (LZX_CACHE_LEN * sizeof(struct raw_match))
+#define LZX_MAX_MATCHES_PER_POS (LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1)
-/* Dependent on behavior of lz_bt_get_matches(). */
-#define LZX_MAX_MATCHES_PER_POS (LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1)
+#define LZX_CACHE_LEN (LZX_DIV_BLOCK_SIZE * (LZX_CACHE_PER_POS + 1))
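+
+/* Note on the sizing above (illustrative, based on the match caching code
+ * below): each position's cache entry is one 'struct lz_match' whose 'len'
+ * field holds the match count, followed by the matches themselves.  So
+ * LZX_CACHE_LEN budgets (LZX_CACHE_PER_POS + 1) entries per position on
+ * average; the 'cache_limit' checks handle the case where a block uses more
+ * than this budget. */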
/* Codewords for the LZX main, length, and aligned offset Huffman codes */
struct lzx_codewords {
*
* If a codeword has zero frequency, it must still be assigned some nonzero cost
* --- generally a high cost, since even if it gets used in the next iteration,
- * it probably will not be used very times. */
+ * it probably will not be used very many times. */
struct lzx_costs {
u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
u8 len[LZX_LENCODE_NUM_SYMBOLS];
/* Tables for tallying symbol frequencies in the three LZX alphabets */
struct lzx_freqs {
- input_idx_t main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
- input_idx_t len[LZX_LENCODE_NUM_SYMBOLS];
- input_idx_t aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
+ u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
+ u32 len[LZX_LENCODE_NUM_SYMBOLS];
+ u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
/* LZX intermediate match/literal format */
-struct lzx_match {
+struct lzx_item {
/* Bit Description
*
* 31 1 if a match, 0 if a literal.
int block_type;
/* 0-based position in the window at which this block starts. */
- input_idx_t window_pos;
+ u32 window_pos;
/* The number of bytes of uncompressed data this block represents. */
- input_idx_t block_size;
+ u32 block_size;
/* The match/literal sequence for this block. */
- struct lzx_match *chosen_matches;
+ struct lzx_item *chosen_items;
- /* The length of the @chosen_matches sequence. */
- input_idx_t num_chosen_matches;
+ /* The length of the @chosen_items sequence. */
+ u32 num_chosen_items;
/* Huffman codes for this block. */
struct lzx_codes codes;
};
+struct lzx_compressor;
+
+struct lzx_compressor_params {
+ struct lz_match (*choose_item_func)(struct lzx_compressor *);
+ enum lz_mf_algo mf_algo;
+ u32 num_optim_passes;
+ u32 min_match_length;
+ u32 nice_match_length;
+ u32 max_search_depth;
+};
+
/* State of the LZX compressor. */
struct lzx_compressor {
- /* The parameters that were used to create the compressor. */
- struct wimlib_lzx_compressor_params params;
-
/* The buffer of data to be compressed.
*
* 0xe8 byte preprocessing is done directly on the data here before
* Note that this compressor does *not* use a real sliding window!!!!
* It's not needed in the WIM format, since every chunk is compressed
* independently. This is by design, to allow random access to the
- * chunks.
- *
- * We reserve a few extra bytes to potentially allow reading off the end
- * of the array in the match-finding code for optimization purposes
- * (currently only needed for the hash chain match-finder). */
- u8 *window;
+ * chunks. */
+ u8 *cur_window;
/* Number of bytes of data to be compressed, which is the number of
- * bytes of data in @window that are actually valid. */
- input_idx_t window_size;
+ * bytes of data in @cur_window that are actually valid. */
+ u32 cur_window_size;
- /* Allocated size of the @window. */
- input_idx_t max_window_size;
+ /* Allocated size of @cur_window. */
+ u32 max_window_size;
+
+ /* log2 order of the LZX window size for LZ match offset encoding
+ * purposes. Will be >= LZX_MIN_WINDOW_ORDER and <=
+ * LZX_MAX_WINDOW_ORDER.
+ *
+ * Note: 1 << @window_order is normally equal to @max_window_size, but
+ * it will be greater than @max_window_size in the event that the
+ * compressor was created with a non-power-of-2 block size. (See
+ * lzx_get_window_order().) */
+ unsigned window_order;
- /* Number of symbols in the main alphabet (depends on the
- * @max_window_size since it determines the maximum allowed offset). */
+ /* Compression parameters. */
+ struct lzx_compressor_params params;
+
+ unsigned (*get_matches_func)(struct lzx_compressor *, const struct lz_match **);
+ void (*skip_bytes_func)(struct lzx_compressor *, unsigned n);
+
+ /* Number of symbols in the main alphabet (depends on the @window_order
+ * since it determines the maximum allowed offset). */
unsigned num_main_syms;
/* The current match offset LRU queue. */
/* Space for the sequences of matches/literals that were chosen for each
* block. */
- struct lzx_match *chosen_matches;
+ struct lzx_item *chosen_items;
/* Information about the LZX blocks the preprocessed input was divided
* into. */
/* The current cost model. */
struct lzx_costs costs;
- /* Fast algorithm only: Array of hash table links. */
- input_idx_t *prev_tab;
-
- /* Slow algorithm only: Binary tree match-finder. */
- struct lz_bt mf;
+ /* Lempel-Ziv match-finder. */
+ struct lz_mf *mf;
/* Position in window of next match to return. */
- input_idx_t match_window_pos;
+ u32 match_window_pos;
/* The end-of-block position. We can't allow any matches to span this
* position. */
- input_idx_t match_window_end;
+ u32 match_window_end;
- /* Matches found by the match-finder are cached in the following array
- * to achieve a slight speedup when the same matches are needed on
+ /* When doing more than one match-choosing pass over the data, matches
+ * found by the match-finder are cached in the following array to
+ * achieve a slight speedup when the same matches are needed on
* subsequent passes. This is suboptimal because different matches may
* be preferred with different cost models, but seems to be a worthwhile
* speedup. */
- struct raw_match *cached_matches;
- struct raw_match *cache_ptr;
- bool matches_cached;
- struct raw_match *cache_limit;
+ struct lz_match *cached_matches;
+ struct lz_match *cache_ptr;
+ struct lz_match *cache_limit;
- /* Match-chooser state.
+ /* Match-chooser state, used when doing near-optimal parsing.
+ *
* When matches have been chosen, optimum_cur_idx is set to the position
* in the window of the next match/literal to return and optimum_end_idx
* is set to the position in the window at the end of the last
struct lzx_mc_pos_data *optimum;
unsigned optimum_cur_idx;
unsigned optimum_end_idx;
+
+ /* Previous match, used when doing lazy parsing. */
+ struct lz_match prev_match;
};
/*
/* Position of the start of the match or literal that
* was taken to get to this position in the approximate
* minimum-cost parse. */
- input_idx_t link;
+ u32 link;
/* Offset (as in an LZ (length, offset) pair) of the
* match or literal that was taken to get to this
* position in the approximate minimum-cost parse. */
- input_idx_t match_offset;
+ u32 match_offset;
} prev;
struct {
/* Position at which the match or literal starting at
* this position ends in the minimum-cost parse. */
- input_idx_t link;
+ u32 link;
/* Offset (as in an LZ (length, offset) pair) of the
* match or literal starting at this position in the
* approximate minimum-cost parse. */
- input_idx_t match_offset;
+ u32 match_offset;
} next;
};
/* Adaptive state that exists after an approximate minimum-cost path to
- * reach this position is taken. */
+ * reach this position is taken.
+ *
+ * Note: we update this whenever we update the pending minimum-cost
+ * path. This is in contrast to LZMA, which also has an optimal parser
+ * that maintains a repeat offset queue per position, but will only
+ * compute the queue once that position is actually reached in the
+ * parse, meaning that matches are being considered *starting* at that
+ * position. However, the two methods seem to have approximately the
+ * same performance if appropriate optimizations are used. Intuitively
+ * the LZMA method seems faster, but it actually suffers from 1-2 extra
+ * hard-to-predict branches at each position. Probably it works better
+ * for LZMA than LZX because LZMA has a larger adaptive state than LZX,
+ * and the LZMA encoder considers more possibilities. */
struct lzx_lru_queue queue;
};
+
+/*
+ * Structure to keep track of the current state of sending bits to the
+ * compressed output buffer.
+ *
+ * The LZX bitstream is encoded as a sequence of 16-bit coding units.
+ */
+struct lzx_output_bitstream {
+
+ /* Bits that haven't yet been written to the output buffer. */
+ u32 bitbuf;
+
+ /* Number of bits currently held in @bitbuf. */
+ u32 bitcount;
+
+ /* Pointer to the start of the output buffer. */
+ le16 *start;
+
+ /* Pointer to the position in the output buffer at which the next coding
+ * unit should be written. */
+ le16 *next;
+
+ /* Pointer past the end of the output buffer. */
+ le16 *end;
+};
+
+/*
+ * Initialize the output bitstream.
+ *
+ * @os
+ * The output bitstream structure to initialize.
+ * @buffer
+ * The buffer being written to.
+ * @size
+ * Size of @buffer, in bytes.
+ */
+static void
+lzx_init_output(struct lzx_output_bitstream *os, void *buffer, u32 size)
+{
+ os->bitbuf = 0;
+ os->bitcount = 0;
+ os->start = buffer;
+ os->next = os->start;
+ os->end = os->start + size / sizeof(le16);
+}
+
+/*
+ * Write some bits to the output bitstream.
+ *
+ * The bits are given by the low-order @num_bits bits of @bits.  Higher-order
+ * bits in @bits must not be set.  At most 17 bits can be written at once.
+ *
+ * @max_bits is a compile-time constant that specifies the maximum number of
+ * bits that can ever be written at the call site. Currently, it is used to
+ * optimize away the conditional code for writing a second 16-bit coding unit
+ * when writing fewer than 17 bits.
+ *
+ * If the output buffer space is exhausted, then the bits will be ignored, and
+ * lzx_flush_output() will return 0 when it gets called.
+ */
+static _always_inline_attribute void
+lzx_write_varbits(struct lzx_output_bitstream *os,
+ const u32 bits, const unsigned int num_bits,
+ const unsigned int max_num_bits)
+{
+ /* This code is optimized for LZX, which never needs to write more than
+ * 17 bits at once. */
+ LZX_ASSERT(num_bits <= 17);
+ LZX_ASSERT(num_bits <= max_num_bits);
+ LZX_ASSERT(os->bitcount <= 15);
+
+ /* Add the bits to the bit buffer variable. @bitcount will be at most
+ * 15, so there will be just enough space for the maximum possible
+ * @num_bits of 17. */
+ os->bitcount += num_bits;
+ os->bitbuf = (os->bitbuf << num_bits) | bits;
+
+ /* Check whether any coding units need to be written. */
+ if (os->bitcount >= 16) {
+
+ os->bitcount -= 16;
+
+ /* Write a coding unit, unless it would overflow the buffer. */
+ if (os->next != os->end)
+ *os->next++ = cpu_to_le16(os->bitbuf >> os->bitcount);
+
+ /* If writing 17 bits, a second coding unit might need to be
+ * written. But because 'max_num_bits' is a compile-time
+ * constant, the compiler will optimize away this code at most
+ * call sites. */
+ if (max_num_bits == 17 && os->bitcount == 16) {
+ if (os->next != os->end)
+ *os->next++ = cpu_to_le16(os->bitbuf);
+ os->bitcount = 0;
+ }
+ }
+}
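+
+/* Worked example (illustrative): if os->bitcount is 15 and 17 bits are
+ * written, bitcount becomes 32.  One coding unit holding the top 16 bits of
+ * bitbuf is emitted and bitcount drops to 16; then, because max_num_bits is
+ * 17, the second unit holding the remaining 16 bits is emitted as well and
+ * bitcount is reset to 0. */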
+
+/* Use when @num_bits is a compile-time constant. Otherwise use
+ * lzx_write_varbits(). */
+static _always_inline_attribute void
+lzx_write_bits(struct lzx_output_bitstream *os,
+ const u32 bits, const unsigned int num_bits)
+{
+ lzx_write_varbits(os, bits, num_bits, num_bits);
+}
+
+/*
+ * Flush the last coding unit to the output buffer if needed. Return the total
+ * number of bytes written to the output buffer, or 0 if an overflow occurred.
+ */
+static u32
+lzx_flush_output(struct lzx_output_bitstream *os)
+{
+ if (os->next == os->end)
+ return 0;
+
+ if (os->bitcount != 0)
+ *os->next++ = cpu_to_le16(os->bitbuf << (16 - os->bitcount));
+
+ return (const u8 *)os->next - (const u8 *)os->start;
+}
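+
+/*
+ * Typical usage of the output bitstream (an illustrative sketch only;
+ * 'buffer' and 'avail' are hypothetical variables):
+ *
+ *	struct lzx_output_bitstream os;
+ *	u32 nbytes;
+ *
+ *	lzx_init_output(&os, buffer, avail);
+ *	lzx_write_bits(&os, LZX_BLOCKTYPE_VERBATIM, 3);
+ *	...more lzx_write_bits()/lzx_write_varbits() calls...
+ *	nbytes = lzx_flush_output(&os);
+ *	if (nbytes == 0)
+ *		...the output did not fit; treat the data as incompressible...
+ */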
+
/* Returns the LZX position slot that corresponds to a given match offset,
* taking into account the recent offset queue and updating it if the offset is
* found in it. */
/*
* Output a precomputed LZX match.
*
- * @out:
+ * @os:
* The bitstream to which to write the match.
- * @block_type:
- * The type of the LZX block (LZX_BLOCKTYPE_ALIGNED or
- * LZX_BLOCKTYPE_VERBATIM)
+ * @ones_if_aligned:
+ * A mask of all ones if the block is of type LZX_BLOCKTYPE_ALIGNED,
+ * otherwise 0.
* @match:
- * The match, as a (length, offset) pair.
+ * The match data.
* @codes:
* Pointer to a structure that contains the codewords for the main, length,
* and aligned offset Huffman codes for the current LZX compressed block.
*/
static void
-lzx_write_match(struct output_bitstream *out, int block_type,
- struct lzx_match match, const struct lzx_codes *codes)
+lzx_write_match(struct lzx_output_bitstream *os, unsigned ones_if_aligned,
+ struct lzx_item match, const struct lzx_codes *codes)
{
- /* low 8 bits are the match length minus 2 */
unsigned match_len_minus_2 = match.data & 0xff;
- /* Next 17 bits are the position footer */
- unsigned position_footer = (match.data >> 8) & 0x1ffff; /* 17 bits */
- /* Next 6 bits are the position slot. */
- unsigned position_slot = (match.data >> 25) & 0x3f; /* 6 bits */
+ u32 position_footer = (match.data >> 8) & 0x1ffff;
+ unsigned position_slot = (match.data >> 25) & 0x3f;
unsigned len_header;
unsigned len_footer;
unsigned main_symbol;
unsigned num_extra_bits;
- unsigned verbatim_bits;
- unsigned aligned_bits;
/* If the match length is less than MIN_MATCH_LEN (= 2) +
- * NUM_PRIMARY_LENS (= 7), the length header contains
- * the match length minus MIN_MATCH_LEN, and there is no
- * length footer.
+ * NUM_PRIMARY_LENS (= 7), the length header contains the match length
+ * minus MIN_MATCH_LEN, and there is no length footer.
*
- * Otherwise, the length header contains
- * NUM_PRIMARY_LENS, and the length footer contains
- * the match length minus NUM_PRIMARY_LENS minus
+ * Otherwise, the length header contains NUM_PRIMARY_LENS, and the
+ * length footer contains the match length minus NUM_PRIMARY_LENS minus
* MIN_MATCH_LEN. */
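+	/* Worked example (illustrative): a match of length 12 has
+	 * match_len_minus_2 = 10 >= 7, giving len_header = 7 and
+	 * len_footer = 10 - 7 = 3; a match of length 3 has
+	 * match_len_minus_2 = 1, giving len_header = 1 and no footer. */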
if (match_len_minus_2 < LZX_NUM_PRIMARY_LENS) {
len_header = match_len_minus_2;
main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
/* Output main symbol. */
- bitstream_put_bits(out, codes->codewords.main[main_symbol],
- codes->lens.main[main_symbol]);
+ lzx_write_varbits(os, codes->codewords.main[main_symbol],
+ codes->lens.main[main_symbol],
+ LZX_MAX_MAIN_CODEWORD_LEN);
/* If there is a length footer, output it using the
* length Huffman code. */
- if (len_header == LZX_NUM_PRIMARY_LENS)
- bitstream_put_bits(out, codes->codewords.len[len_footer],
- codes->lens.len[len_footer]);
+ if (len_header == LZX_NUM_PRIMARY_LENS) {
+ lzx_write_varbits(os, codes->codewords.len[len_footer],
+ codes->lens.len[len_footer],
+ LZX_MAX_LEN_CODEWORD_LEN);
+ }
+
+ /* Output the position footer. */
num_extra_bits = lzx_get_num_extra_bits(position_slot);
- /* For aligned offset blocks with at least 3 extra bits, output the
- * verbatim bits literally, then the aligned bits encoded using the
- * aligned offset code. Otherwise, only the verbatim bits need to be
- * output. */
- if ((block_type == LZX_BLOCKTYPE_ALIGNED) && (num_extra_bits >= 3)) {
+ if ((num_extra_bits & ones_if_aligned) >= 3) {
- verbatim_bits = position_footer >> 3;
- bitstream_put_bits(out, verbatim_bits,
- num_extra_bits - 3);
+ /* Aligned offset blocks: The low 3 bits of the position footer
+ * are Huffman-encoded using the aligned offset code. The
+ * remaining bits are output literally. */
- aligned_bits = (position_footer & 7);
- bitstream_put_bits(out,
- codes->codewords.aligned[aligned_bits],
- codes->lens.aligned[aligned_bits]);
+ lzx_write_varbits(os,
+ position_footer >> 3, num_extra_bits - 3, 14);
+
+ lzx_write_varbits(os,
+ codes->codewords.aligned[position_footer & 7],
+ codes->lens.aligned[position_footer & 7],
+ LZX_MAX_ALIGNED_CODEWORD_LEN);
} else {
- /* verbatim bits is the same as the position
- * footer, in this case. */
- bitstream_put_bits(out, position_footer, num_extra_bits);
+ /* Verbatim blocks, or fewer than 3 extra bits: All position
+ * footer bits are output literally. */
+ lzx_write_varbits(os, position_footer, num_extra_bits, 17);
}
}
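+
+/* Worked example for the aligned case (illustrative): a position footer of
+ * 0b10110 in a position slot with 5 extra bits is output as the high 2 bits
+ * (0b10) verbatim, followed by the low 3 bits (0b110) encoded with the
+ * aligned offset code. */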
/* Output an LZX literal (encoded with the main Huffman code). */
static void
-lzx_write_literal(struct output_bitstream *out, u8 literal,
+lzx_write_literal(struct lzx_output_bitstream *os, unsigned literal,
const struct lzx_codes *codes)
{
- bitstream_put_bits(out,
- codes->codewords.main[literal],
- codes->lens.main[literal]);
+ lzx_write_varbits(os, codes->codewords.main[literal],
+ codes->lens.main[literal], LZX_MAX_MAIN_CODEWORD_LEN);
}
static unsigned
-lzx_build_precode(const u8 lens[restrict],
- const u8 prev_lens[restrict],
- const unsigned num_syms,
- input_idx_t precode_freqs[restrict LZX_PRECODE_NUM_SYMBOLS],
- u8 output_syms[restrict num_syms],
- u8 precode_lens[restrict LZX_PRECODE_NUM_SYMBOLS],
- u32 precode_codewords[restrict LZX_PRECODE_NUM_SYMBOLS],
- unsigned *num_additional_bits_ret)
+lzx_compute_precode_items(const u8 lens[restrict],
+ const u8 prev_lens[restrict],
+ const unsigned num_lens,
+ u32 precode_freqs[restrict],
+ unsigned precode_items[restrict])
{
- memset(precode_freqs, 0,
- LZX_PRECODE_NUM_SYMBOLS * sizeof(precode_freqs[0]));
-
- /* Since the code word lengths use a form of RLE encoding, the goal here
- * is to find each run of identical lengths when going through them in
- * symbol order (including runs of length 1). For each run, as many
- * lengths are encoded using RLE as possible, and the rest are output
- * literally.
- *
- * output_syms[] will be filled in with the length symbols that will be
- * output, including RLE codes, not yet encoded using the precode.
- *
- * cur_run_len keeps track of how many code word lengths are in the
- * current run of identical lengths. */
- unsigned output_syms_idx = 0;
- unsigned cur_run_len = 1;
- unsigned num_additional_bits = 0;
- for (unsigned i = 1; i <= num_syms; i++) {
-
- if (i != num_syms && lens[i] == lens[i - 1]) {
- /* Still in a run--- keep going. */
- cur_run_len++;
- continue;
- }
+ unsigned *itemptr;
+ unsigned run_start;
+ unsigned run_end;
+ unsigned extra_bits;
+ int delta;
+ u8 len;
+
+ itemptr = precode_items;
+ run_start = 0;
+ do {
+ /* Find the next run of codeword lengths. */
- /* Run ended! Check if it is a run of zeroes or a run of
- * nonzeroes. */
+ /* len = the length being repeated */
+ len = lens[run_start];
- /* The symbol that was repeated in the run--- not to be confused
- * with the length *of* the run (cur_run_len) */
- unsigned len_in_run = lens[i - 1];
+ run_end = run_start + 1;
+
+ /* Fast case for a single length. */
+ if (likely(run_end == num_lens || len != lens[run_end])) {
+ delta = prev_lens[run_start] - len;
+ if (delta < 0)
+ delta += 17;
+ precode_freqs[delta]++;
+ *itemptr++ = delta;
+ run_start++;
+ continue;
+ }
- if (len_in_run == 0) {
- /* A run of 0's. Encode it in as few length
- * codes as we can. */
+ /* Extend the run. */
+ do {
+ run_end++;
+ } while (run_end != num_lens && len == lens[run_end]);
- /* The magic length 18 indicates a run of 20 + n zeroes,
- * where n is an uncompressed literal 5-bit integer that
- * follows the magic length. */
- while (cur_run_len >= 20) {
- unsigned additional_bits;
+ if (len == 0) {
+ /* Run of zeroes. */
- additional_bits = min(cur_run_len - 20, 0x1f);
- num_additional_bits += 5;
+ /* Symbol 18: RLE 20 to 51 zeroes at a time. */
+ while ((run_end - run_start) >= 20) {
+ extra_bits = min((run_end - run_start) - 20, 0x1f);
precode_freqs[18]++;
- output_syms[output_syms_idx++] = 18;
- output_syms[output_syms_idx++] = additional_bits;
- cur_run_len -= 20 + additional_bits;
+ *itemptr++ = 18 | (extra_bits << 5);
+ run_start += 20 + extra_bits;
}
- /* The magic length 17 indicates a run of 4 + n zeroes,
- * where n is an uncompressed literal 4-bit integer that
- * follows the magic length. */
- while (cur_run_len >= 4) {
- unsigned additional_bits;
-
- additional_bits = min(cur_run_len - 4, 0xf);
- num_additional_bits += 4;
+ /* Symbol 17: RLE 4 to 19 zeroes at a time. */
+ if ((run_end - run_start) >= 4) {
+ extra_bits = min((run_end - run_start) - 4, 0xf);
precode_freqs[17]++;
- output_syms[output_syms_idx++] = 17;
- output_syms[output_syms_idx++] = additional_bits;
- cur_run_len -= 4 + additional_bits;
+ *itemptr++ = 17 | (extra_bits << 5);
+ run_start += 4 + extra_bits;
}
-
} else {
/* A run of nonzero lengths. */
- /* The magic length 19 indicates a run of 4 + n
- * nonzeroes, where n is a literal bit that follows the
- * magic length, and where the value of the lengths in
- * the run is given by an extra length symbol, encoded
- * with the precode, that follows the literal bit.
- *
- * The extra length symbol is encoded as a difference
- * from the length of the codeword for the first symbol
- * in the run in the previous code.
- * */
- while (cur_run_len >= 4) {
- unsigned additional_bits;
- signed char delta;
-
- additional_bits = (cur_run_len > 4);
- num_additional_bits += 1;
- delta = (signed char)prev_lens[i - cur_run_len] -
- (signed char)len_in_run;
+ /* Symbol 19: RLE 4 to 5 of any length at a time. */
+ while ((run_end - run_start) >= 4) {
+ extra_bits = (run_end - run_start) > 4;
+ delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
precode_freqs[19]++;
- precode_freqs[(unsigned char)delta]++;
- output_syms[output_syms_idx++] = 19;
- output_syms[output_syms_idx++] = additional_bits;
- output_syms[output_syms_idx++] = delta;
- cur_run_len -= 4 + additional_bits;
+ precode_freqs[delta]++;
+ *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
+ run_start += 4 + extra_bits;
}
}
- /* Any remaining lengths in the run are outputted without RLE,
- * as a difference from the length of that codeword in the
- * previous code. */
- while (cur_run_len > 0) {
- signed char delta;
-
- delta = (signed char)prev_lens[i - cur_run_len] -
- (signed char)len_in_run;
+ /* Output any remaining lengths without RLE. */
+ while (run_start != run_end) {
+ delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
-
- precode_freqs[(unsigned char)delta]++;
- output_syms[output_syms_idx++] = delta;
- cur_run_len--;
+ precode_freqs[delta]++;
+ *itemptr++ = delta;
+ run_start++;
}
+ } while (run_start != num_lens);
- cur_run_len = 1;
- }
-
- /* Build the precode from the frequencies of the length symbols. */
-
- make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
- LZX_MAX_PRE_CODEWORD_LEN,
- precode_freqs, precode_lens,
- precode_codewords);
-
- *num_additional_bits_ret = num_additional_bits;
-
- return output_syms_idx;
+ return itemptr - precode_items;
}
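+
+/* Illustrative examples of the item packing used above: a run of 30 zeroes
+ * yields symbol 18 with extra_bits = 10, packed as 18 | (10 << 5); a run of
+ * 5 equal nonzero lengths yields symbol 19 with extra_bits = 1 plus a delta
+ * symbol, packed as 19 | (1 << 5) | (delta << 6).  The delta is computed
+ * modulo 17: a length that was 2 in the previous code and is 6 now gives
+ * delta = 2 - 6 = -4, wrapped to 13. */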
/*
* as deltas from the codeword lengths of the corresponding code in the previous
* block.
*
- * @out:
+ * @os:
* Bitstream to which to write the compressed Huffman code.
* @lens:
* The codeword lengths, indexed by symbol, in the Huffman code.
* @prev_lens:
* The codeword lengths, indexed by symbol, in the corresponding Huffman
* code in the previous block, or all zeroes if this is the first block.
- * @num_syms:
+ * @num_lens:
* The number of symbols in the Huffman code.
*/
static void
-lzx_write_compressed_code(struct output_bitstream *out,
+lzx_write_compressed_code(struct lzx_output_bitstream *os,
const u8 lens[restrict],
const u8 prev_lens[restrict],
- unsigned num_syms)
+ unsigned num_lens)
{
- input_idx_t precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
- u8 output_syms[num_syms];
+ u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
+ unsigned precode_items[num_lens];
+ unsigned num_precode_items;
+ unsigned precode_item;
+ unsigned precode_sym;
unsigned i;
- unsigned num_output_syms;
- u8 precode_sym;
- unsigned dummy;
-
- num_output_syms = lzx_build_precode(lens,
- prev_lens,
- num_syms,
- precode_freqs,
- output_syms,
- precode_lens,
- precode_codewords,
- &dummy);
-
- /* Write the lengths of the precode codes to the output. */
+
for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
- bitstream_put_bits(out, precode_lens[i],
- LZX_PRECODE_ELEMENT_SIZE);
-
- /* Write the length symbols, encoded with the precode, to the output. */
-
- for (i = 0; i < num_output_syms; ) {
- precode_sym = output_syms[i++];
-
- bitstream_put_bits(out, precode_codewords[precode_sym],
- precode_lens[precode_sym]);
- switch (precode_sym) {
- case 17:
- bitstream_put_bits(out, output_syms[i++], 4);
- break;
- case 18:
- bitstream_put_bits(out, output_syms[i++], 5);
- break;
- case 19:
- bitstream_put_bits(out, output_syms[i++], 1);
- bitstream_put_bits(out,
- precode_codewords[output_syms[i]],
- precode_lens[output_syms[i]]);
- i++;
- break;
- default:
- break;
+ precode_freqs[i] = 0;
+
+ /* Compute the "items" (RLE / literal tokens and extra bits) with which
+ * the codeword lengths in the larger code will be output. */
+ num_precode_items = lzx_compute_precode_items(lens,
+ prev_lens,
+ num_lens,
+ precode_freqs,
+ precode_items);
+
+ /* Build the precode. */
+ make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
+ LZX_MAX_PRE_CODEWORD_LEN,
+ precode_freqs, precode_lens,
+ precode_codewords);
+
+ /* Output the lengths of the codewords in the precode. */
+ for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
+ lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
+
+ /* Output the encoded lengths of the codewords in the larger code. */
+ for (i = 0; i < num_precode_items; i++) {
+ precode_item = precode_items[i];
+ precode_sym = precode_item & 0x1F;
+ lzx_write_varbits(os, precode_codewords[precode_sym],
+ precode_lens[precode_sym],
+ LZX_MAX_PRE_CODEWORD_LEN);
+ if (precode_sym >= 17) {
+ if (precode_sym == 17) {
+ lzx_write_bits(os, precode_item >> 5, 4);
+ } else if (precode_sym == 18) {
+ lzx_write_bits(os, precode_item >> 5, 5);
+ } else {
+ lzx_write_bits(os, (precode_item >> 5) & 1, 1);
+ precode_sym = precode_item >> 6;
+ lzx_write_varbits(os, precode_codewords[precode_sym],
+ precode_lens[precode_sym],
+ LZX_MAX_PRE_CODEWORD_LEN);
+ }
}
}
}
* compressed block to the output bitstream in the final compressed
* representation.
*
- * @ostream
+ * @os
* The output bitstream.
* @block_type
* The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
* LZX_BLOCKTYPE_VERBATIM).
- * @match_tab
+ * @items
* The array of matches/literals to output.
- * @match_count
- * Number of matches/literals to output (length of @match_tab).
+ * @num_items
+ * Number of matches/literals to output (length of @items).
* @codes
* The main, length, and aligned offset Huffman codes for the current
* LZX compressed block.
*/
static void
-lzx_write_matches_and_literals(struct output_bitstream *ostream,
- int block_type,
- const struct lzx_match match_tab[],
- unsigned match_count,
- const struct lzx_codes *codes)
+lzx_write_items(struct lzx_output_bitstream *os, int block_type,
+ const struct lzx_item items[], u32 num_items,
+ const struct lzx_codes *codes)
{
- for (unsigned i = 0; i < match_count; i++) {
- struct lzx_match match = match_tab[i];
+ unsigned ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
+ for (u32 i = 0; i < num_items; i++) {
/* The high bit of the 32-bit intermediate representation
* indicates whether the item is an actual LZ-style match (1) or
* a literal byte (0). */
- if (match.data & 0x80000000)
- lzx_write_match(ostream, block_type, match, codes);
+ if (items[i].data & 0x80000000)
+ lzx_write_match(os, ones_if_aligned, items[i], codes);
else
- lzx_write_literal(ostream, match.data, codes);
+ lzx_write_literal(os, items[i].data, codes);
}
}
-static void
-lzx_assert_codes_valid(const struct lzx_codes * codes, unsigned num_main_syms)
-{
-#ifdef ENABLE_LZX_DEBUG
- unsigned i;
-
- for (i = 0; i < num_main_syms; i++)
- LZX_ASSERT(codes->lens.main[i] <= LZX_MAX_MAIN_CODEWORD_LEN);
-
- for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
- LZX_ASSERT(codes->lens.len[i] <= LZX_MAX_LEN_CODEWORD_LEN);
-
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- LZX_ASSERT(codes->lens.aligned[i] <= LZX_MAX_ALIGNED_CODEWORD_LEN);
-
- const unsigned tablebits = 10;
- u16 decode_table[(1 << tablebits) +
- (2 * max(num_main_syms, LZX_LENCODE_NUM_SYMBOLS))]
- _aligned_attribute(DECODE_TABLE_ALIGNMENT);
- LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
- num_main_syms,
- min(tablebits, LZX_MAINCODE_TABLEBITS),
- codes->lens.main,
- LZX_MAX_MAIN_CODEWORD_LEN));
- LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
- LZX_LENCODE_NUM_SYMBOLS,
- min(tablebits, LZX_LENCODE_TABLEBITS),
- codes->lens.len,
- LZX_MAX_LEN_CODEWORD_LEN));
- LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
- LZX_ALIGNEDCODE_NUM_SYMBOLS,
- min(tablebits, LZX_ALIGNEDCODE_TABLEBITS),
- codes->lens.aligned,
- LZX_MAX_ALIGNED_CODEWORD_LEN));
-#endif /* ENABLE_LZX_DEBUG */
-}
-
/* Write an LZX aligned offset or verbatim block to the output. */
static void
lzx_write_compressed_block(int block_type,
- unsigned block_size,
- unsigned max_window_size,
+ u32 block_size,
+ unsigned window_order,
unsigned num_main_syms,
- struct lzx_match * chosen_matches,
- unsigned num_chosen_matches,
+ struct lzx_item * chosen_items,
+ u32 num_chosen_items,
const struct lzx_codes * codes,
const struct lzx_codes * prev_codes,
- struct output_bitstream * ostream)
+ struct lzx_output_bitstream * os)
{
- unsigned i;
-
LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
block_type == LZX_BLOCKTYPE_VERBATIM);
- lzx_assert_codes_valid(codes, num_main_syms);
/* The first three bits indicate the type of block and are one of the
* LZX_BLOCKTYPE_* constants. */
- bitstream_put_bits(ostream, block_type, 3);
+ lzx_write_bits(os, block_type, 3);
/* Output the block size.
*
* because WIMs created with chunk size greater than 32768 can seemingly
* only be opened by wimlib anyway. */
if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
- bitstream_put_bits(ostream, 1, 1);
+ lzx_write_bits(os, 1, 1);
} else {
- bitstream_put_bits(ostream, 0, 1);
+ lzx_write_bits(os, 0, 1);
- if (max_window_size >= 65536)
- bitstream_put_bits(ostream, block_size >> 16, 8);
+ if (window_order >= 16)
+ lzx_write_bits(os, block_size >> 16, 8);
- bitstream_put_bits(ostream, block_size, 16);
+ lzx_write_bits(os, block_size & 0xFFFF, 16);
+ }
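+	/* (Illustrative summary: a non-default block size is thus written as
+	 * a 0 bit followed by a 24-bit size when window_order >= 16, or by a
+	 * 16-bit size otherwise.) */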
+
+ /* Output the aligned offset code. */
+ if (block_type == LZX_BLOCKTYPE_ALIGNED) {
+ for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+ lzx_write_bits(os, codes->lens.aligned[i],
+ LZX_ALIGNEDCODE_ELEMENT_SIZE);
+ }
}
- /* Write out lengths of the main code. Note that the LZX specification
- * incorrectly states that the aligned offset code comes after the
- * length code, but in fact it is the very first code to be written
- * (before the main code). */
- if (block_type == LZX_BLOCKTYPE_ALIGNED)
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- bitstream_put_bits(ostream, codes->lens.aligned[i],
- LZX_ALIGNEDCODE_ELEMENT_SIZE);
-
- LZX_DEBUG("Writing main code...");
-
- /* Write the precode and lengths for the first LZX_NUM_CHARS symbols in
- * the main code, which are the codewords for literal bytes. */
- lzx_write_compressed_code(ostream,
- codes->lens.main,
+ /* Output the main code (two parts). */
+ lzx_write_compressed_code(os, codes->lens.main,
prev_codes->lens.main,
LZX_NUM_CHARS);
-
- /* Write the precode and lengths for the rest of the main code, which
- * are the codewords for match headers. */
- lzx_write_compressed_code(ostream,
- codes->lens.main + LZX_NUM_CHARS,
+ lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
prev_codes->lens.main + LZX_NUM_CHARS,
num_main_syms - LZX_NUM_CHARS);
- LZX_DEBUG("Writing length code...");
-
- /* Write the precode and lengths for the length code. */
- lzx_write_compressed_code(ostream,
- codes->lens.len,
+ /* Output the length code. */
+ lzx_write_compressed_code(os, codes->lens.len,
prev_codes->lens.len,
LZX_LENCODE_NUM_SYMBOLS);
- LZX_DEBUG("Writing matches and literals...");
-
- /* Write the actual matches and literals. */
- lzx_write_matches_and_literals(ostream, block_type,
- chosen_matches, num_chosen_matches,
- codes);
-
- LZX_DEBUG("Done writing block.");
+ /* Output the compressed matches and literals. */
+ lzx_write_items(os, block_type, chosen_items, num_chosen_items, codes);
}
/* Write out the LZX blocks that were computed. */
static void
-lzx_write_all_blocks(struct lzx_compressor *ctx, struct output_bitstream *ostream)
+lzx_write_all_blocks(struct lzx_compressor *c, struct lzx_output_bitstream *os)
{
- const struct lzx_codes *prev_codes = &ctx->zero_codes;
- for (unsigned i = 0; i < ctx->num_blocks; i++) {
- const struct lzx_block_spec *spec = &ctx->block_specs[i];
-
- LZX_DEBUG("Writing block %u/%u (type=%d, size=%u, num_chosen_matches=%u)...",
- i + 1, ctx->num_blocks,
- spec->block_type, spec->block_size,
- spec->num_chosen_matches);
+ const struct lzx_codes *prev_codes = &c->zero_codes;
+ for (unsigned i = 0; i < c->num_blocks; i++) {
+ const struct lzx_block_spec *spec = &c->block_specs[i];
lzx_write_compressed_block(spec->block_type,
spec->block_size,
- ctx->max_window_size,
- ctx->num_main_syms,
- spec->chosen_matches,
- spec->num_chosen_matches,
+ c->window_order,
+ c->num_main_syms,
+ spec->chosen_items,
+ spec->num_chosen_items,
&spec->codes,
prev_codes,
- ostream);
+ os);
prev_codes = &spec->codes;
}
/* Constructs an LZX match from a literal byte and updates the main code symbol
* frequencies. */
-static u32
+static inline u32
lzx_tally_literal(u8 lit, struct lzx_freqs *freqs)
{
freqs->main[lit]++;
* queue and the frequency of symbols in the main, length, and aligned offset
* alphabets. The return value is a 32-bit number that provides the match in an
* intermediate representation documented below. */
-static u32
+static inline u32
lzx_tally_match(unsigned match_len, u32 match_offset,
struct lzx_freqs *freqs, struct lzx_lru_queue *queue)
{
unsigned position_slot;
- unsigned position_footer;
+ u32 position_footer;
u32 len_header;
unsigned main_symbol;
unsigned len_footer;
* as part of the main symbol) and a position footer. */
position_slot = lzx_get_position_slot(match_offset, queue);
position_footer = (match_offset + LZX_OFFSET_OFFSET) &
- ((1U << lzx_get_num_extra_bits(position_slot)) - 1);
+ (((u32)1 << lzx_get_num_extra_bits(position_slot)) - 1);
/* The match length shall be encoded as a length header (itself encoded
* as part of the main symbol) and an optional length footer. */
freqs->aligned[position_footer & 7]++;
/* Pack the position slot, position footer, and match length into an
- * intermediate representation. See `struct lzx_match' for details.
+ * intermediate representation. See `struct lzx_item' for details.
*/
LZX_ASSERT(LZX_MAX_POSITION_SLOTS <= 64);
LZX_ASSERT(lzx_get_num_extra_bits(LZX_MAX_POSITION_SLOTS - 1) <= 17);
(adjusted_match_len);
}
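+
+/* Worked example of the intermediate representation (illustrative): a match
+ * with adjusted length 5 (true length 7), position slot 2, and position
+ * footer 3 is packed as 0x80000000 | (2 << 25) | (3 << 8) | 5; see the
+ * corresponding unpacking in lzx_write_match(). */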
-struct lzx_record_ctx {
- struct lzx_freqs freqs;
- struct lzx_lru_queue queue;
- struct lzx_match *matches;
-};
-
-static void
-lzx_record_match(unsigned len, unsigned offset, void *_ctx)
-{
- struct lzx_record_ctx *ctx = _ctx;
-
- (ctx->matches++)->data = lzx_tally_match(len, offset, &ctx->freqs, &ctx->queue);
-}
-
-static void
-lzx_record_literal(u8 lit, void *_ctx)
-{
- struct lzx_record_ctx *ctx = _ctx;
-
- (ctx->matches++)->data = lzx_tally_literal(lit, &ctx->freqs);
-}
-
/* Returns the cost, in bits, to output a literal byte using the specified cost
* model. */
static u32
return costs->main[c];
}
-/* Given a (length, offset) pair that could be turned into a valid LZX match as
- * well as costs for the codewords in the main, length, and aligned Huffman
- * codes, return the approximate number of bits it will take to represent this
- * match in the compressed output. Take into account the match offset LRU
- * queue and optionally update it. */
+/* Returns the cost, in bits, to output a repeat offset match of the specified
+ * length and position slot (repeat index) using the specified cost model. */
static u32
-lzx_match_cost(unsigned length, u32 offset, const struct lzx_costs *costs,
- struct lzx_lru_queue *queue)
+lzx_repmatch_cost(u32 len, unsigned position_slot, const struct lzx_costs *costs)
{
- unsigned position_slot;
unsigned len_header, main_symbol;
- unsigned num_extra_bits;
u32 cost = 0;
- position_slot = lzx_get_position_slot(offset, queue);
-
- len_header = min(length - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
+ len_header = min(len - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
/* Account for main symbol. */
cost += costs->main[main_symbol];
- /* Account for extra position information. */
- num_extra_bits = lzx_get_num_extra_bits(position_slot);
- if (num_extra_bits >= 3) {
- cost += num_extra_bits - 3;
- cost += costs->aligned[(offset + LZX_OFFSET_OFFSET) & 7];
- } else {
- cost += num_extra_bits;
- }
-
/* Account for extra length information. */
if (len_header == LZX_NUM_PRIMARY_LENS)
- cost += costs->len[length - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
+ cost += costs->len[len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
return cost;
-
}
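+
+/* Note (illustrative): repeat offsets map to position slots 0-2, which have
+ * no extra offset bits, so only the main symbol and any length footer
+ * contribute to the cost of a repeat offset match. */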
-/* Set the cost model @ctx->costs from the Huffman codeword lengths specified in
+/* Set the cost model @c->costs from the Huffman codeword lengths specified in
* @lens.
*
* The cost model and codeword lengths are almost the same thing, but the
* length) to take into account the fact that uses of these symbols are expected
* to be rare. */
static void
-lzx_set_costs(struct lzx_compressor * ctx, const struct lzx_lens * lens)
+lzx_set_costs(struct lzx_compressor *c, const struct lzx_lens * lens,
+ unsigned nostat)
{
unsigned i;
- unsigned num_main_syms = ctx->num_main_syms;
/* Main code */
- for (i = 0; i < num_main_syms; i++) {
- ctx->costs.main[i] = lens->main[i];
- if (ctx->costs.main[i] == 0)
- ctx->costs.main[i] = ctx->params.alg_params.slow.main_nostat_cost;
- }
+ for (i = 0; i < c->num_main_syms; i++)
+ c->costs.main[i] = lens->main[i] ? lens->main[i] : nostat;
/* Length code */
- for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
- ctx->costs.len[i] = lens->len[i];
- if (ctx->costs.len[i] == 0)
- ctx->costs.len[i] = ctx->params.alg_params.slow.len_nostat_cost;
- }
+ for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
+ c->costs.len[i] = lens->len[i] ? lens->len[i] : nostat;
/* Aligned offset code */
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
- ctx->costs.aligned[i] = lens->aligned[i];
- if (ctx->costs.aligned[i] == 0)
- ctx->costs.aligned[i] = ctx->params.alg_params.slow.aligned_nostat_cost;
- }
+ for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
+ c->costs.aligned[i] = lens->aligned[i] ? lens->aligned[i] : nostat / 2;
}
-/* Retrieve a list of matches available at the next position in the input.
- *
- * A pointer to the matches array is written into @matches_ret, and the return
- * value is the number of matches found. */
-static unsigned
-lzx_get_matches(struct lzx_compressor *ctx,
- const struct raw_match **matches_ret)
+/* Don't allow matches to span the end of an LZX block. */
+static inline u32
+maybe_truncate_matches(struct lz_match matches[], u32 num_matches,
+ struct lzx_compressor *c)
{
- struct raw_match *cache_ptr;
- struct raw_match *matches;
- unsigned num_matches;
-
- LZX_ASSERT(ctx->match_window_pos < ctx->match_window_end);
-
- cache_ptr = ctx->cache_ptr;
- matches = cache_ptr + 1;
- if (ctx->matches_cached) {
- num_matches = cache_ptr->len;
- } else {
- num_matches = lz_bt_get_matches(&ctx->mf, matches);
- cache_ptr->len = num_matches;
- }
-
- /* Don't allow matches to span the end of an LZX block. */
- if (ctx->match_window_end < ctx->window_size && num_matches != 0) {
- unsigned limit = ctx->match_window_end - ctx->match_window_pos;
+ if (c->match_window_end < c->cur_window_size && num_matches != 0) {
+ u32 limit = c->match_window_end - c->match_window_pos;
if (limit >= LZX_MIN_MATCH_LEN) {
- unsigned i = num_matches - 1;
+ u32 i = num_matches - 1;
do {
if (matches[i].len >= limit) {
matches[i].len = limit;
} else {
num_matches = 0;
}
- cache_ptr->len = num_matches;
}
+ return num_matches;
+}
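+
+/* Illustrative example: with 5 bytes left before the block boundary, a match
+ * of length 9 is truncated to length 5; with only 1 byte left (less than
+ * LZX_MIN_MATCH_LEN), all matches are discarded. */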
-#if 0
- fprintf(stderr, "Pos %u/%u: %u matches\n",
- ctx->match_window_pos, ctx->window_size, num_matches);
- for (unsigned i = 0; i < num_matches; i++)
- fprintf(stderr, "\tLen %u Offset %u\n", matches[i].len, matches[i].offset);
-#endif
+static unsigned
+lzx_get_matches_fillcache_singleblock(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
+{
+ struct lz_match *cache_ptr;
+ struct lz_match *matches;
+ unsigned num_matches;
-#ifdef ENABLE_LZX_DEBUG
- for (unsigned i = 0; i < num_matches; i++) {
- LZX_ASSERT(matches[i].len >= LZX_MIN_MATCH_LEN);
- LZX_ASSERT(matches[i].len <= LZX_MAX_MATCH_LEN);
- LZX_ASSERT(matches[i].len <= ctx->match_window_end - ctx->match_window_pos);
- LZX_ASSERT(matches[i].offset > 0);
- LZX_ASSERT(matches[i].offset <= ctx->match_window_pos);
- LZX_ASSERT(!memcmp(&ctx->window[ctx->match_window_pos],
- &ctx->window[ctx->match_window_pos - matches[i].offset],
- matches[i].len));
- if (i) {
- LZX_ASSERT(matches[i].len > matches[i - 1].len);
- LZX_ASSERT(matches[i].offset > matches[i - 1].offset);
- }
+ cache_ptr = c->cache_ptr;
+ matches = cache_ptr + 1;
+ if (likely(cache_ptr <= c->cache_limit)) {
+ num_matches = lz_mf_get_matches(c->mf, matches);
+ cache_ptr->len = num_matches;
+ c->cache_ptr = matches + num_matches;
+ } else {
+ num_matches = 0;
}
-#endif
- ctx->match_window_pos++;
- ctx->cache_ptr = matches + num_matches;
+ c->match_window_pos++;
*matches_ret = matches;
return num_matches;
}
-static void
-lzx_skip_bytes(struct lzx_compressor *ctx, unsigned n)
+static unsigned
+lzx_get_matches_fillcache_multiblock(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
{
- struct raw_match *cache_ptr;
+ struct lz_match *cache_ptr;
+ struct lz_match *matches;
+ unsigned num_matches;
- LZX_ASSERT(n <= ctx->match_window_end - ctx->match_window_pos);
+ cache_ptr = c->cache_ptr;
+ matches = cache_ptr + 1;
+ if (likely(cache_ptr <= c->cache_limit)) {
+ num_matches = lz_mf_get_matches(c->mf, matches);
+ num_matches = maybe_truncate_matches(matches, num_matches, c);
+ cache_ptr->len = num_matches;
+ c->cache_ptr = matches + num_matches;
+ } else {
+ num_matches = 0;
+ }
+ c->match_window_pos++;
+ *matches_ret = matches;
+ return num_matches;
+}
- cache_ptr = ctx->cache_ptr;
- ctx->match_window_pos += n;
- if (ctx->matches_cached) {
- while (n--)
- cache_ptr += 1 + cache_ptr->len;
+static unsigned
+lzx_get_matches_usecache(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
+{
+ struct lz_match *cache_ptr;
+ struct lz_match *matches;
+ unsigned num_matches;
+
+ cache_ptr = c->cache_ptr;
+ matches = cache_ptr + 1;
+ if (cache_ptr <= c->cache_limit) {
+ num_matches = cache_ptr->len;
+ c->cache_ptr = matches + num_matches;
} else {
- lz_bt_skip_positions(&ctx->mf, n);
- while (n--) {
+ num_matches = 0;
+ }
+ c->match_window_pos++;
+ *matches_ret = matches;
+ return num_matches;
+}
+
+static unsigned
+lzx_get_matches_usecache_nocheck(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
+{
+ struct lz_match *cache_ptr;
+ struct lz_match *matches;
+ unsigned num_matches;
+
+ cache_ptr = c->cache_ptr;
+ matches = cache_ptr + 1;
+ num_matches = cache_ptr->len;
+ c->cache_ptr = matches + num_matches;
+ c->match_window_pos++;
+ *matches_ret = matches;
+ return num_matches;
+}
+
+static unsigned
+lzx_get_matches_nocache_singleblock(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
+{
+ struct lz_match *matches;
+ unsigned num_matches;
+
+ matches = c->cache_ptr;
+ num_matches = lz_mf_get_matches(c->mf, matches);
+ c->match_window_pos++;
+ *matches_ret = matches;
+ return num_matches;
+}
+
+static unsigned
+lzx_get_matches_nocache_multiblock(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
+{
+ struct lz_match *matches;
+ unsigned num_matches;
+
+ matches = c->cache_ptr;
+ num_matches = lz_mf_get_matches(c->mf, matches);
+ num_matches = maybe_truncate_matches(matches, num_matches, c);
+ c->match_window_pos++;
+ *matches_ret = matches;
+ return num_matches;
+}
+
+/*
+ * Find matches at the next position in the window.
+ *
+ * Returns the number of matches found and sets *matches_ret to point to the
+ * matches array. The matches will be sorted by strictly increasing length and
+ * offset.
+ */
+static inline unsigned
+lzx_get_matches(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
+{
+ return (*c->get_matches_func)(c, matches_ret);
+}
+
+static void
+lzx_skip_bytes_fillcache(struct lzx_compressor *c, unsigned n)
+{
+ struct lz_match *cache_ptr;
+
+ cache_ptr = c->cache_ptr;
+ c->match_window_pos += n;
+ lz_mf_skip_positions(c->mf, n);
+ if (cache_ptr <= c->cache_limit) {
+ do {
cache_ptr->len = 0;
cache_ptr += 1;
- }
+ } while (--n && cache_ptr <= c->cache_limit);
+ }
+ c->cache_ptr = cache_ptr;
+}
+
+static void
+lzx_skip_bytes_usecache(struct lzx_compressor *c, unsigned n)
+{
+ struct lz_match *cache_ptr;
+
+ cache_ptr = c->cache_ptr;
+ c->match_window_pos += n;
+ if (cache_ptr <= c->cache_limit) {
+ do {
+ cache_ptr += 1 + cache_ptr->len;
+ } while (--n && cache_ptr <= c->cache_limit);
}
- ctx->cache_ptr = cache_ptr;
+ c->cache_ptr = cache_ptr;
+}
+
+static void
+lzx_skip_bytes_usecache_nocheck(struct lzx_compressor *c, unsigned n)
+{
+ struct lz_match *cache_ptr;
+
+ cache_ptr = c->cache_ptr;
+ c->match_window_pos += n;
+ do {
+ cache_ptr += 1 + cache_ptr->len;
+ } while (--n);
+ c->cache_ptr = cache_ptr;
+}
+
+static void
+lzx_skip_bytes_nocache(struct lzx_compressor *c, unsigned n)
+{
+ c->match_window_pos += n;
+ lz_mf_skip_positions(c->mf, n);
+}
+
+/*
+ * Skip the specified number of positions in the window (don't search for
+ * matches at them).
+ */
+static inline void
+lzx_skip_bytes(struct lzx_compressor *c, unsigned n)
+{
+	(*c->skip_bytes_func)(c, n);
}
/*
*
* Returns the first match in the list.
*/
-static struct raw_match
-lzx_match_chooser_reverse_list(struct lzx_compressor *ctx, unsigned cur_pos)
+static struct lz_match
+lzx_match_chooser_reverse_list(struct lzx_compressor *c, unsigned cur_pos)
{
unsigned prev_link, saved_prev_link;
unsigned prev_match_offset, saved_prev_match_offset;
- ctx->optimum_end_idx = cur_pos;
+ c->optimum_end_idx = cur_pos;
- saved_prev_link = ctx->optimum[cur_pos].prev.link;
- saved_prev_match_offset = ctx->optimum[cur_pos].prev.match_offset;
+ saved_prev_link = c->optimum[cur_pos].prev.link;
+ saved_prev_match_offset = c->optimum[cur_pos].prev.match_offset;
do {
prev_link = saved_prev_link;
prev_match_offset = saved_prev_match_offset;
- saved_prev_link = ctx->optimum[prev_link].prev.link;
- saved_prev_match_offset = ctx->optimum[prev_link].prev.match_offset;
+ saved_prev_link = c->optimum[prev_link].prev.link;
+ saved_prev_match_offset = c->optimum[prev_link].prev.match_offset;
- ctx->optimum[prev_link].next.link = cur_pos;
- ctx->optimum[prev_link].next.match_offset = prev_match_offset;
+ c->optimum[prev_link].next.link = cur_pos;
+ c->optimum[prev_link].next.match_offset = prev_match_offset;
cur_pos = prev_link;
} while (cur_pos != 0);
- ctx->optimum_cur_idx = ctx->optimum[0].next.link;
+ c->optimum_cur_idx = c->optimum[0].next.link;
- return (struct raw_match)
- { .len = ctx->optimum_cur_idx,
- .offset = ctx->optimum[0].next.match_offset,
+ return (struct lz_match)
+ { .len = c->optimum_cur_idx,
+ .offset = c->optimum[0].next.match_offset,
};
}
/*
- * lzx_get_near_optimal_match() -
+ * Find the longest repeat offset match.
+ *
+ * If no match of at least LZX_MIN_MATCH_LEN bytes is found, then return 0.
+ *
+ * If a match of at least LZX_MIN_MATCH_LEN bytes is found, then return its
+ * length and set *slot_ret to the index of its offset in @queue.
+ */
+static inline u32
+lzx_repsearch(const u8 * const strptr, const u32 bytes_remaining,
+ const struct lzx_lru_queue *queue, unsigned *slot_ret)
+{
+ BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2);
+ return lz_repsearch(strptr, bytes_remaining, LZX_MAX_MATCH_LEN,
+ queue->R, LZX_NUM_RECENT_OFFSETS, slot_ret);
+}
+
+/*
+ * lzx_choose_near_optimal_item() -
*
* Choose an approximately optimal match or literal to use at the next position
* in the string, or "window", being LZ-encoded.
* The return value is a (length, offset) pair specifying the match or literal
* chosen. For literals, the length is 0 or 1 and the offset is meaningless.
*/
-static struct raw_match
-lzx_get_near_optimal_match(struct lzx_compressor *ctx)
+static struct lz_match
+lzx_choose_near_optimal_item(struct lzx_compressor *c)
{
unsigned num_matches;
- const struct raw_match *matches;
- const struct raw_match *matchptr;
- struct raw_match match;
- unsigned longest_len;
- unsigned longest_rep_len;
- u32 longest_rep_offset;
+ const struct lz_match *matches;
+ struct lz_match match;
+ u32 longest_len;
+ u32 longest_rep_len;
+ unsigned longest_rep_slot;
unsigned cur_pos;
unsigned end_pos;
+ struct lzx_mc_pos_data *optimum = c->optimum;
- if (ctx->optimum_cur_idx != ctx->optimum_end_idx) {
+ if (c->optimum_cur_idx != c->optimum_end_idx) {
/* Case 2: Return the next match/literal already found. */
- match.len = ctx->optimum[ctx->optimum_cur_idx].next.link -
- ctx->optimum_cur_idx;
- match.offset = ctx->optimum[ctx->optimum_cur_idx].next.match_offset;
+ match.len = optimum[c->optimum_cur_idx].next.link -
+ c->optimum_cur_idx;
+ match.offset = optimum[c->optimum_cur_idx].next.match_offset;
- ctx->optimum_cur_idx = ctx->optimum[ctx->optimum_cur_idx].next.link;
+ c->optimum_cur_idx = optimum[c->optimum_cur_idx].next.link;
return match;
}
/* Case 1: Compute a new list of matches/literals to return. */
- ctx->optimum_cur_idx = 0;
- ctx->optimum_end_idx = 0;
-
- /* Search for matches at recent offsets. Only keep the one with the
- * longest match length. */
- longest_rep_len = LZX_MIN_MATCH_LEN - 1;
- if (ctx->match_window_pos >= 1) {
- unsigned limit = min(LZX_MAX_MATCH_LEN,
- ctx->match_window_end - ctx->match_window_pos);
- for (int i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
- u32 offset = ctx->queue.R[i];
- const u8 *strptr = &ctx->window[ctx->match_window_pos];
- const u8 *matchptr = strptr - offset;
- unsigned len = 0;
- while (len < limit && strptr[len] == matchptr[len])
- len++;
- if (len > longest_rep_len) {
- longest_rep_len = len;
- longest_rep_offset = offset;
- }
- }
+ c->optimum_cur_idx = 0;
+ c->optimum_end_idx = 0;
+
+ /* Search for matches at repeat offsets. As a heuristic, we only keep
+ * the one with the longest match length. */
+ if (likely(c->match_window_pos >= 1)) {
+ longest_rep_len = lzx_repsearch(&c->cur_window[c->match_window_pos],
+ c->match_window_end - c->match_window_pos,
+ &c->queue,
+ &longest_rep_slot);
+ } else {
+ longest_rep_len = 0;
}
- /* If there's a long match with a recent offset, take it. */
- if (longest_rep_len >= ctx->params.alg_params.slow.nice_match_length) {
- lzx_skip_bytes(ctx, longest_rep_len);
- return (struct raw_match) {
+ /* If there's a long match with a repeat offset, choose it immediately. */
+ if (longest_rep_len >= c->params.nice_match_length) {
+ lzx_skip_bytes(c, longest_rep_len);
+ return (struct lz_match) {
.len = longest_rep_len,
- .offset = longest_rep_offset,
+ .offset = c->queue.R[longest_rep_slot],
};
}
- /* Search other matches. */
- num_matches = lzx_get_matches(ctx, &matches);
+ /* Find other matches. */
+ num_matches = lzx_get_matches(c, &matches);
- /* If there's a long match, take it. */
+ /* If there's a long match, choose it immediately. */
if (num_matches) {
longest_len = matches[num_matches - 1].len;
- if (longest_len >= ctx->params.alg_params.slow.nice_match_length) {
- lzx_skip_bytes(ctx, longest_len - 1);
+ if (longest_len >= c->params.nice_match_length) {
+ lzx_skip_bytes(c, longest_len - 1);
return matches[num_matches - 1];
}
} else {
longest_len = 1;
}
- /* Calculate the cost to reach the next position by coding a literal.
- */
- ctx->optimum[1].queue = ctx->queue;
- ctx->optimum[1].cost = lzx_literal_cost(ctx->window[ctx->match_window_pos - 1],
- &ctx->costs);
- ctx->optimum[1].prev.link = 0;
+ /* Calculate the cost to reach the next position by coding a literal. */
+ optimum[1].queue = c->queue;
+ optimum[1].cost = lzx_literal_cost(c->cur_window[c->match_window_pos - 1],
+ &c->costs);
+ optimum[1].prev.link = 0;
/* Calculate the cost to reach any position up to and including that
- * reached by the longest match. */
- matchptr = matches;
- for (unsigned len = 2; len <= longest_len; len++) {
- u32 offset = matchptr->offset;
-
- ctx->optimum[len].queue = ctx->queue;
- ctx->optimum[len].prev.link = 0;
- ctx->optimum[len].prev.match_offset = offset;
- ctx->optimum[len].cost = lzx_match_cost(len, offset, &ctx->costs,
- &ctx->optimum[len].queue);
- if (len == matchptr->len)
- matchptr++;
+ * reached by the longest match.
+ *
+ * Note: We consider only the lowest-offset match that reaches each
+ * position.
+ *
+ * Note: Some of the cost calculation stays the same for each offset,
+ * regardless of how many lengths it gets used for. Therefore, to
+ * improve performance, we hand-code the cost calculation instead of
+ * calling lzx_match_cost() to do a from-scratch cost evaluation at each
+ * length. */
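+
+	/* For example, a match at offset 100 has formatted offset
+	 * 100 + LZX_OFFSET_OFFSET = 102, which falls in position slot 13 and
+	 * carries 5 extra bits. Since 5 >= 3, the low 3 extra bits are costed
+	 * using the aligned offset code and the remaining 2 are verbatim, so
+	 * the offset-dependent part of the cost is 2 + costs.aligned[102 & 7],
+	 * and it is shared by every length at which this match is considered. */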
+ for (unsigned i = 0, len = 2; i < num_matches; i++) {
+ u32 offset;
+ struct lzx_lru_queue queue;
+ u32 position_cost;
+ unsigned position_slot;
+ unsigned num_extra_bits;
+
+ offset = matches[i].offset;
+ queue = c->queue;
+ position_cost = 0;
+
+ position_slot = lzx_get_position_slot(offset, &queue);
+ num_extra_bits = lzx_get_num_extra_bits(position_slot);
+ if (num_extra_bits >= 3) {
+ position_cost += num_extra_bits - 3;
+ position_cost += c->costs.aligned[(offset + LZX_OFFSET_OFFSET) & 7];
+ } else {
+ position_cost += num_extra_bits;
+ }
+
+ do {
+ u32 cost;
+ unsigned len_header;
+ unsigned main_symbol;
+
+ cost = position_cost;
+
+ if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
+ len_header = len - LZX_MIN_MATCH_LEN;
+ } else {
+ len_header = LZX_NUM_PRIMARY_LENS;
+ cost += c->costs.len[len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
+ }
+
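+			/* The main symbol alphabet is the LZX_NUM_CHARS
+			 * literals followed by the (position_slot, len_header)
+			 * pairs, packed as slot * 8 + header. */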
+ main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
+ cost += c->costs.main[main_symbol];
+
+ optimum[len].queue = queue;
+ optimum[len].prev.link = 0;
+ optimum[len].prev.match_offset = offset;
+ optimum[len].cost = cost;
+ } while (++len <= matches[i].len);
}
end_pos = longest_len;
- if (longest_rep_len >= LZX_MIN_MATCH_LEN) {
- struct lzx_lru_queue queue;
+ if (longest_rep_len) {
+
+ LZX_ASSERT(longest_rep_len >= LZX_MIN_MATCH_LEN);
+
u32 cost;
while (end_pos < longest_rep_len)
- ctx->optimum[++end_pos].cost = MC_INFINITE_COST;
-
- queue = ctx->queue;
- cost = lzx_match_cost(longest_rep_len, longest_rep_offset,
- &ctx->costs, &queue);
- if (cost <= ctx->optimum[longest_rep_len].cost) {
- ctx->optimum[longest_rep_len].queue = queue;
- ctx->optimum[longest_rep_len].prev.link = 0;
- ctx->optimum[longest_rep_len].prev.match_offset = longest_rep_offset;
- ctx->optimum[longest_rep_len].cost = cost;
+ optimum[++end_pos].cost = MC_INFINITE_COST;
+
+ cost = lzx_repmatch_cost(longest_rep_len, longest_rep_slot,
+ &c->costs);
+ if (cost <= optimum[longest_rep_len].cost) {
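+			/* Repeat offset match: per LZX, using queue slot i
+			 * swaps R[0] with R[i] rather than re-inserting the
+			 * offset at the front. */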
+ optimum[longest_rep_len].queue = c->queue;
+ swap(optimum[longest_rep_len].queue.R[0],
+ optimum[longest_rep_len].queue.R[longest_rep_slot]);
+ optimum[longest_rep_len].prev.link = 0;
+ optimum[longest_rep_len].prev.match_offset =
+ optimum[longest_rep_len].queue.R[0];
+ optimum[longest_rep_len].cost = cost;
}
}
	/* Step forward, calculating the estimated minimum cost to reach each
	 * position. The algorithm may find multiple paths to reach each
* position; only the lowest-cost path is saved.
*
- * The progress of the parse is tracked in the @ctx->optimum array, which
- * for each position contains the minimum cost to reach that position,
- * the index of the start of the match/literal taken to reach that
- * position through the minimum-cost path, the offset of the match taken
- * (not relevant for literals), and the adaptive state that will exist
- * at that position after the minimum-cost path is taken. The @cur_pos
+ * The progress of the parse is tracked in the @optimum array, which for
+ * each position contains the minimum cost to reach that position, the
+ * index of the start of the match/literal taken to reach that position
+ * through the minimum-cost path, the offset of the match taken (not
+ * relevant for literals), and the adaptive state that will exist at
+ * that position after the minimum-cost path is taken. The @cur_pos
* variable stores the position at which the algorithm is currently
* considering coding choices, and the @end_pos variable stores the
* greatest position at which the costs of coding choices have been
- * saved. (Actually, the algorithm guarantees that all positions up to
- * and including @end_pos are reachable by at least one path.)
+ * saved.
*
* The loop terminates when any one of the following conditions occurs:
*
	 * 1. A match with length greater than or equal to @nice_match_length
	 *    is found. When this occurs, the algorithm chooses this match
	 *    unconditionally, and consequently the near-optimal match/literal
	 *    sequence up to and including that match is fully determined and
	 *    it can begin returning the match/literal list.
	 *
	 * 2. @cur_pos reaches a position not overlapped by a preceding match.
	 *    In such cases, the near-optimal match/literal sequence up to
	 *    @cur_pos is fully determined and it can begin returning the
	 *    match/literal list.
*
* 3. Failing either of the above in a degenerate case, the loop
- * terminates when space in the @ctx->optimum array is exhausted.
+ * terminates when space in the @optimum array is exhausted.
* This terminates the algorithm and forces it to start returning
* matches/literals even though they may not be globally optimal.
*
cur_pos++;
/* Check termination conditions (2) and (3) noted above. */
- if (cur_pos == end_pos || cur_pos == LZX_OPTIM_ARRAY_SIZE)
- return lzx_match_chooser_reverse_list(ctx, cur_pos);
-
- /* Search for matches at recent offsets. */
- longest_rep_len = LZX_MIN_MATCH_LEN - 1;
- unsigned limit = min(LZX_MAX_MATCH_LEN,
- ctx->match_window_end - ctx->match_window_pos);
- for (int i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
- u32 offset = ctx->optimum[cur_pos].queue.R[i];
- const u8 *strptr = &ctx->window[ctx->match_window_pos];
- const u8 *matchptr = strptr - offset;
- unsigned len = 0;
- while (len < limit && strptr[len] == matchptr[len])
- len++;
- if (len > longest_rep_len) {
- longest_rep_len = len;
- longest_rep_offset = offset;
- }
- }
+ if (cur_pos == end_pos || cur_pos == LZX_OPTIM_ARRAY_LENGTH)
+ return lzx_match_chooser_reverse_list(c, cur_pos);
+
+ /* Search for matches at repeat offsets. Again, as a heuristic
+ * we only keep the longest one. */
+ longest_rep_len = lzx_repsearch(&c->cur_window[c->match_window_pos],
+ c->match_window_end - c->match_window_pos,
+ &optimum[cur_pos].queue,
+ &longest_rep_slot);
- /* If we found a long match at a recent offset, choose it
+ /* If we found a long match at a repeat offset, choose it
* immediately. */
- if (longest_rep_len >= ctx->params.alg_params.slow.nice_match_length) {
+ if (longest_rep_len >= c->params.nice_match_length) {
/* Build the list of matches to return and get
* the first one. */
- match = lzx_match_chooser_reverse_list(ctx, cur_pos);
+ match = lzx_match_chooser_reverse_list(c, cur_pos);
/* Append the long match to the end of the list. */
- ctx->optimum[cur_pos].next.match_offset = longest_rep_offset;
- ctx->optimum[cur_pos].next.link = cur_pos + longest_rep_len;
- ctx->optimum_end_idx = cur_pos + longest_rep_len;
+ optimum[cur_pos].next.match_offset =
+ optimum[cur_pos].queue.R[longest_rep_slot];
+ optimum[cur_pos].next.link = cur_pos + longest_rep_len;
+ c->optimum_end_idx = cur_pos + longest_rep_len;
/* Skip over the remaining bytes of the long match. */
- lzx_skip_bytes(ctx, longest_rep_len);
+ lzx_skip_bytes(c, longest_rep_len);
/* Return first match in the list. */
return match;
}
- /* Search other matches. */
- num_matches = lzx_get_matches(ctx, &matches);
+ /* Find other matches. */
+ num_matches = lzx_get_matches(c, &matches);
- /* If there's a long match, take it. */
+ /* If there's a long match, choose it immediately. */
if (num_matches) {
longest_len = matches[num_matches - 1].len;
- if (longest_len >= ctx->params.alg_params.slow.nice_match_length) {
+ if (longest_len >= c->params.nice_match_length) {
/* Build the list of matches to return and get
* the first one. */
- match = lzx_match_chooser_reverse_list(ctx, cur_pos);
+ match = lzx_match_chooser_reverse_list(c, cur_pos);
/* Append the long match to the end of the list. */
- ctx->optimum[cur_pos].next.match_offset =
+ optimum[cur_pos].next.match_offset =
matches[num_matches - 1].offset;
- ctx->optimum[cur_pos].next.link = cur_pos + longest_len;
- ctx->optimum_end_idx = cur_pos + longest_len;
+ optimum[cur_pos].next.link = cur_pos + longest_len;
+ c->optimum_end_idx = cur_pos + longest_len;
/* Skip over the remaining bytes of the long match. */
- lzx_skip_bytes(ctx, longest_len - 1);
+ lzx_skip_bytes(c, longest_len - 1);
/* Return first match in the list. */
return match;
			}
		} else {
			longest_len = 1;
		}
+ /* If we are reaching any positions for the first time, we need
+ * to initialize their costs to infinity. */
while (end_pos < cur_pos + longest_len)
- ctx->optimum[++end_pos].cost = MC_INFINITE_COST;
+ optimum[++end_pos].cost = MC_INFINITE_COST;
/* Consider coding a literal. */
- cost = ctx->optimum[cur_pos].cost +
- lzx_literal_cost(ctx->window[ctx->match_window_pos - 1],
- &ctx->costs);
- if (cost < ctx->optimum[cur_pos + 1].cost) {
- ctx->optimum[cur_pos + 1].queue = ctx->optimum[cur_pos].queue;
- ctx->optimum[cur_pos + 1].cost = cost;
- ctx->optimum[cur_pos + 1].prev.link = cur_pos;
+ cost = optimum[cur_pos].cost +
+ lzx_literal_cost(c->cur_window[c->match_window_pos - 1],
+ &c->costs);
+ if (cost < optimum[cur_pos + 1].cost) {
+ optimum[cur_pos + 1].queue = optimum[cur_pos].queue;
+ optimum[cur_pos + 1].cost = cost;
+ optimum[cur_pos + 1].prev.link = cur_pos;
}
- /* Consider coding a match. */
- matchptr = matches;
- for (unsigned len = 2; len <= longest_len; len++) {
+ /* Consider coding a match.
+ *
+		 * The hand-coded cost calculation is done for the same reason
+		 * stated in the comment for the similar loop earlier. In fact,
+		 * it is *this* loop that has the biggest effect on
+		 * performance; overall LZX compression is > 10% faster with
+		 * this code than with calling lzx_match_cost() for each
+		 * length. */
+ for (unsigned i = 0, len = 2; i < num_matches; i++) {
u32 offset;
- struct lzx_lru_queue queue;
-
- offset = matchptr->offset;
- queue = ctx->optimum[cur_pos].queue;
-
- cost = ctx->optimum[cur_pos].cost +
- lzx_match_cost(len, offset, &ctx->costs, &queue);
- if (cost < ctx->optimum[cur_pos + len].cost) {
- ctx->optimum[cur_pos + len].queue = queue;
- ctx->optimum[cur_pos + len].prev.link = cur_pos;
- ctx->optimum[cur_pos + len].prev.match_offset = offset;
- ctx->optimum[cur_pos + len].cost = cost;
+ u32 position_cost;
+ unsigned position_slot;
+ unsigned num_extra_bits;
+
+ offset = matches[i].offset;
+ position_cost = optimum[cur_pos].cost;
+
+ /* Yet another optimization: instead of calling
+ * lzx_get_position_slot(), hand-inline the search of
+ * the repeat offset queue. Then we can omit the
+ * extra_bits calculation for repeat offset matches, and
+ * also only compute the updated queue if we actually do
+ * find a new lowest cost path. */
+ for (position_slot = 0; position_slot < LZX_NUM_RECENT_OFFSETS; position_slot++)
+ if (offset == optimum[cur_pos].queue.R[position_slot])
+ goto have_position_cost;
+
+ position_slot = lzx_get_position_slot_raw(offset + LZX_OFFSET_OFFSET);
+
+ num_extra_bits = lzx_get_num_extra_bits(position_slot);
+ if (num_extra_bits >= 3) {
+ position_cost += num_extra_bits - 3;
+ position_cost += c->costs.aligned[
+ (offset + LZX_OFFSET_OFFSET) & 7];
+ } else {
+ position_cost += num_extra_bits;
}
- if (len == matchptr->len)
- matchptr++;
- }
- if (longest_rep_len >= LZX_MIN_MATCH_LEN) {
- struct lzx_lru_queue queue;
+ have_position_cost:
- while (end_pos < cur_pos + longest_rep_len)
- ctx->optimum[++end_pos].cost = MC_INFINITE_COST;
+ do {
+ u32 cost;
+ unsigned len_header;
+ unsigned main_symbol;
+
+ cost = position_cost;
+
+ if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
+ len_header = len - LZX_MIN_MATCH_LEN;
+ } else {
+ len_header = LZX_NUM_PRIMARY_LENS;
+ cost += c->costs.len[len -
+ LZX_MIN_MATCH_LEN -
+ LZX_NUM_PRIMARY_LENS];
+ }
- queue = ctx->optimum[cur_pos].queue;
+ main_symbol = ((position_slot << 3) | len_header) +
+ LZX_NUM_CHARS;
+ cost += c->costs.main[main_symbol];
+
+ if (cost < optimum[cur_pos + len].cost) {
+ if (position_slot < LZX_NUM_RECENT_OFFSETS) {
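+					/* Repeat offset: swap R[0] with the
+					 * slot that was used. */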
+ optimum[cur_pos + len].queue = optimum[cur_pos].queue;
+ swap(optimum[cur_pos + len].queue.R[0],
+ optimum[cur_pos + len].queue.R[position_slot]);
+ } else {
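+					/* Explicit offset: insert it at the
+					 * front of the queue, evicting R[2]. */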
+ optimum[cur_pos + len].queue.R[0] = offset;
+ optimum[cur_pos + len].queue.R[1] = optimum[cur_pos].queue.R[0];
+ optimum[cur_pos + len].queue.R[2] = optimum[cur_pos].queue.R[1];
+ }
+ optimum[cur_pos + len].prev.link = cur_pos;
+ optimum[cur_pos + len].prev.match_offset = offset;
+ optimum[cur_pos + len].cost = cost;
+ }
+ } while (++len <= matches[i].len);
+ }
- cost = ctx->optimum[cur_pos].cost +
- lzx_match_cost(longest_rep_len, longest_rep_offset,
- &ctx->costs, &queue);
- if (cost <= ctx->optimum[cur_pos + longest_rep_len].cost) {
- ctx->optimum[cur_pos + longest_rep_len].queue =
- queue;
- ctx->optimum[cur_pos + longest_rep_len].prev.link =
+ /* Consider coding a repeat offset match.
+ *
+ * As a heuristic, we only consider the longest length of the
+ * longest repeat offset match. This does not, however,
+ * necessarily mean that we will never consider any other repeat
+ * offsets, because above we detect repeat offset matches that
+ * were found by the regular match-finder. Therefore, this
+ * special handling of the longest repeat-offset match is only
+ * helpful for coding a repeat offset match that was *not* found
+ * by the match-finder, e.g. due to being obscured by a less
+ * distant match that is at least as long.
+ *
+	 * Note: an alternative, used in LZMA, is to consider every
+	 * length of every repeat offset match. This is a more thorough
+	 * search, and it makes it unnecessary to detect repeat offset
+	 * matches that were found by the regular match-finder. But in
+	 * my tests, for LZX the LZMA method slows down the compressor
+	 * by ~10% while providing little improvement in compression
+	 * ratio in return.
+	 *
+	 * I also tested a compromise approach: considering every 3rd
+	 * length of the longest repeat offset match. It still did not
+	 * seem quite worth it, though.
+	 */
+ if (longest_rep_len) {
+
+ LZX_ASSERT(longest_rep_len >= LZX_MIN_MATCH_LEN);
+
+ while (end_pos < cur_pos + longest_rep_len)
+ optimum[++end_pos].cost = MC_INFINITE_COST;
+
+ cost = optimum[cur_pos].cost +
+ lzx_repmatch_cost(longest_rep_len, longest_rep_slot,
+ &c->costs);
+ if (cost <= optimum[cur_pos + longest_rep_len].cost) {
+ optimum[cur_pos + longest_rep_len].queue =
+ optimum[cur_pos].queue;
+ swap(optimum[cur_pos + longest_rep_len].queue.R[0],
+ optimum[cur_pos + longest_rep_len].queue.R[longest_rep_slot]);
+ optimum[cur_pos + longest_rep_len].prev.link =
cur_pos;
- ctx->optimum[cur_pos + longest_rep_len].prev.match_offset =
- longest_rep_offset;
- ctx->optimum[cur_pos + longest_rep_len].cost =
+ optimum[cur_pos + longest_rep_len].prev.match_offset =
+ optimum[cur_pos + longest_rep_len].queue.R[0];
+ optimum[cur_pos + longest_rep_len].cost =
cost;
}
}
}
}
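+/*
+ * Lazy parsing with one match of lookahead: take the longest match found at
+ * the current position unless the next position yields a strictly longer one,
+ * in which case output a literal now and defer the decision. Matches
+ * considered too poor to be worth their cost (length 2, or length 3 with an
+ * offset greater than 4096) are rejected outright.
+ */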
+static struct lz_match
+lzx_choose_lazy_item(struct lzx_compressor *c)
+{
+ const struct lz_match *matches;
+ struct lz_match cur_match;
+ struct lz_match next_match;
+ u32 num_matches;
+
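+	/* Use the match deferred from the previous call, if any. */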
+ if (c->prev_match.len) {
+ cur_match = c->prev_match;
+ c->prev_match.len = 0;
+ } else {
+ num_matches = lzx_get_matches(c, &matches);
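+		/* Reject poor matches (length 2, or length 3 with an offset
+		 * greater than 4096) and emit a literal instead. */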
+ if (num_matches == 0 ||
+ (matches[num_matches - 1].len <= 3 &&
+ (matches[num_matches - 1].len <= 2 ||
+ matches[num_matches - 1].offset > 4096)))
+ {
+ return (struct lz_match) { };
+ }
+
+ cur_match = matches[num_matches - 1];
+ }
+
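+	/* A match at least as long as nice_match_length is always taken
+	 * immediately, without checking the next position. */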
+ if (cur_match.len >= c->params.nice_match_length) {
+ lzx_skip_bytes(c, cur_match.len - 1);
+ return cur_match;
+ }
+
+ num_matches = lzx_get_matches(c, &matches);
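+	/* If the next position has no acceptable match, take the current
+	 * one. */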
+ if (num_matches == 0 ||
+ (matches[num_matches - 1].len <= 3 &&
+ (matches[num_matches - 1].len <= 2 ||
+ matches[num_matches - 1].offset > 4096)))
+ {
+ lzx_skip_bytes(c, cur_match.len - 2);
+ return cur_match;
+ }
+
+ next_match = matches[num_matches - 1];
+
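+	/* Take the current match unless the next position's longest match is
+	 * strictly longer; in that case, emit a literal now and defer the
+	 * longer match to the next call. */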
+ if (next_match.len <= cur_match.len) {
+ lzx_skip_bytes(c, cur_match.len - 2);
+ return cur_match;
+ } else {
+ c->prev_match = next_match;
+ return (struct lz_match) { };
+ }
+}
+
+/*
+ * Return the next match or literal to use, delegating to the currently selected
+ * match-choosing algorithm.
+ *
+ * If the length of the returned 'struct lz_match' is less than
+ * LZX_MIN_MATCH_LEN, then it is really a literal.
+ */
+static inline struct lz_match
+lzx_choose_item(struct lzx_compressor *c)
+{
+ return (*c->params.choose_item_func)(c);
+}
+
/* Set default symbol costs for the LZX Huffman codes. */
static void
lzx_set_default_costs(struct lzx_costs * costs, unsigned num_main_syms)
return LZX_BLOCKTYPE_VERBATIM;
}
-/* Find a near-optimal sequence of matches/literals with which to output the
- * specified LZX block, then set the block's type to that which has the minimum
- * cost to output (either verbatim or aligned). */
+/* Find a sequence of matches/literals with which to output the specified LZX
+ * block, then set the block's type to that which has the minimum cost to output
+ * (either verbatim or aligned). */
static void
-lzx_optimize_block(struct lzx_compressor *ctx, struct lzx_block_spec *spec,
- unsigned num_passes)
+lzx_choose_items_for_block(struct lzx_compressor *c, struct lzx_block_spec *spec)
{
- const struct lzx_lru_queue orig_queue = ctx->queue;
- unsigned num_passes_remaining = num_passes;
+ const struct lzx_lru_queue orig_queue = c->queue;
+ u32 num_passes_remaining = c->params.num_optim_passes;
struct lzx_freqs freqs;
+ const u8 *window_ptr;
+ const u8 *window_end;
+ struct lzx_item *next_chosen_item;
+ struct lz_match lz_match;
+ struct lzx_item lzx_item;
- LZX_ASSERT(num_passes >= 1);
- LZX_ASSERT(lz_bt_get_position(&ctx->mf) == spec->window_pos);
+ LZX_ASSERT(num_passes_remaining >= 1);
+ LZX_ASSERT(lz_mf_get_position(c->mf) == spec->window_pos);
- ctx->match_window_end = spec->window_pos + spec->block_size;
- spec->chosen_matches = &ctx->chosen_matches[spec->window_pos];
- ctx->matches_cached = false;
+ c->match_window_end = spec->window_pos + spec->block_size;
+
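+	/* Select the match-getting routines. When multiple optimization
+	 * passes are used, the matches found on the first pass are cached and
+	 * reused by the later passes; with a single pass, no cache is filled.
+	 * The "multiblock" variants also take care not to return matches
+	 * extending past the end of the current block. */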
+ if (c->params.num_optim_passes > 1) {
+ if (spec->block_size == c->cur_window_size)
+ c->get_matches_func = lzx_get_matches_fillcache_singleblock;
+ else
+ c->get_matches_func = lzx_get_matches_fillcache_multiblock;
+ c->skip_bytes_func = lzx_skip_bytes_fillcache;
+ } else {
+ if (spec->block_size == c->cur_window_size)
+ c->get_matches_func = lzx_get_matches_nocache_singleblock;
+ else
+ c->get_matches_func = lzx_get_matches_nocache_multiblock;
+ c->skip_bytes_func = lzx_skip_bytes_nocache;
+ }
/* The first optimal parsing pass is done using the cost model already
- * set in ctx->costs. Each later pass is done using a cost model
- * computed from the previous pass. */
- do {
- const u8 *window_ptr;
- const u8 *window_end;
- struct lzx_match *next_chosen_match;
+ * set in c->costs. Each later pass is done using a cost model
+ * computed from the previous pass.
+ *
+	 * To improve performance, the array containing the chosen matches and
+	 * literals in intermediate form is generated only on the final pass. */
- --num_passes_remaining;
- ctx->match_window_pos = spec->window_pos;
- ctx->cache_ptr = ctx->cached_matches;
+ while (--num_passes_remaining) {
+ c->match_window_pos = spec->window_pos;
+ c->cache_ptr = c->cached_matches;
memset(&freqs, 0, sizeof(freqs));
- window_ptr = &ctx->window[spec->window_pos];
+ window_ptr = &c->cur_window[spec->window_pos];
window_end = window_ptr + spec->block_size;
- next_chosen_match = spec->chosen_matches;
while (window_ptr != window_end) {
- struct raw_match raw_match;
- struct lzx_match lzx_match;
- raw_match = lzx_get_near_optimal_match(ctx);
+ lz_match = lzx_choose_item(c);
- LZX_ASSERT(!(raw_match.len == LZX_MIN_MATCH_LEN &&
- raw_match.offset == ctx->max_window_size -
+ LZX_ASSERT(!(lz_match.len == LZX_MIN_MATCH_LEN &&
+ lz_match.offset == c->max_window_size -
LZX_MIN_MATCH_LEN));
- if (raw_match.len >= LZX_MIN_MATCH_LEN) {
- lzx_match.data = lzx_tally_match(raw_match.len,
- raw_match.offset,
- &freqs,
- &ctx->queue);
- window_ptr += raw_match.len;
+ if (lz_match.len >= LZX_MIN_MATCH_LEN) {
+ lzx_tally_match(lz_match.len, lz_match.offset,
+ &freqs, &c->queue);
+ window_ptr += lz_match.len;
} else {
- lzx_match.data = lzx_tally_literal(*window_ptr,
- &freqs);
+ lzx_tally_literal(*window_ptr, &freqs);
window_ptr += 1;
}
- *next_chosen_match++ = lzx_match;
}
- spec->num_chosen_matches = next_chosen_match - spec->chosen_matches;
- lzx_make_huffman_codes(&freqs, &spec->codes, ctx->num_main_syms);
- if (num_passes_remaining) {
- lzx_set_costs(ctx, &spec->codes.lens);
- ctx->queue = orig_queue;
- ctx->matches_cached = true;
+ lzx_make_huffman_codes(&freqs, &spec->codes, c->num_main_syms);
+ lzx_set_costs(c, &spec->codes.lens, 15);
+ c->queue = orig_queue;
+ if (c->cache_ptr <= c->cache_limit) {
+ c->get_matches_func = lzx_get_matches_usecache_nocheck;
+ c->skip_bytes_func = lzx_skip_bytes_usecache_nocheck;
+ } else {
+ c->get_matches_func = lzx_get_matches_usecache;
+ c->skip_bytes_func = lzx_skip_bytes_usecache;
}
- } while (num_passes_remaining);
+ }
+
+ c->match_window_pos = spec->window_pos;
+ c->cache_ptr = c->cached_matches;
+ memset(&freqs, 0, sizeof(freqs));
+ window_ptr = &c->cur_window[spec->window_pos];
+ window_end = window_ptr + spec->block_size;
+
+ spec->chosen_items = &c->chosen_items[spec->window_pos];
+ next_chosen_item = spec->chosen_items;
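+	/* The cost, in bits, assumed for symbols that have not yet been
+	 * observed in the statistics; it ramps up to the maximum codeword
+	 * length (15) as more items are chosen. */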
+ unsigned unseen_cost = 9;
+ while (window_ptr != window_end) {
+
+ lz_match = lzx_choose_item(c);
+
+ LZX_ASSERT(!(lz_match.len == LZX_MIN_MATCH_LEN &&
+ lz_match.offset == c->max_window_size -
+ LZX_MIN_MATCH_LEN));
+ if (lz_match.len >= LZX_MIN_MATCH_LEN) {
+ lzx_item.data = lzx_tally_match(lz_match.len,
+ lz_match.offset,
+ &freqs, &c->queue);
+ window_ptr += lz_match.len;
+ } else {
+ lzx_item.data = lzx_tally_literal(*window_ptr, &freqs);
+ window_ptr += 1;
+ }
+ *next_chosen_item++ = lzx_item;
+
+		/* When doing one-pass "near-optimal" parsing, update the cost
+		 * model occasionally. */
+ if (unlikely((next_chosen_item - spec->chosen_items) % 2048 == 0) &&
+ c->params.choose_item_func == lzx_choose_near_optimal_item &&
+ c->params.num_optim_passes == 1)
+ {
+ lzx_make_huffman_codes(&freqs, &spec->codes, c->num_main_syms);
+ lzx_set_costs(c, &spec->codes.lens, unseen_cost);
+ if (unseen_cost < 15)
+ unseen_cost++;
+ }
+ }
+ spec->num_chosen_items = next_chosen_item - spec->chosen_items;
+ lzx_make_huffman_codes(&freqs, &spec->codes, c->num_main_syms);
spec->block_type = lzx_choose_verbatim_or_aligned(&freqs, &spec->codes);
}
/* Prepare the input window into one or more LZX blocks ready to be output. */
static void
-lzx_prepare_blocks(struct lzx_compressor * ctx)
+lzx_prepare_blocks(struct lzx_compressor *c)
{
-	/* Set up a default cost model. */
+	/* Set up a default cost model (needed only when the near-optimal
+	 * item-choosing algorithm will be used). */
- lzx_set_default_costs(&ctx->costs, ctx->num_main_syms);
+ if (c->params.choose_item_func == lzx_choose_near_optimal_item)
+ lzx_set_default_costs(&c->costs, c->num_main_syms);
/* Set up the block specifications.
* TODO: The compression ratio could be slightly improved by performing
* data-dependent block splitting instead of using fixed-size blocks.
* Doing so well is a computationally hard problem, however. */
- ctx->num_blocks = DIV_ROUND_UP(ctx->window_size, LZX_DIV_BLOCK_SIZE);
- for (unsigned i = 0; i < ctx->num_blocks; i++) {
- unsigned pos = LZX_DIV_BLOCK_SIZE * i;
- ctx->block_specs[i].window_pos = pos;
- ctx->block_specs[i].block_size = min(ctx->window_size - pos,
- LZX_DIV_BLOCK_SIZE);
+ c->num_blocks = DIV_ROUND_UP(c->cur_window_size, LZX_DIV_BLOCK_SIZE);
+ for (unsigned i = 0; i < c->num_blocks; i++) {
+ u32 pos = LZX_DIV_BLOCK_SIZE * i;
+ c->block_specs[i].window_pos = pos;
+ c->block_specs[i].block_size = min(c->cur_window_size - pos,
+ LZX_DIV_BLOCK_SIZE);
}
/* Load the window into the match-finder. */
- lz_bt_load_window(&ctx->mf, ctx->window, ctx->window_size);
+ lz_mf_load_window(c->mf, c->cur_window, c->cur_window_size);
/* Determine sequence of matches/literals to output for each block. */
- lzx_lru_queue_init(&ctx->queue);
- ctx->optimum_cur_idx = 0;
- ctx->optimum_end_idx = 0;
- for (unsigned i = 0; i < ctx->num_blocks; i++) {
- lzx_optimize_block(ctx, &ctx->block_specs[i],
- ctx->params.alg_params.slow.num_optim_passes);
- }
+ lzx_lru_queue_init(&c->queue);
+ c->optimum_cur_idx = 0;
+ c->optimum_end_idx = 0;
+ c->prev_match.len = 0;
+ for (unsigned i = 0; i < c->num_blocks; i++)
+ lzx_choose_items_for_block(c, &c->block_specs[i]);
}
-/*
- * This is the fast version of lzx_prepare_blocks(). This version "quickly"
- * prepares a single compressed block containing the entire input. See the
- * description of the "Fast algorithm" at the beginning of this file for more
- * information.
- *
- * Input --- the preprocessed data:
- *
- * ctx->window[]
- * ctx->window_size
- *
- * Output --- the block specification and the corresponding match/literal data:
- *
- * ctx->block_specs[]
- * ctx->num_blocks
- * ctx->chosen_matches[]
- */
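+/* Given the compression level and maximum window size, build the parameters
+ * to use for LZX compression: the item-choosing (parsing) algorithm, the
+ * number of optimization passes, the match-finding algorithm, and the match
+ * length and search depth cutoffs. For example, level 20 selects lazy parsing
+ * with nice_match_length 65 and max_search_depth 45, while level 50 selects
+ * near-optimal parsing with 2 optimization passes, nice_match_length 32, and
+ * max_search_depth 50. */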
static void
-lzx_prepare_block_fast(struct lzx_compressor * ctx)
+lzx_build_params(unsigned int compression_level,
+ u32 max_window_size,
+ struct lzx_compressor_params *lzx_params)
{
- struct lzx_record_ctx record_ctx;
- struct lzx_block_spec *spec;
-
- /* Parameters to hash chain LZ match finder
- * (lazy with 1 match lookahead) */
- static const struct lz_params lzx_lz_params = {
- /* Although LZX_MIN_MATCH_LEN == 2, length 2 matches typically
- * aren't worth choosing when using greedy or lazy parsing. */
- .min_match = 3,
- .max_match = LZX_MAX_MATCH_LEN,
- .max_offset = LZX_MAX_WINDOW_SIZE,
- .good_match = LZX_MAX_MATCH_LEN,
- .nice_match = LZX_MAX_MATCH_LEN,
- .max_chain_len = LZX_MAX_MATCH_LEN,
- .max_lazy_match = LZX_MAX_MATCH_LEN,
- .too_far = 4096,
- };
-
- /* Initialize symbol frequencies and match offset LRU queue. */
- memset(&record_ctx.freqs, 0, sizeof(struct lzx_freqs));
- lzx_lru_queue_init(&record_ctx.queue);
- record_ctx.matches = ctx->chosen_matches;
-
- /* Determine series of matches/literals to output. */
- lz_analyze_block(ctx->window,
- ctx->window_size,
- lzx_record_match,
- lzx_record_literal,
- &record_ctx,
- &lzx_lz_params,
- ctx->prev_tab);
-
- /* Set up block specification. */
- spec = &ctx->block_specs[0];
- spec->block_type = LZX_BLOCKTYPE_ALIGNED;
- spec->window_pos = 0;
- spec->block_size = ctx->window_size;
- spec->num_chosen_matches = (record_ctx.matches - ctx->chosen_matches);
- spec->chosen_matches = ctx->chosen_matches;
- lzx_make_huffman_codes(&record_ctx.freqs, &spec->codes,
- ctx->num_main_syms);
- ctx->num_blocks = 1;
+ if (compression_level < 25) {
+ lzx_params->choose_item_func = lzx_choose_lazy_item;
+ lzx_params->num_optim_passes = 1;
+ if (max_window_size <= 262144)
+ lzx_params->mf_algo = LZ_MF_HASH_CHAINS;
+ else
+ lzx_params->mf_algo = LZ_MF_BINARY_TREES;
+ lzx_params->min_match_length = 3;
+ lzx_params->nice_match_length = 25 + compression_level * 2;
+ lzx_params->max_search_depth = 25 + compression_level;
+ } else {
+ lzx_params->choose_item_func = lzx_choose_near_optimal_item;
+ lzx_params->num_optim_passes = compression_level / 20;
+ if (max_window_size <= 32768 && lzx_params->num_optim_passes == 1)
+ lzx_params->mf_algo = LZ_MF_HASH_CHAINS;
+ else
+ lzx_params->mf_algo = LZ_MF_BINARY_TREES;
+ lzx_params->min_match_length = (compression_level >= 45) ? 2 : 3;
+ lzx_params->nice_match_length = min(((u64)compression_level * 32) / 50,
+ LZX_MAX_MATCH_LEN);
+ lzx_params->max_search_depth = min(((u64)compression_level * 50) / 50,
+ LZX_MAX_MATCH_LEN);
+ }
}
-static size_t
-lzx_compress(const void *uncompressed_data, size_t uncompressed_size,
- void *compressed_data, size_t compressed_size_avail, void *_ctx)
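+/* Given the LZX compression parameters and maximum window size, build the
+ * parameters to use for the generic match-finder. */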
+static void
+lzx_build_mf_params(const struct lzx_compressor_params *lzx_params,
+ u32 max_window_size, struct lz_mf_params *mf_params)
{
- struct lzx_compressor *ctx = _ctx;
- struct output_bitstream ostream;
- size_t compressed_size;
-
- if (uncompressed_size < 100) {
- LZX_DEBUG("Too small to bother compressing.");
- return 0;
- }
-
- if (uncompressed_size > ctx->max_window_size) {
- LZX_DEBUG("Can't compress %zu bytes using window of %u bytes!",
- uncompressed_size, ctx->max_window_size);
- return 0;
- }
-
- LZX_DEBUG("Attempting to compress %zu bytes...",
- uncompressed_size);
-
- /* The input data must be preprocessed. To avoid changing the original
- * input, copy it to a temporary buffer. */
- memcpy(ctx->window, uncompressed_data, uncompressed_size);
- ctx->window_size = uncompressed_size;
-
- /* This line is unnecessary; it just avoids inconsequential accesses of
- * uninitialized memory that would show up in memory-checking tools such
- * as valgrind. */
- memset(&ctx->window[ctx->window_size], 0, 12);
-
- LZX_DEBUG("Preprocessing data...");
-
- /* Before doing any actual compression, do the call instruction (0xe8
- * byte) translation on the uncompressed data. */
- lzx_do_e8_preprocessing(ctx->window, ctx->window_size);
-
- LZX_DEBUG("Preparing blocks...");
-
- /* Prepare the compressed data. */
- if (ctx->params.algorithm == WIMLIB_LZX_ALGORITHM_FAST)
- lzx_prepare_block_fast(ctx);
- else
- lzx_prepare_blocks(ctx);
+ memset(mf_params, 0, sizeof(*mf_params));
+
+ mf_params->algorithm = lzx_params->mf_algo;
+ mf_params->max_window_size = max_window_size;
+ mf_params->min_match_len = lzx_params->min_match_length;
+ mf_params->max_match_len = LZX_MAX_MATCH_LEN;
+ mf_params->max_search_depth = lzx_params->max_search_depth;
+ mf_params->nice_match_len = lzx_params->nice_match_length;
+}
- LZX_DEBUG("Writing compressed blocks...");
+static void
+lzx_free_compressor(void *_c);
- /* Generate the compressed data. */
- init_output_bitstream(&ostream, compressed_data, compressed_size_avail);
- lzx_write_all_blocks(ctx, &ostream);
+static u64
+lzx_get_needed_memory(size_t max_block_size, unsigned int compression_level)
+{
+ struct lzx_compressor_params params;
+ u64 size = 0;
+ unsigned window_order;
+ u32 max_window_size;
- LZX_DEBUG("Flushing bitstream...");
- compressed_size = flush_output_bitstream(&ostream);
- if (compressed_size == ~(input_idx_t)0) {
- LZX_DEBUG("Data did not compress to %zu bytes or less!",
- compressed_size_avail);
+ window_order = lzx_get_window_order(max_block_size);
+ if (window_order == 0)
return 0;
- }
+ max_window_size = max_block_size;
- LZX_DEBUG("Done: compressed %zu => %zu bytes.",
- uncompressed_size, compressed_size);
-
- /* Verify that we really get the same thing back when decompressing.
- * Although this could be disabled by default in all cases, it only
- * takes around 2-3% of the running time of the slow algorithm to do the
- * verification. */
- if (ctx->params.algorithm == WIMLIB_LZX_ALGORITHM_SLOW
- #if defined(ENABLE_LZX_DEBUG) || defined(ENABLE_VERIFY_COMPRESSION)
- || 1
- #endif
- )
- {
- struct wimlib_decompressor *decompressor;
+ lzx_build_params(compression_level, max_window_size, ¶ms);
- if (0 == wimlib_create_decompressor(WIMLIB_COMPRESSION_TYPE_LZX,
- ctx->max_window_size,
- NULL,
- &decompressor))
- {
- int ret;
- ret = wimlib_decompress(compressed_data,
- compressed_size,
- ctx->window,
- uncompressed_size,
- decompressor);
- wimlib_free_decompressor(decompressor);
-
- if (ret) {
- ERROR("Failed to decompress data we "
- "compressed using LZX algorithm");
- wimlib_assert(0);
- return 0;
- }
- if (memcmp(uncompressed_data, ctx->window, uncompressed_size)) {
- ERROR("Data we compressed using LZX algorithm "
- "didn't decompress to original");
- wimlib_assert(0);
- return 0;
- }
- } else {
- WARNING("Failed to create decompressor for "
- "data verification!");
- }
- }
- return compressed_size;
-}
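+	/* The 'struct lzx_compressor' itself. */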
+ size += sizeof(struct lzx_compressor);
-static void
-lzx_free_compressor(void *_ctx)
-{
- struct lzx_compressor *ctx = _ctx;
-
- if (ctx) {
- FREE(ctx->chosen_matches);
- FREE(ctx->cached_matches);
- FREE(ctx->optimum);
- lz_bt_destroy(&ctx->mf);
- FREE(ctx->block_specs);
- FREE(ctx->prev_tab);
- FREE(ctx->window);
- FREE(ctx);
- }
-}
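+	/* The window buffer. */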
+ size += max_window_size;
-static const struct wimlib_lzx_compressor_params lzx_fast_default = {
- .hdr = {
- .size = sizeof(struct wimlib_lzx_compressor_params),
- },
- .algorithm = WIMLIB_LZX_ALGORITHM_FAST,
- .use_defaults = 0,
- .alg_params = {
- .fast = {
- },
- },
-};
-static const struct wimlib_lzx_compressor_params lzx_slow_default = {
- .hdr = {
- .size = sizeof(struct wimlib_lzx_compressor_params),
- },
- .algorithm = WIMLIB_LZX_ALGORITHM_SLOW,
- .use_defaults = 0,
- .alg_params = {
- .slow = {
- .use_len2_matches = 1,
- .nice_match_length = 32,
- .num_optim_passes = 2,
- .max_search_depth = 50,
- .main_nostat_cost = 15,
- .len_nostat_cost = 15,
- .aligned_nostat_cost = 7,
- },
- },
-};
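+	/* The block specifications. */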
+ size += DIV_ROUND_UP(max_window_size, LZX_DIV_BLOCK_SIZE) *
+ sizeof(struct lzx_block_spec);
-static const struct wimlib_lzx_compressor_params *
-lzx_get_params(const struct wimlib_compressor_params_header *_params)
-{
- const struct wimlib_lzx_compressor_params *params =
- (const struct wimlib_lzx_compressor_params*)_params;
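+	/* The chosen matches/literals, in intermediate form. */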
+ size += max_window_size * sizeof(struct lzx_item);
- if (params == NULL) {
- LZX_DEBUG("Using default algorithm and parameters.");
- params = &lzx_slow_default;
- } else {
- if (params->use_defaults) {
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW)
- params = &lzx_slow_default;
- else
- params = &lzx_fast_default;
- }
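+	/* The match-finder. */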
+ size += lz_mf_get_needed_memory(params.mf_algo, max_window_size);
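+	/* For near-optimal parsing, the match-choosing (optimum) array. */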
+ if (params.choose_item_func == lzx_choose_near_optimal_item) {
+ size += (LZX_OPTIM_ARRAY_LENGTH + params.nice_match_length) *
+ sizeof(struct lzx_mc_pos_data);
}
- return params;
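+	/* The match cache (multiple passes), or a buffer for one position's
+	 * matches (single pass). */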
+ if (params.num_optim_passes > 1)
+ size += LZX_CACHE_LEN * sizeof(struct lz_match);
+ else
+ size += LZX_MAX_MATCHES_PER_POS * sizeof(struct lz_match);
+ return size;
}
static int
-lzx_create_compressor(size_t window_size,
- const struct wimlib_compressor_params_header *_params,
- void **ctx_ret)
+lzx_create_compressor(size_t max_block_size, unsigned int compression_level,
+ void **c_ret)
{
- const struct wimlib_lzx_compressor_params *params = lzx_get_params(_params);
- struct lzx_compressor *ctx;
-
- LZX_DEBUG("Allocating LZX context...");
+ struct lzx_compressor *c;
+ struct lzx_compressor_params params;
+ struct lz_mf_params mf_params;
+ unsigned window_order;
+ u32 max_window_size;
+
+ window_order = lzx_get_window_order(max_block_size);
+ if (window_order == 0)
+ return WIMLIB_ERR_INVALID_PARAM;
+ max_window_size = max_block_size;
- if (!lzx_window_size_valid(window_size))
+ lzx_build_params(compression_level, max_window_size, ¶ms);
+ lzx_build_mf_params(¶ms, max_window_size, &mf_params);
+ if (!lz_mf_params_valid(&mf_params))
return WIMLIB_ERR_INVALID_PARAM;
- LZX_DEBUG("Allocating memory.");
+ c = CALLOC(1, sizeof(struct lzx_compressor));
+ if (!c)
+ goto oom;
+
+ c->params = params;
+ c->num_main_syms = lzx_get_num_main_syms(window_order);
+ c->max_window_size = max_window_size;
+ c->window_order = window_order;
- ctx = CALLOC(1, sizeof(struct lzx_compressor));
- if (ctx == NULL)
+ c->cur_window = ALIGNED_MALLOC(max_window_size, 16);
+ if (!c->cur_window)
goto oom;
- ctx->num_main_syms = lzx_get_num_main_syms(window_size);
- ctx->max_window_size = window_size;
- ctx->window = MALLOC(window_size + 12);
- if (ctx->window == NULL)
+ c->block_specs = MALLOC(DIV_ROUND_UP(max_window_size,
+ LZX_DIV_BLOCK_SIZE) *
+ sizeof(struct lzx_block_spec));
+ if (!c->block_specs)
goto oom;
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_FAST) {
- ctx->prev_tab = MALLOC(window_size * sizeof(ctx->prev_tab[0]));
- if (ctx->prev_tab == NULL)
- goto oom;
- }
+ c->chosen_items = MALLOC(max_window_size * sizeof(struct lzx_item));
+ if (!c->chosen_items)
+ goto oom;
- size_t block_specs_length = DIV_ROUND_UP(window_size, LZX_DIV_BLOCK_SIZE);
- ctx->block_specs = MALLOC(block_specs_length * sizeof(ctx->block_specs[0]));
- if (ctx->block_specs == NULL)
+ c->mf = lz_mf_alloc(&mf_params);
+ if (!c->mf)
goto oom;
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
- unsigned min_match_len = LZX_MIN_MATCH_LEN;
- if (!params->alg_params.slow.use_len2_matches)
- min_match_len = max(min_match_len, 3);
-
- if (!lz_bt_init(&ctx->mf,
- window_size,
- min_match_len,
- LZX_MAX_MATCH_LEN,
- params->alg_params.slow.nice_match_length,
- params->alg_params.slow.max_search_depth))
+ if (params.choose_item_func == lzx_choose_near_optimal_item) {
+ c->optimum = MALLOC((LZX_OPTIM_ARRAY_LENGTH +
+ params.nice_match_length) *
+ sizeof(struct lzx_mc_pos_data));
+ if (!c->optimum)
goto oom;
}
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
- ctx->optimum = MALLOC((LZX_OPTIM_ARRAY_SIZE +
- min(params->alg_params.slow.nice_match_length,
- LZX_MAX_MATCH_LEN)) *
- sizeof(ctx->optimum[0]));
- if (!ctx->optimum)
+ if (params.num_optim_passes > 1) {
+ c->cached_matches = MALLOC(LZX_CACHE_LEN *
+ sizeof(struct lz_match));
+ if (!c->cached_matches)
goto oom;
- }
-
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
- ctx->cached_matches = MALLOC(LZX_CACHE_SIZE);
- if (ctx->cached_matches == NULL)
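+		/* Leave enough slack below the end of the cache for one full
+		 * position's worth of matches plus its header entry, so the
+		 * cache-filling code need only check the limit once per
+		 * position. */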
+ c->cache_limit = c->cached_matches + LZX_CACHE_LEN -
+ (LZX_MAX_MATCHES_PER_POS + 1);
+ } else {
+ c->cached_matches = MALLOC(LZX_MAX_MATCHES_PER_POS *
+ sizeof(struct lz_match));
+ if (!c->cached_matches)
goto oom;
- ctx->cache_limit = ctx->cached_matches +
- LZX_CACHE_LEN - (LZX_MAX_MATCHES_PER_POS + 1);
}
- ctx->chosen_matches = MALLOC(window_size * sizeof(ctx->chosen_matches[0]));
- if (ctx->chosen_matches == NULL)
- goto oom;
-
- memcpy(&ctx->params, params, sizeof(struct wimlib_lzx_compressor_params));
- memset(&ctx->zero_codes, 0, sizeof(ctx->zero_codes));
-
- LZX_DEBUG("Successfully allocated new LZX context.");
-
- *ctx_ret = ctx;
+ *c_ret = c;
return 0;
oom:
- lzx_free_compressor(ctx);
+ lzx_free_compressor(c);
return WIMLIB_ERR_NOMEM;
}
-static u64
-lzx_get_needed_memory(size_t max_block_size,
- const struct wimlib_compressor_params_header *_params)
+static size_t
+lzx_compress(const void *uncompressed_data, size_t uncompressed_size,
+ void *compressed_data, size_t compressed_size_avail, void *_c)
{
- const struct wimlib_lzx_compressor_params *params = lzx_get_params(_params);
+ struct lzx_compressor *c = _c;
+ struct lzx_output_bitstream os;
- u64 size = 0;
+ /* Don't bother compressing very small inputs. */
+ if (uncompressed_size < 100)
+ return 0;
- size += sizeof(struct lzx_compressor);
+ /* The input data must be preprocessed. To avoid changing the original
+ * input, copy it to a temporary buffer. */
+ memcpy(c->cur_window, uncompressed_data, uncompressed_size);
+ c->cur_window_size = uncompressed_size;
- size += max_block_size + 12;
+	/* Preprocess the data: translate the targets of x86 CALL (0xe8)
+	 * instructions from relative to absolute form. */
+ lzx_do_e8_preprocessing(c->cur_window, c->cur_window_size);
- size += DIV_ROUND_UP(max_block_size, LZX_DIV_BLOCK_SIZE) *
- sizeof(((struct lzx_compressor*)0)->block_specs[0]);
+ /* Prepare the compressed data. */
+ lzx_prepare_blocks(c);
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
- size += max_block_size * sizeof(((struct lzx_compressor*)0)->chosen_matches[0]);
- size += lz_bt_get_needed_memory(max_block_size);
- size += (LZX_OPTIM_ARRAY_SIZE +
- min(params->alg_params.slow.nice_match_length,
- LZX_MAX_MATCH_LEN)) *
- sizeof(((struct lzx_compressor *)0)->optimum[0]);
- size += LZX_CACHE_SIZE;
- } else {
- size += max_block_size * sizeof(((struct lzx_compressor*)0)->prev_tab[0]);
- }
- return size;
+ /* Generate the compressed data and return its size, or 0 if an overflow
+ * occurred. */
+ lzx_init_output(&os, compressed_data, compressed_size_avail);
+ lzx_write_all_blocks(c, &os);
+ return lzx_flush_output(&os);
}
-static bool
-lzx_params_valid(const struct wimlib_compressor_params_header *_params)
+static void
+lzx_free_compressor(void *_c)
{
- const struct wimlib_lzx_compressor_params *params =
- (const struct wimlib_lzx_compressor_params*)_params;
-
- if (params->hdr.size != sizeof(struct wimlib_lzx_compressor_params)) {
- LZX_DEBUG("Invalid parameter structure size!");
- return false;
- }
-
- if (params->algorithm != WIMLIB_LZX_ALGORITHM_SLOW &&
- params->algorithm != WIMLIB_LZX_ALGORITHM_FAST)
- {
- LZX_DEBUG("Invalid algorithm.");
- return false;
- }
-
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW &&
- !params->use_defaults)
- {
- if (params->alg_params.slow.num_optim_passes < 1)
- {
- LZX_DEBUG("Invalid number of optimization passes!");
- return false;
- }
-
- if (params->alg_params.slow.main_nostat_cost < 1 ||
- params->alg_params.slow.main_nostat_cost > 16)
- {
- LZX_DEBUG("Invalid main_nostat_cost!");
- return false;
- }
-
- if (params->alg_params.slow.len_nostat_cost < 1 ||
- params->alg_params.slow.len_nostat_cost > 16)
- {
- LZX_DEBUG("Invalid len_nostat_cost!");
- return false;
- }
-
- if (params->alg_params.slow.aligned_nostat_cost < 1 ||
- params->alg_params.slow.aligned_nostat_cost > 8)
- {
- LZX_DEBUG("Invalid aligned_nostat_cost!");
- return false;
- }
+ struct lzx_compressor *c = _c;
+
+ if (c) {
+ ALIGNED_FREE(c->cur_window);
+ FREE(c->block_specs);
+ FREE(c->chosen_items);
+ lz_mf_free(c->mf);
+ FREE(c->optimum);
+ FREE(c->cached_matches);
+ FREE(c);
}
- return true;
}
const struct compressor_ops lzx_compressor_ops = {
- .params_valid = lzx_params_valid,
.get_needed_memory = lzx_get_needed_memory,
.create_compressor = lzx_create_compressor,
.compress = lzx_compress,