/*
* lzx-compress.c
*
- * LZX compression routines
+ * A compressor that produces output compatible with the LZX compression format.
*/
/*
- * Copyright (C) 2012, 2013 Eric Biggers
+ * Copyright (C) 2012, 2013, 2014 Eric Biggers
*
- * This file is part of wimlib, a library for working with WIM files.
+ * This file is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your option) any
+ * later version.
*
- * wimlib is free software; you can redistribute it and/or modify it under the
- * terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 3 of the License, or (at your option)
- * any later version.
- *
- * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
- * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
*
- * You should have received a copy of the GNU General Public License
- * along with wimlib; if not, see http://www.gnu.org/licenses/.
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this file; if not, see http://www.gnu.org/licenses/.
*/
/*
- * This file contains a compressor for the LZX compression format, as used in
- * the WIM file format.
- *
- * Format
- * ======
+ * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
+ * compression format, as used in the WIM (Windows IMaging) file format.
*
- * First, the primary reference for the LZX compression format is the
- * specification released by Microsoft.
+ * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
+ * "Near-optimal" is significantly slower than "lazy", but results in a better
+ * compression ratio. The "near-optimal" algorithm is used at the default
+ * compression level.
*
- * Second, the comments in lzx-decompress.c provide some more information about
- * the LZX compression format, including errors in the Microsoft specification.
+ * This file may need some slight modifications to be used outside of the WIM
+ * format. In particular, in other situations the LZX block header might be
+ * slightly different, and a sliding window rather than a fixed-size window
+ * might be required.
*
- * Do note that LZX shares many similarities with DEFLATE, the algorithm used by
- * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding,
- * and certain other details are quite similar, such as the method for storing
- * Huffman codes. However, some of the main differences are:
+ * Note: LZX is a compression format derived from DEFLATE, the format used by
+ * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding.
+ * Certain details are quite similar, such as the method for storing Huffman
+ * codes. However, the main differences are:
*
* - LZX preprocesses the data to attempt to make x86 machine code slightly more
* compressible before attempting to compress it further.
+ *
* - LZX uses a "main" alphabet which combines literals and matches, with the
* match symbols containing a "length header" (giving all or part of the match
- * length) and a "position slot" (giving, roughly speaking, the order of
+ * length) and an "offset slot" (giving, roughly speaking, the order of
* magnitude of the match offset).
- * - LZX does not have static Huffman blocks; however it does have two types of
- * dynamic Huffman blocks ("aligned offset" and "verbatim").
- * - LZX has a minimum match length of 2 rather than 3.
- * - In LZX, match offsets 0 through 2 actually represent entries in an LRU
- * queue of match offsets. This is very useful for certain types of files,
- * such as binary files that have repeating records.
- *
- * Algorithms
- * ==========
- *
- * There are actually two distinct overall algorithms implemented here. We
- * shall refer to them as the "slow" algorithm and the "fast" algorithm. The
- * "slow" algorithm spends more time compressing to achieve a higher compression
- * ratio compared to the "fast" algorithm. More details are presented below.
- *
- * Slow algorithm
- * --------------
- *
- * The "slow" algorithm to generate LZX-compressed data is roughly as follows:
- *
- * 1. Preprocess the input data to translate the targets of x86 call
- * instructions to absolute offsets.
- *
- * 2. Build the suffix array and inverse suffix array for the input data. The
- * suffix array contains the indices of all suffixes of the input data,
- * sorted lexcographically by the corresponding suffixes. The "position" of
- * a suffix is the index of that suffix in the original string, whereas the
- * "rank" of a suffix is the index at which that suffix's position is found
- * in the suffix array.
- *
- * 3. Build the longest common prefix array corresponding to the suffix array.
- *
- * 4. For each suffix, find the highest lower ranked suffix that has a lower
- * position, the lowest higher ranked suffix that has a lower position, and
- * the length of the common prefix shared between each. This information is
- * later used to link suffix ranks into a doubly-linked list for searching
- * the suffix array.
- *
- * 5. Set a default cost model for matches/literals.
- *
- * 6. Determine the lowest cost sequence of LZ77 matches ((offset, length)
- * pairs) and literal bytes to divide the input into. Raw match-finding is
- * done by searching the suffix array using a linked list to avoid
- * considering any suffixes that start after the current position. Each run
- * of the match-finder returns the approximate lowest-cost longest match as
- * well as any shorter matches that have even lower approximate costs. Each
- * such run also adds the suffix rank of the current position into the linked
- * list being used to search the suffix array. Parsing, or match-choosing,
- * is solved as a minimum-cost path problem using a forward "optimal parsing"
- * algorithm based on the Deflate encoder from 7-Zip. This algorithm moves
- * forward calculating the minimum cost to reach each byte until either a
- * very long match is found or until a position is found at which no matches
- * start or overlap.
- *
- * 7. Build the Huffman codes needed to output the matches/literals.
- *
- * 8. Up to a certain number of iterations, use the resulting Huffman codes to
- * refine a cost model and go back to Step #6 to determine an improved
- * sequence of matches and literals.
- *
- * 9. Output the resulting block using the match/literal sequences and the
- * Huffman codes that were computed for the block.
- *
- * Note: the algorithm does not yet attempt to split the input into multiple LZX
- * blocks, instead using a series of blocks of LZX_DIV_BLOCK_SIZE bytes.
- *
- * Fast algorithm
- * --------------
- *
- * The fast algorithm (and the only one available in wimlib v1.5.1 and earlier)
- * spends much less time on the main bottlenecks of the compression process ---
- * that is, the match finding and match choosing. Matches are found and chosen
- * with hash chains using a greedy parse with one position of look-ahead. No
- * block splitting is done; only compressing the full input into an aligned
- * offset block is considered.
- *
- * API
- * ===
- *
- * The old API (retained for backward compatibility) consists of just one
- * function:
- *
- * wimlib_lzx_compress()
- *
- * The new compressor has more potential parameters and needs more memory, so
- * the new API ties up memory allocations and compression parameters into a
- * context:
- *
- * wimlib_lzx_alloc_context()
- * wimlib_lzx_compress2()
- * wimlib_lzx_free_context()
- * wimlib_lzx_set_default_params()
- *
- * Both wimlib_lzx_compress() and wimlib_lzx_compress2() are designed to
- * compress an in-memory buffer of up to the window size, which can be any power
- * of two between 2^15 and 2^21 inclusively. However, by default, the WIM
- * format uses 2^15, and this is seemingly the only value that is compatible
- * with WIMGAPI. In any case, the window is not a true "sliding window" since
- * no data is ever "slid out" of the window. This is needed for the WIM format,
- * which is designed such that chunks may be randomly accessed.
- *
- * Both wimlib_lzx_compress() and wimlib_lzx_compress2() return 0 if the data
- * could not be compressed to less than the size of the uncompressed data.
- * Again, this is suitable for the WIM format, which stores such data chunks
- * uncompressed.
- *
- * The functions in this LZX compression API are exported from the library,
- * although with the possible exception of wimlib_lzx_set_default_params(), this
- * is only in case other programs happen to have uses for it other than WIM
- * reading/writing as already handled through the rest of the library.
- *
- * Acknowledgments
- * ===============
- *
- * Acknowledgments to several open-source projects and research papers that made
- * it possible to implement this code:
- *
- * - divsufsort (author: Yuta Mori), for the suffix array construction code,
- * located in a separate directory (divsufsort/).
- *
- * - "Linear-Time Longest-Common-Prefix Computation in Suffix Arrays and Its
- * Applications" (Kasai et al. 2001), for the LCP array computation.
- *
- * - "LPF computation revisited" (Crochemore et al. 2009) for the prev and next
- * array computations.
*
- * - 7-Zip (author: Igor Pavlov) for the algorithm for forward optimal parsing
- * (match-choosing).
+ * - LZX does not have static Huffman blocks (that is, the kind with preset
+ *   Huffman codes); however, it does have two types of dynamic Huffman blocks
+ * ("verbatim" and "aligned").
*
- * - zlib (author: Jean-loup Gailly and Mark Adler), for the hash table
- * match-finding algorithm (used in lz77.c).
+ * - LZX has a minimum match length of 2 rather than 3. Length 2 matches can be
+ * useful, but generally only if the parser is smart about choosing them.
*
- * - lzx-compress (author: Matthew T. Russotto), on which some parts of this
- * code were originally based.
+ * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
+ * of match offsets. This is very useful for certain types of files, such as
+ * binary files that have repeating records.
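+ *
+ * As a worked illustration of the combined main alphabet described above
+ * (restating the format description, not adding normative detail): a match
+ * symbol is composed as
+ *
+ *     main_symbol = LZX_NUM_CHARS + (offset_slot << 3) + len_header
+ *
+ * so symbols 0 through LZX_NUM_CHARS - 1 (0 through 255) are literal bytes,
+ * and each higher symbol combines an offset slot with a 3-bit length header.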
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
-#include "wimlib.h"
-#include "wimlib/compress.h"
+#include "wimlib/compress_common.h"
+#include "wimlib/compressor_ops.h"
#include "wimlib/endianness.h"
#include "wimlib/error.h"
+#include "wimlib/lz_mf.h"
+#include "wimlib/lz_repsearch.h"
#include "wimlib/lzx.h"
#include "wimlib/util.h"
-#include <pthread.h>
-#include <math.h>
+
#include <string.h>
+#include <limits.h>
-#ifdef ENABLE_LZX_DEBUG
-# include "wimlib/decompress.h"
-#endif
+/* Number of entries in the match-choosing ('optimum') array, not counting
+ * the extra LZX_MAX_MATCH_LEN entries of slack. */
+#define LZX_OPTIM_ARRAY_LENGTH 4096
-#include "divsufsort/divsufsort.h"
+/* The preferred LZX block size, in bytes; the input is divided into blocks
+ * of this size. */
+#define LZX_DIV_BLOCK_SIZE 32768
-typedef u32 block_cost_t;
-#define INFINITE_BLOCK_COST ((block_cost_t)~0U)
+/* Number of match cache entries budgeted per input position, on average. */
+#define LZX_CACHE_PER_POS 8
-#define LZX_OPTIM_ARRAY_SIZE 4096
+/* Maximum number of matches the match-finder can return at one position:
+ * one per possible match length. */
+#define LZX_MAX_MATCHES_PER_POS (LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1)
-#define LZX_DIV_BLOCK_SIZE 32768
+/* Total number of entries in the match cache; each position's matches are
+ * preceded by one entry holding the match count. */
+#define LZX_CACHE_LEN (LZX_DIV_BLOCK_SIZE * (LZX_CACHE_PER_POS + 1))
-#define LZX_MAX_CACHE_PER_POS 10
+struct lzx_compressor;
-/* Codewords for the LZX main, length, and aligned offset Huffman codes */
+/* Codewords for the LZX Huffman codes. */
struct lzx_codewords {
- u16 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
- u16 len[LZX_LENCODE_NUM_SYMBOLS];
- u16 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
+ u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
+ u32 len[LZX_LENCODE_NUM_SYMBOLS];
+ u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
-/* Codeword lengths (in bits) for the LZX main, length, and aligned offset
- * Huffman codes.
- *
- * A 0 length means the codeword has zero frequency.
- */
+/* Codeword lengths (in bits) for the LZX Huffman codes.
+ * A zero length means the corresponding codeword has zero frequency. */
struct lzx_lens {
u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
u8 len[LZX_LENCODE_NUM_SYMBOLS];
u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
-/* Costs for the LZX main, length, and aligned offset Huffman symbols.
- *
- * If a codeword has zero frequency, it must still be assigned some nonzero cost
- * --- generally a high cost, since even if it gets used in the next iteration,
- * it probably will not be used very times. */
+/* Estimated cost, in bits, to output each symbol in the LZX Huffman codes. */
struct lzx_costs {
u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
u8 len[LZX_LENCODE_NUM_SYMBOLS];
u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
-/* The LZX main, length, and aligned offset Huffman codes */
+/* Codewords and lengths for the LZX Huffman codes. */
struct lzx_codes {
struct lzx_codewords codewords;
struct lzx_lens lens;
};
-/* Tables for tallying symbol frequencies in the three LZX alphabets */
+/* Symbol frequency counters for the LZX Huffman codes. */
struct lzx_freqs {
- input_idx_t main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
- input_idx_t len[LZX_LENCODE_NUM_SYMBOLS];
- input_idx_t aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
+ u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
+ u32 len[LZX_LENCODE_NUM_SYMBOLS];
+ u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
-/* LZX intermediate match/literal format */
-struct lzx_match {
- /* Bit Description
- *
- * 31 1 if a match, 0 if a literal.
- *
- * 30-25 position slot. This can be at most 50, so it will fit in 6
- * bits.
- *
- * 8-24 position footer. This is the offset of the real formatted
- * offset from the position base. This can be at most 17 bits
- * (since lzx_extra_bits[LZX_MAX_POSITION_SLOTS - 1] is 17).
- *
- * 0-7 length of match, minus 2. This can be at most
- * (LZX_MAX_MATCH_LEN - 2) == 255, so it will fit in 8 bits. */
- u32 data;
-};
+/* Intermediate LZX match/literal format */
+struct lzx_item {
-/* Raw LZ match/literal format: just a length and offset.
- *
- * The length is the number of bytes of the match, and the offset is the number
- * of bytes back in the input the match is from the current position.
- *
- * If @len < LZX_MIN_MATCH_LEN, then it's really just a literal byte and @offset is
- * meaningless. */
-struct raw_match {
- u16 len;
- input_idx_t offset;
+ /* Bits 0 - 9: Main symbol
+ * Bits 10 - 17: Length symbol
+ * Bits 18 - 22: Number of extra offset bits
+ * Bits 23+ : Extra offset bits */
+ u64 data;
};
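+
+/*
+ * Minimal sketch (hypothetical helper, not used elsewhere in this file) of
+ * packing one match/literal into the intermediate format documented above.
+ */
+static inline struct lzx_item
+lzx_item_pack_example(unsigned main_symbol, unsigned len_symbol,
+		      unsigned num_extra_bits, u32 extra_bits)
+{
+	return (struct lzx_item) {
+		.data = (u64)main_symbol |		/* bits 0 - 9   */
+			((u64)len_symbol << 10) |	/* bits 10 - 17 */
+			((u64)num_extra_bits << 18) |	/* bits 18 - 22 */
+			((u64)extra_bits << 23),	/* bits 23+     */
+	};
+}
+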
-/* Specification for an LZX block. */
-struct lzx_block_spec {
-
- /* One of the LZX_BLOCKTYPE_* constants indicating which type of this
- * block. */
- int block_type;
-
- /* 0-based position in the window at which this block starts. */
- input_idx_t window_pos;
-
- /* The number of bytes of uncompressed data this block represents. */
- input_idx_t block_size;
-
- /* The position in the 'chosen_matches' array in the `struct
- * lzx_compressor' at which the match/literal specifications for
- * this block begin. */
- input_idx_t chosen_matches_start_pos;
-
- /* The number of match/literal specifications for this block. */
- input_idx_t num_chosen_matches;
-
- /* Huffman codes for this block. */
- struct lzx_codes codes;
+/* Internal compression parameters */
+struct lzx_compressor_params {
+ u32 (*choose_items_for_block)(struct lzx_compressor *, u32, u32);
+ u32 num_optim_passes;
+ enum lz_mf_algo mf_algo;
+ u32 min_match_length;
+ u32 nice_match_length;
+ u32 max_search_depth;
};
/*
- * An array of these structures is used during the match-choosing algorithm.
- * They correspond to consecutive positions in the window and are used to keep
- * track of the cost to reach each position, and the match/literal choices that
- * need to be chosen to reach that position.
+ * Match chooser position data:
+ *
+ * An array of these structures is used during the near-optimal match-choosing
+ * algorithm. They correspond to consecutive positions in the window and are
+ * used to keep track of the cost to reach each position, and the match/literal
+ * choices that need to be chosen to reach that position.
*/
-struct lzx_optimal {
- /* The approximate minimum cost, in bits, to reach this position in the
- * window which has been found so far. */
- block_cost_t cost;
-
- /* The union here is just for clarity, since the fields are used in two
- * slightly different ways. Initially, the @prev structure is filled in
- * first, and links go from later in the window to earlier in the
- * window. Later, @next structure is filled in and links go from
- * earlier in the window to later in the window. */
- union {
- struct {
- /* Position of the start of the match or literal that
- * was taken to get to this position in the approximate
- * minimum-cost parse. */
- input_idx_t link;
-
- /* Offset (as in an LZ (length, offset) pair) of the
- * match or literal that was taken to get to this
- * position in the approximate minimum-cost parse. */
- input_idx_t match_offset;
- } prev;
- struct {
- /* Position at which the match or literal starting at
- * this position ends in the minimum-cost parse. */
- input_idx_t link;
-
- /* Offset (as in an LZ (length, offset) pair) of the
- * match or literal starting at this position in the
- * approximate minimum-cost parse. */
- input_idx_t match_offset;
- } next;
- };
-
- /* The match offset LRU queue that will exist when the approximate
- * minimum-cost path to reach this position is taken. */
- struct lzx_lru_queue queue;
-};
+struct lzx_mc_pos_data {
-/* Suffix array link */
-struct salink {
- /* Rank of highest ranked suffix that has rank lower than the suffix
- * corresponding to this structure and either has a lower position
- * (initially) or has a position lower than the highest position at
- * which matches have been searched for so far, or -1 if there is no
- * such suffix. */
- input_idx_t prev;
-
- /* Rank of lowest ranked suffix that has rank greater than the suffix
- * corresponding to this structure and either has a lower position
- * (intially) or has a position lower than the highest position at which
- * matches have been searched for so far, or -1 if there is no such
- * suffix. */
- input_idx_t next;
-
- /* Length of longest common prefix between the suffix corresponding to
- * this structure and the suffix with rank @prev, or 0 if @prev is -1.
- */
- input_idx_t lcpprev;
+ /* The cost, in bits, of the lowest-cost path that has been found to
+ * reach this position. This can change as progressively lower cost
+ * paths are found to reach this position. */
+ u32 cost;
+#define MC_INFINITE_COST UINT32_MAX
- /* Length of longest common prefix between the suffix corresponding to
- * this structure and the suffix with rank @next, or 0 if @next is -1.
+ /* The match or literal that was taken to reach this position. This can
+ * change as progressively lower cost paths are found to reach this
+ * position.
+ *
+ * This variable is divided into two bitfields.
+ *
+ * Literals:
+ * Low bits are 1, high bits are the literal.
+ *
+ * Explicit offset matches:
+ * Low bits are the match length, high bits are the offset plus 2.
+ *
+ * Repeat offset matches:
+ * Low bits are the match length, high bits are the queue index.
*/
- input_idx_t lcpnext;
-};
+ u32 mc_item_data;
+#define MC_OFFSET_SHIFT 9
+#define MC_LEN_MASK ((1 << MC_OFFSET_SHIFT) - 1)
-/* State of the LZX compressor. */
+ /* The state of the LZX recent match offsets queue at this position.
+ * This is filled in lazily, only after the minimum-cost path to this
+ * position is found.
+ *
+ * Note: the way we handle this adaptive state in the "minimum-cost"
+ * parse is actually only an approximation. It's possible for the
+	 * globally optimal, minimum-cost path to reach a given position via a
+	 * prefix that is *not* the minimum-cost path to that position.  This
+	 * can happen if such a path prefix results in a
+ * different adaptive state which results in lower costs later. We do
+ * not solve this problem; we only consider the lowest cost to reach
+ * each position, which seems to be an acceptable approximation. */
+ struct lzx_lru_queue queue _aligned_attribute(16);
+
+} _aligned_attribute(16);
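+
+/*
+ * Sketch (hypothetical helpers) of the 'mc_item_data' encoding documented
+ * above: the low MC_OFFSET_SHIFT bits hold the length (1 for a literal),
+ * and the high bits hold the literal byte, the offset plus 2, or the repeat
+ * offset queue index.
+ */
+static inline u32
+lzx_mc_item_pack_example(u32 len, u32 offset_data)
+{
+	return (offset_data << MC_OFFSET_SHIFT) | len;
+}
+
+static inline bool
+lzx_mc_item_is_literal_example(u32 mc_item_data)
+{
+	/* Matches have length >= LZX_MIN_MATCH_LEN (2), so a length field of
+	 * 1 unambiguously marks a literal. */
+	return (mc_item_data & MC_LEN_MASK) == 1;
+}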
+
+/* State of the LZX compressor */
struct lzx_compressor {
- /* The parameters that were used to create the compressor. */
- struct wimlib_lzx_params params;
+ /* Internal compression parameters */
+ struct lzx_compressor_params params;
- /* The buffer of data to be compressed.
- *
- * 0xe8 byte preprocessing is done directly on the data here before
- * further compression.
- *
- * Note that this compressor does *not* use a real sliding window!!!!
- * It's not needed in the WIM format, since every chunk is compressed
- * independently. This is by design, to allow random access to the
- * chunks.
- *
- * We reserve a few extra bytes to potentially allow reading off the end
- * of the array in the match-finding code for optimization purposes.
- */
- u8 *window;
+ /* The preprocessed buffer of data being compressed */
+ u8 *cur_window;
/* Number of bytes of data to be compressed, which is the number of
- * bytes of data in @window that are actually valid. */
- input_idx_t window_size;
-
- /* Allocated size of the @window. */
- input_idx_t max_window_size;
+ * bytes of data in @cur_window that are actually valid. */
+ u32 cur_window_size;
- /* Number of symbols in the main alphabet (depends on the
- * @max_window_size since it determines the maximum allowed offset). */
+ /* log2 order of the LZX window size for LZ match offset encoding
+ * purposes. Will be >= LZX_MIN_WINDOW_ORDER and <=
+ * LZX_MAX_WINDOW_ORDER.
+ *
+ * Note: 1 << @window_order is normally equal to @max_window_size,
+ * a.k.a. the allocated size of @cur_window, but it will be greater than
+ * @max_window_size in the event that the compressor was created with a
+ * non-power-of-2 block size. (See lzx_get_window_order().) */
+ unsigned window_order;
+
+ /* Number of symbols in the main alphabet. This depends on
+ * @window_order, since @window_order determines the maximum possible
+ * offset. It does not, however, depend on the *actual* size of the
+ * current data buffer being processed, which might be less than 1 <<
+ * @window_order. */
unsigned num_main_syms;
+ /* Lempel-Ziv match-finder */
+ struct lz_mf *mf;
+
+ /* Match-finder wrapper functions and data for near-optimal parsing.
+ *
+ * When doing more than one match-choosing pass over the data, matches
+ * found by the match-finder are cached to achieve a slight speedup when
+ * the same matches are needed on subsequent passes. This is suboptimal
+ * because different matches may be preferred with different cost
+ * models, but it is a very worthwhile speedup. */
+ unsigned (*get_matches_func)(struct lzx_compressor *, const struct lz_match **);
+ void (*skip_bytes_func)(struct lzx_compressor *, unsigned n);
+ u32 match_window_pos;
+ u32 match_window_end;
+ struct lz_match *cached_matches;
+ struct lz_match *cache_ptr;
+ struct lz_match *cache_limit;
+
+ /* Position data for near-optimal parsing. */
+ struct lzx_mc_pos_data optimum[LZX_OPTIM_ARRAY_LENGTH + LZX_MAX_MATCH_LEN];
+
+ /* The cost model currently being used for near-optimal parsing. */
+ struct lzx_costs costs;
+
/* The current match offset LRU queue. */
struct lzx_lru_queue queue;
- /* Space for the sequences of matches/literals that were chosen for each
- * block. */
- struct lzx_match *chosen_matches;
+ /* Frequency counters for the current block. */
+ struct lzx_freqs freqs;
- /* Information about the LZX blocks the preprocessed input was divided
- * into. */
- struct lzx_block_spec *block_specs;
+ /* The Huffman codes for the current and previous blocks. */
+ struct lzx_codes codes[2];
- /* Number of LZX blocks the input was divided into; a.k.a. the number of
- * elements of @block_specs that are valid. */
- unsigned num_blocks;
+ /* Which 'struct lzx_codes' is being used for the current block. The
+ * other was used for the previous block (if this isn't the first
+ * block). */
+ unsigned int codes_index;
- /* This is simply filled in with zeroes and used to avoid special-casing
- * the output of the first compressed Huffman code, which conceptually
- * has a delta taken from a code with all symbols having zero-length
- * codewords. */
- struct lzx_codes zero_codes;
+ /* Dummy lengths that are always 0. */
+ struct lzx_lens zero_lens;
- /* The current cost model. */
- struct lzx_costs costs;
+ /* Matches/literals that were chosen for the current block. */
+ struct lzx_item chosen_items[LZX_DIV_BLOCK_SIZE];
- /* Fast algorithm only: Array of hash table links. */
- input_idx_t *prev_tab;
+ /* Table mapping match offset => offset slot for small offsets */
+#define LZX_NUM_FAST_OFFSETS 32768
+ u8 offset_slot_fast[LZX_NUM_FAST_OFFSETS];
+};
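+
+/*
+ * Sketch of the intended use of 'offset_slot_fast' (illustrative only: the
+ * helper name is hypothetical, and the generic fallback is assumed to be the
+ * "offset slot" rename of lzx_get_position_slot_raw() from the old code).
+ */
+static inline unsigned
+lzx_get_offset_slot_example(const struct lzx_compressor *c, u32 offset)
+{
+	if (offset < LZX_NUM_FAST_OFFSETS)
+		return c->offset_slot_fast[offset];	/* O(1) table lookup */
+	return lzx_get_offset_slot_raw(offset + LZX_OFFSET_OFFSET);
+}
+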
- /* Suffix array for window.
- * This is a mapping from suffix rank to suffix position. */
- input_idx_t *SA;
+/*
+ * Structure to keep track of the current state of sending bits to the
+ * compressed output buffer.
+ *
+ * The LZX bitstream is encoded as a sequence of 16-bit coding units.
+ */
+struct lzx_output_bitstream {
- /* Inverse suffix array for window.
- * This is a mapping from suffix position to suffix rank.
- * If 0 <= r < window_size, then ISA[SA[r]] == r. */
- input_idx_t *ISA;
+ /* Bits that haven't yet been written to the output buffer. */
+ u32 bitbuf;
- /* Longest common prefix array corresponding to the suffix array SA.
- * LCP[i] is the length of the longest common prefix between the
- * suffixes with positions SA[i - 1] and SA[i]. LCP[0] is undefined.
- */
- input_idx_t *LCP;
+ /* Number of bits currently held in @bitbuf. */
+ u32 bitcount;
- /* Suffix array links.
- *
- * During a linear scan of the input string to find matches, this array
- * used to keep track of which rank suffixes in the suffix array appear
- * before the current position. Instead of searching in the original
- * suffix array, scans for matches at a given position traverse a linked
- * list containing only suffixes that appear before that position. */
- struct salink *salink;
-
- /* Position in window of next match to return. */
- input_idx_t match_window_pos;
-
- /* The match-finder shall ensure the length of matches does not exceed
- * this position in the input. */
- input_idx_t match_window_end;
-
- /* Matches found by the match-finder are cached in the following array
- * to achieve a slight speedup when the same matches are needed on
- * subsequent passes. This is suboptimal because different matches may
- * be preferred with different cost models, but seems to be a worthwhile
- * speedup. */
- struct raw_match *cached_matches;
- unsigned cached_matches_pos;
- bool matches_cached;
-
- /* Slow algorithm only: Temporary space used for match-choosing
- * algorithm.
- *
- * The size of this array must be at least LZX_MAX_MATCH_LEN but
- * otherwise is arbitrary. More space simply allows the match-choosing
- * algorithm to potentially find better matches (depending on the input,
- * as always). */
- struct lzx_optimal *optimum;
+ /* Pointer to the start of the output buffer. */
+ le16 *start;
- /* Slow algorithm only: Variables used by the match-choosing algorithm.
- *
- * When matches have been chosen, optimum_cur_idx is set to the position
- * in the window of the next match/literal to return and optimum_end_idx
- * is set to the position in the window at the end of the last
- * match/literal to return. */
- u32 optimum_cur_idx;
- u32 optimum_end_idx;
+ /* Pointer to the position in the output buffer at which the next coding
+ * unit should be written. */
+ le16 *next;
+
+ /* Pointer past the end of the output buffer. */
+ le16 *end;
};
-/* Returns the LZX position slot that corresponds to a given match offset,
- * taking into account the recent offset queue and updating it if the offset is
- * found in it. */
-static unsigned
-lzx_get_position_slot(unsigned offset, struct lzx_lru_queue *queue)
+/*
+ * Initialize the output bitstream.
+ *
+ * @os
+ * The output bitstream structure to initialize.
+ * @buffer
+ * The buffer being written to.
+ * @size
+ * Size of @buffer, in bytes.
+ */
+static void
+lzx_init_output(struct lzx_output_bitstream *os, void *buffer, u32 size)
+{
+ os->bitbuf = 0;
+ os->bitcount = 0;
+ os->start = buffer;
+ os->next = os->start;
+ os->end = os->start + size / sizeof(le16);
+}
+
+/*
+ * Write some bits to the output bitstream.
+ *
+ * The bits are given by the low-order @num_bits bits of @bits. Higher-order
+ * bits in @bits must be zero.  At most 17 bits can be written at once.
+ *
+ * @max_num_bits is a compile-time constant that specifies the maximum number of
+ * bits that can ever be written at the call site. Currently, it is used to
+ * optimize away the conditional code for writing a second 16-bit coding unit
+ * when writing fewer than 17 bits.
+ *
+ * If the output buffer space is exhausted, then the bits will be ignored, and
+ * lzx_flush_output() will return 0 when it gets called.
+ */
+static inline void
+lzx_write_varbits(struct lzx_output_bitstream *os,
+ const u32 bits, const unsigned int num_bits,
+ const unsigned int max_num_bits)
{
- unsigned position_slot;
-
- /* See if the offset was recently used. */
- for (unsigned i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
- if (offset == queue->R[i]) {
- /* Found it. */
-
- /* Bring the repeat offset to the front of the
- * queue. Note: this is, in fact, not a real
- * LRU queue because repeat matches are simply
- * swapped to the front. */
- swap(queue->R[0], queue->R[i]);
-
- /* The resulting position slot is simply the first index
- * at which the offset was found in the queue. */
- return i;
+ /* This code is optimized for LZX, which never needs to write more than
+ * 17 bits at once. */
+ LZX_ASSERT(num_bits <= 17);
+ LZX_ASSERT(num_bits <= max_num_bits);
+ LZX_ASSERT(os->bitcount <= 15);
+
+ /* Add the bits to the bit buffer variable. @bitcount will be at most
+ * 15, so there will be just enough space for the maximum possible
+ * @num_bits of 17. */
+ os->bitcount += num_bits;
+ os->bitbuf = (os->bitbuf << num_bits) | bits;
+
+ /* Check whether any coding units need to be written. */
+ if (os->bitcount >= 16) {
+
+ os->bitcount -= 16;
+
+ /* Write a coding unit, unless it would overflow the buffer. */
+ if (os->next != os->end)
+ put_unaligned_u16_le(os->bitbuf >> os->bitcount, os->next++);
+
+ /* If writing 17 bits, a second coding unit might need to be
+ * written. But because 'max_num_bits' is a compile-time
+ * constant, the compiler will optimize away this code at most
+ * call sites. */
+ if (max_num_bits == 17 && os->bitcount == 16) {
+ if (os->next != os->end)
+ put_unaligned_u16_le(os->bitbuf, os->next++);
+ os->bitcount = 0;
}
}
+}
+
+/* Use when @num_bits is a compile-time constant. Otherwise use
+ * lzx_write_varbits(). */
+static inline void
+lzx_write_bits(struct lzx_output_bitstream *os,
+ const u32 bits, const unsigned int num_bits)
+{
+ lzx_write_varbits(os, bits, num_bits, num_bits);
+}
- /* The offset was not recently used; look up its real position slot. */
- position_slot = lzx_get_position_slot_raw(offset + LZX_OFFSET_OFFSET);
+/*
+ * Flush the last coding unit to the output buffer if needed. Return the total
+ * number of bytes written to the output buffer, or 0 if an overflow occurred.
+ */
+static u32
+lzx_flush_output(struct lzx_output_bitstream *os)
+{
+ if (os->next == os->end)
+ return 0;
- /* Bring the new offset to the front of the queue. */
- for (unsigned i = LZX_NUM_RECENT_OFFSETS - 1; i > 0; i--)
- queue->R[i] = queue->R[i - 1];
- queue->R[0] = offset;
+ if (os->bitcount != 0)
+ put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next++);
- return position_slot;
+ return (const u8 *)os->next - (const u8 *)os->start;
}
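+
+/*
+ * Usage sketch (illustrative only, never called): the expected
+ * init / write / flush lifecycle of the output bitstream.  'out_buf' and
+ * 'out_size' are hypothetical names.
+ */
+static inline u32
+lzx_output_bitstream_usage_example(void *out_buf, u32 out_size)
+{
+	struct lzx_output_bitstream os;
+
+	lzx_init_output(&os, out_buf, out_size);
+	lzx_write_bits(&os, LZX_BLOCKTYPE_VERBATIM, 3);	/* 3-bit block type */
+	lzx_write_varbits(&os, 0x1FFFF, 17, 17);	/* maximal 17-bit field */
+	return lzx_flush_output(&os);	/* 0 means the buffer overflowed */
+}
+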
/* Build the main, length, and aligned offset Huffman codes used in LZX.
* This takes as input the frequency tables for each code and produces as output
* a set of tables that map symbols to codewords and codeword lengths. */
static void
-lzx_make_huffman_codes(const struct lzx_freqs *freqs,
- struct lzx_codes *codes,
+lzx_make_huffman_codes(const struct lzx_freqs *freqs, struct lzx_codes *codes,
unsigned num_main_syms)
{
	make_canonical_huffman_code(num_main_syms,
				    LZX_MAX_MAIN_CODEWORD_LEN,
				    freqs->main,
				    codes->lens.main,
				    codes->codewords.main);

	make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
				    LZX_MAX_LEN_CODEWORD_LEN,
				    freqs->len,
				    codes->lens.len,
				    codes->codewords.len);

	make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
				    LZX_MAX_ALIGNED_CODEWORD_LEN,
				    freqs->aligned,
				    codes->lens.aligned,
				    codes->codewords.aligned);
}
-/*
- * Output an LZX match.
- *
- * @out: The bitstream to write the match to.
- * @block_type: The type of the LZX block (LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM)
- * @match: The match.
- * @codes: Pointer to a structure that contains the codewords for the
- * main, length, and aligned offset Huffman codes.
- */
-static void
-lzx_write_match(struct output_bitstream *out, int block_type,
- struct lzx_match match, const struct lzx_codes *codes)
+static unsigned
+lzx_compute_precode_items(const u8 lens[restrict],
+ const u8 prev_lens[restrict],
+ const unsigned num_lens,
+ u32 precode_freqs[restrict],
+ unsigned precode_items[restrict])
{
- /* low 8 bits are the match length minus 2 */
- unsigned match_len_minus_2 = match.data & 0xff;
- /* Next 17 bits are the position footer */
- unsigned position_footer = (match.data >> 8) & 0x1ffff; /* 17 bits */
- /* Next 6 bits are the position slot. */
- unsigned position_slot = (match.data >> 25) & 0x3f; /* 6 bits */
- unsigned len_header;
- unsigned len_footer;
- unsigned main_symbol;
- unsigned num_extra_bits;
- unsigned verbatim_bits;
- unsigned aligned_bits;
-
- /* If the match length is less than MIN_MATCH_LEN (= 2) +
- * NUM_PRIMARY_LENS (= 7), the length header contains
- * the match length minus MIN_MATCH_LEN, and there is no
- * length footer.
- *
- * Otherwise, the length header contains
- * NUM_PRIMARY_LENS, and the length footer contains
- * the match length minus NUM_PRIMARY_LENS minus
- * MIN_MATCH_LEN. */
- if (match_len_minus_2 < LZX_NUM_PRIMARY_LENS) {
- len_header = match_len_minus_2;
- /* No length footer-- mark it with a special
- * value. */
- len_footer = (unsigned)(-1);
- } else {
- len_header = LZX_NUM_PRIMARY_LENS;
- len_footer = match_len_minus_2 - LZX_NUM_PRIMARY_LENS;
- }
-
- /* Combine the position slot with the length header into a single symbol
- * that will be encoded with the main code.
- *
- * The actual main symbol is offset by LZX_NUM_CHARS because values
- * under LZX_NUM_CHARS are used to indicate a literal byte rather than a
- * match. */
- main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
-
- /* Output main symbol. */
- bitstream_put_bits(out, codes->codewords.main[main_symbol],
- codes->lens.main[main_symbol]);
-
- /* If there is a length footer, output it using the
- * length Huffman code. */
- if (len_footer != (unsigned)(-1)) {
- bitstream_put_bits(out, codes->codewords.len[len_footer],
- codes->lens.len[len_footer]);
- }
-
- num_extra_bits = lzx_get_num_extra_bits(position_slot);
-
- /* For aligned offset blocks with at least 3 extra bits, output the
- * verbatim bits literally, then the aligned bits encoded using the
- * aligned offset code. Otherwise, only the verbatim bits need to be
- * output. */
- if ((block_type == LZX_BLOCKTYPE_ALIGNED) && (num_extra_bits >= 3)) {
+ unsigned *itemptr;
+ unsigned run_start;
+ unsigned run_end;
+ unsigned extra_bits;
+ int delta;
+ u8 len;
+
+ itemptr = precode_items;
+ run_start = 0;
+ do {
+ /* Find the next run of codeword lengths. */
- verbatim_bits = position_footer >> 3;
- bitstream_put_bits(out, verbatim_bits,
- num_extra_bits - 3);
+ /* len = the length being repeated */
+ len = lens[run_start];
- aligned_bits = (position_footer & 7);
- bitstream_put_bits(out,
- codes->codewords.aligned[aligned_bits],
- codes->lens.aligned[aligned_bits]);
- } else {
- /* verbatim bits is the same as the position
- * footer, in this case. */
- bitstream_put_bits(out, position_footer, num_extra_bits);
- }
-}
+ run_end = run_start + 1;
-static unsigned
-lzx_build_precode(const u8 lens[restrict],
- const u8 prev_lens[restrict],
- const unsigned num_syms,
- input_idx_t precode_freqs[restrict LZX_PRECODE_NUM_SYMBOLS],
- u8 output_syms[restrict num_syms],
- u8 precode_lens[restrict LZX_PRECODE_NUM_SYMBOLS],
- u16 precode_codewords[restrict LZX_PRECODE_NUM_SYMBOLS],
- unsigned *num_additional_bits_ret)
-{
- memset(precode_freqs, 0,
- LZX_PRECODE_NUM_SYMBOLS * sizeof(precode_freqs[0]));
-
- /* Since the code word lengths use a form of RLE encoding, the goal here
- * is to find each run of identical lengths when going through them in
- * symbol order (including runs of length 1). For each run, as many
- * lengths are encoded using RLE as possible, and the rest are output
- * literally.
- *
- * output_syms[] will be filled in with the length symbols that will be
- * output, including RLE codes, not yet encoded using the precode.
- *
- * cur_run_len keeps track of how many code word lengths are in the
- * current run of identical lengths. */
- unsigned output_syms_idx = 0;
- unsigned cur_run_len = 1;
- unsigned num_additional_bits = 0;
- for (unsigned i = 1; i <= num_syms; i++) {
-
- if (i != num_syms && lens[i] == lens[i - 1]) {
- /* Still in a run--- keep going. */
- cur_run_len++;
+ /* Fast case for a single length. */
+ if (likely(run_end == num_lens || len != lens[run_end])) {
+ delta = prev_lens[run_start] - len;
+ if (delta < 0)
+ delta += 17;
+ precode_freqs[delta]++;
+ *itemptr++ = delta;
+ run_start++;
continue;
}
- /* Run ended! Check if it is a run of zeroes or a run of
- * nonzeroes. */
-
- /* The symbol that was repeated in the run--- not to be confused
- * with the length *of* the run (cur_run_len) */
- unsigned len_in_run = lens[i - 1];
-
- if (len_in_run == 0) {
- /* A run of 0's. Encode it in as few length
- * codes as we can. */
+ /* Extend the run. */
+ do {
+ run_end++;
+ } while (run_end != num_lens && len == lens[run_end]);
- /* The magic length 18 indicates a run of 20 + n zeroes,
- * where n is an uncompressed literal 5-bit integer that
- * follows the magic length. */
- while (cur_run_len >= 20) {
- unsigned additional_bits;
+ if (len == 0) {
+ /* Run of zeroes. */
- additional_bits = min(cur_run_len - 20, 0x1f);
- num_additional_bits += 5;
+ /* Symbol 18: RLE 20 to 51 zeroes at a time. */
+ while ((run_end - run_start) >= 20) {
+ extra_bits = min((run_end - run_start) - 20, 0x1f);
precode_freqs[18]++;
- output_syms[output_syms_idx++] = 18;
- output_syms[output_syms_idx++] = additional_bits;
- cur_run_len -= 20 + additional_bits;
+ *itemptr++ = 18 | (extra_bits << 5);
+ run_start += 20 + extra_bits;
}
- /* The magic length 17 indicates a run of 4 + n zeroes,
- * where n is an uncompressed literal 4-bit integer that
- * follows the magic length. */
- while (cur_run_len >= 4) {
- unsigned additional_bits;
-
- additional_bits = min(cur_run_len - 4, 0xf);
- num_additional_bits += 4;
+ /* Symbol 17: RLE 4 to 19 zeroes at a time. */
+ if ((run_end - run_start) >= 4) {
+ extra_bits = min((run_end - run_start) - 4, 0xf);
precode_freqs[17]++;
- output_syms[output_syms_idx++] = 17;
- output_syms[output_syms_idx++] = additional_bits;
- cur_run_len -= 4 + additional_bits;
+ *itemptr++ = 17 | (extra_bits << 5);
+ run_start += 4 + extra_bits;
}
-
} else {
/* A run of nonzero lengths. */
- /* The magic length 19 indicates a run of 4 + n
- * nonzeroes, where n is a literal bit that follows the
- * magic length, and where the value of the lengths in
- * the run is given by an extra length symbol, encoded
- * with the precode, that follows the literal bit.
- *
- * The extra length symbol is encoded as a difference
- * from the length of the codeword for the first symbol
- * in the run in the previous code.
- * */
- while (cur_run_len >= 4) {
- unsigned additional_bits;
- signed char delta;
-
- additional_bits = (cur_run_len > 4);
- num_additional_bits += 1;
- delta = (signed char)prev_lens[i - cur_run_len] -
- (signed char)len_in_run;
+			/* Symbol 19: RLE 4 or 5 of any length at a time. */
+ while ((run_end - run_start) >= 4) {
+ extra_bits = (run_end - run_start) > 4;
+ delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
precode_freqs[19]++;
- precode_freqs[(unsigned char)delta]++;
- output_syms[output_syms_idx++] = 19;
- output_syms[output_syms_idx++] = additional_bits;
- output_syms[output_syms_idx++] = delta;
- cur_run_len -= 4 + additional_bits;
+ precode_freqs[delta]++;
+ *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
+ run_start += 4 + extra_bits;
}
}
- /* Any remaining lengths in the run are outputted without RLE,
- * as a difference from the length of that codeword in the
- * previous code. */
- while (cur_run_len > 0) {
- signed char delta;
-
- delta = (signed char)prev_lens[i - cur_run_len] -
- (signed char)len_in_run;
+ /* Output any remaining lengths without RLE. */
+ while (run_start != run_end) {
+ delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
-
- precode_freqs[(unsigned char)delta]++;
- output_syms[output_syms_idx++] = delta;
- cur_run_len--;
+ precode_freqs[delta]++;
+ *itemptr++ = delta;
+ run_start++;
}
+ } while (run_start != num_lens);
- cur_run_len = 1;
- }
-
- /* Build the precode from the frequencies of the length symbols. */
-
- make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
- LZX_MAX_PRE_CODEWORD_LEN,
- precode_freqs, precode_lens,
- precode_codewords);
-
- *num_additional_bits_ret = num_additional_bits;
-
- return output_syms_idx;
+ return itemptr - precode_items;
}
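+
+/*
+ * Worked example of the item format produced above (derived directly from the
+ * packing in lzx_compute_precode_items(), for illustration): a run of 25 zero
+ * lengths becomes the single item 18 | (5 << 5), i.e. symbol 18 with
+ * extra_bits = 5, covering 20 + 5 = 25 zeroes; a run of 5 equal nonzero
+ * lengths with delta = 3 becomes 19 | (1 << 5) | (3 << 6).
+ */
+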
/*
- * Writes a compressed Huffman code to the output, preceded by the precode for
- * it.
- *
- * The Huffman code is represented in the output as a series of path lengths
- * from which the canonical Huffman code can be reconstructed. The path lengths
- * themselves are compressed using a separate Huffman code, the precode, which
- * consists of LZX_PRECODE_NUM_SYMBOLS (= 20) symbols that cover all possible
- * code lengths, plus extra codes for repeated lengths. The path lengths of the
- * precode precede the path lengths of the larger code and are uncompressed,
- * consisting of 20 entries of 4 bits each.
- *
- * @out: Bitstream to write the code to.
- * @lens: The code lengths for the Huffman code, indexed by symbol.
- * @prev_lens: Code lengths for this Huffman code, indexed by symbol,
- * in the *previous block*, or all zeroes if this is the
- * first block.
- * @num_syms: The number of symbols in the code.
+ * Output a Huffman code in the compressed form used in LZX.
+ *
+ * The Huffman code is represented in the output as a logical series of codeword
+ * lengths from which the Huffman code, which must be in canonical form, can be
+ * reconstructed.
+ *
+ * The codeword lengths are themselves compressed using a separate Huffman code,
+ * the "precode", which contains a symbol for each possible codeword length in
+ * the larger code as well as several special symbols to represent repeated
+ * codeword lengths (a form of run-length encoding). The precode is itself
+ * constructed in canonical form, and its codeword lengths are represented
+ * literally in 20 4-bit fields that immediately precede the compressed codeword
+ * lengths of the larger code.
+ *
+ * Furthermore, the codeword lengths of the larger code are actually represented
+ * as deltas from the codeword lengths of the corresponding code in the previous
+ * block.
+ *
+ * @os:
+ * Bitstream to which to write the compressed Huffman code.
+ * @lens:
+ * The codeword lengths, indexed by symbol, in the Huffman code.
+ * @prev_lens:
+ * The codeword lengths, indexed by symbol, in the corresponding Huffman
+ * code in the previous block, or all zeroes if this is the first block.
+ * @num_lens:
+ * The number of symbols in the Huffman code.
*/
static void
-lzx_write_compressed_code(struct output_bitstream *out,
+lzx_write_compressed_code(struct lzx_output_bitstream *os,
const u8 lens[restrict],
const u8 prev_lens[restrict],
- unsigned num_syms)
+ unsigned num_lens)
{
- input_idx_t precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
- u8 output_syms[num_syms];
+ u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
- u16 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
+ u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
+ unsigned precode_items[num_lens];
+ unsigned num_precode_items;
+ unsigned precode_item;
+ unsigned precode_sym;
unsigned i;
- unsigned num_output_syms;
- u8 precode_sym;
- unsigned dummy;
-
- num_output_syms = lzx_build_precode(lens,
- prev_lens,
- num_syms,
- precode_freqs,
- output_syms,
- precode_lens,
- precode_codewords,
- &dummy);
-
- /* Write the lengths of the precode codes to the output. */
+
for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
- bitstream_put_bits(out, precode_lens[i],
- LZX_PRECODE_ELEMENT_SIZE);
+ precode_freqs[i] = 0;
- /* Write the length symbols, encoded with the precode, to the output. */
+ /* Compute the "items" (RLE / literal tokens and extra bits) with which
+ * the codeword lengths in the larger code will be output. */
+ num_precode_items = lzx_compute_precode_items(lens,
+ prev_lens,
+ num_lens,
+ precode_freqs,
+ precode_items);
- for (i = 0; i < num_output_syms; ) {
- precode_sym = output_syms[i++];
+ /* Build the precode. */
+ make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
+ LZX_MAX_PRE_CODEWORD_LEN,
+ precode_freqs, precode_lens,
+ precode_codewords);
- bitstream_put_bits(out, precode_codewords[precode_sym],
- precode_lens[precode_sym]);
- switch (precode_sym) {
- case 17:
- bitstream_put_bits(out, output_syms[i++], 4);
- break;
- case 18:
- bitstream_put_bits(out, output_syms[i++], 5);
- break;
- case 19:
- bitstream_put_bits(out, output_syms[i++], 1);
- bitstream_put_bits(out,
- precode_codewords[output_syms[i]],
- precode_lens[output_syms[i]]);
- i++;
- break;
- default:
- break;
+ /* Output the lengths of the codewords in the precode. */
+ for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
+ lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
+
+ /* Output the encoded lengths of the codewords in the larger code. */
+ for (i = 0; i < num_precode_items; i++) {
+ precode_item = precode_items[i];
+ precode_sym = precode_item & 0x1F;
+ lzx_write_varbits(os, precode_codewords[precode_sym],
+ precode_lens[precode_sym],
+ LZX_MAX_PRE_CODEWORD_LEN);
+ if (precode_sym >= 17) {
+ if (precode_sym == 17) {
+ lzx_write_bits(os, precode_item >> 5, 4);
+ } else if (precode_sym == 18) {
+ lzx_write_bits(os, precode_item >> 5, 5);
+ } else {
+ lzx_write_bits(os, (precode_item >> 5) & 1, 1);
+ precode_sym = precode_item >> 6;
+ lzx_write_varbits(os, precode_codewords[precode_sym],
+ precode_lens[precode_sym],
+ LZX_MAX_PRE_CODEWORD_LEN);
+ }
}
}
}
+/* Output a match or literal. */
+static inline void
+lzx_write_item(struct lzx_output_bitstream *os, struct lzx_item item,
+ unsigned ones_if_aligned, const struct lzx_codes *codes)
+{
+ u64 data = item.data;
+ unsigned main_symbol;
+ unsigned len_symbol;
+ unsigned num_extra_bits;
+ u32 extra_bits;
+
+ main_symbol = data & 0x3FF;
+
+ lzx_write_varbits(os, codes->codewords.main[main_symbol],
+ codes->lens.main[main_symbol],
+ LZX_MAX_MAIN_CODEWORD_LEN);
+
+ if (main_symbol < LZX_NUM_CHARS) /* Literal? */
+ return;
+
+ len_symbol = (data >> 10) & 0xFF;
+
+ if (len_symbol != LZX_LENCODE_NUM_SYMBOLS) {
+ lzx_write_varbits(os, codes->codewords.len[len_symbol],
+ codes->lens.len[len_symbol],
+ LZX_MAX_LEN_CODEWORD_LEN);
+ }
+
+ num_extra_bits = (data >> 18) & 0x1F;
+ if (num_extra_bits == 0) /* Small offset or repeat offset match? */
+ return;
+
+ extra_bits = data >> 23;
+
+	/* Branchless form of:
+	 *	if (block_type == LZX_BLOCKTYPE_ALIGNED && num_extra_bits >= 3)
+	 * where 'ones_if_aligned' is all ones for aligned offset blocks and
+	 * zero otherwise. */
+	if ((num_extra_bits & ones_if_aligned) >= 3) {
+
+ /* Aligned offset blocks: The low 3 bits of the extra offset
+ * bits are Huffman-encoded using the aligned offset code. The
+ * remaining bits are output literally. */
+
+ lzx_write_varbits(os, extra_bits >> 3, num_extra_bits - 3, 14);
+
+ lzx_write_varbits(os, codes->codewords.aligned[extra_bits & 7],
+ codes->lens.aligned[extra_bits & 7],
+ LZX_MAX_ALIGNED_CODEWORD_LEN);
+ } else {
+ /* Verbatim blocks, or fewer than 3 extra bits: All extra
+ * offset bits are output literally. */
+ lzx_write_varbits(os, extra_bits, num_extra_bits, 17);
+ }
+}
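+
+/*
+ * Worked example for the aligned offset case above: with num_extra_bits = 5
+ * and extra_bits = 0b10110, the high 2 bits (0b10) are written verbatim and
+ * the low 3 bits (0b110) are written using the aligned offset code.
+ */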
+
/*
- * Writes all compressed matches and literal bytes in an LZX block to the the
- * output bitstream.
+ * Write all matches and literal bytes (which were precomputed) in an LZX
+ * compressed block to the output bitstream in the final compressed
+ * representation.
*
- * @ostream
+ * @os
* The output bitstream.
* @block_type
- * The type of the block (LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM).
- * @match_tab
- * The array of matches/literals that will be output (length @match_count).
- * @match_count
- * Number of matches/literals to be output.
+ * The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
+ * LZX_BLOCKTYPE_VERBATIM).
+ * @items
+ * The array of matches/literals to output.
+ * @num_items
+ * Number of matches/literals to output (length of @items).
* @codes
- * Pointer to a structure that contains the codewords for the main, length,
- * and aligned offset Huffman codes.
+ * The main, length, and aligned offset Huffman codes for the current
+ * LZX compressed block.
*/
static void
-lzx_write_matches_and_literals(struct output_bitstream *ostream,
- int block_type,
- const struct lzx_match match_tab[],
- unsigned match_count,
- const struct lzx_codes *codes)
-{
- for (unsigned i = 0; i < match_count; i++) {
- struct lzx_match match = match_tab[i];
-
- /* High bit of the match indicates whether the match is an
- * actual match (1) or a literal uncompressed byte (0) */
- if (match.data & 0x80000000) {
- /* match */
- lzx_write_match(ostream, block_type,
- match, codes);
- } else {
- /* literal byte */
- bitstream_put_bits(ostream,
- codes->codewords.main[match.data],
- codes->lens.main[match.data]);
- }
- }
-}
-
-static void
-lzx_assert_codes_valid(const struct lzx_codes * codes, unsigned num_main_syms)
+lzx_write_items(struct lzx_output_bitstream *os, int block_type,
+ const struct lzx_item items[], u32 num_items,
+ const struct lzx_codes *codes)
{
-#ifdef ENABLE_LZX_DEBUG
- unsigned i;
-
- for (i = 0; i < num_main_syms; i++)
- LZX_ASSERT(codes->lens.main[i] <= LZX_MAX_MAIN_CODEWORD_LEN);
+ unsigned ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
- for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
- LZX_ASSERT(codes->lens.len[i] <= LZX_MAX_LEN_CODEWORD_LEN);
-
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- LZX_ASSERT(codes->lens.aligned[i] <= LZX_MAX_ALIGNED_CODEWORD_LEN);
-
- const unsigned tablebits = 10;
- u16 decode_table[(1 << tablebits) +
- (2 * max(num_main_syms, LZX_LENCODE_NUM_SYMBOLS))]
- _aligned_attribute(DECODE_TABLE_ALIGNMENT);
- LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
- num_main_syms,
- min(tablebits, LZX_MAINCODE_TABLEBITS),
- codes->lens.main,
- LZX_MAX_MAIN_CODEWORD_LEN));
- LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
- LZX_LENCODE_NUM_SYMBOLS,
- min(tablebits, LZX_LENCODE_TABLEBITS),
- codes->lens.len,
- LZX_MAX_LEN_CODEWORD_LEN));
- LZX_ASSERT(0 == make_huffman_decode_table(decode_table,
- LZX_ALIGNEDCODE_NUM_SYMBOLS,
- min(tablebits, LZX_ALIGNEDCODE_TABLEBITS),
- codes->lens.aligned,
- LZX_MAX_ALIGNED_CODEWORD_LEN));
-#endif /* ENABLE_LZX_DEBUG */
+ for (u32 i = 0; i < num_items; i++)
+ lzx_write_item(os, items[i], ones_if_aligned, codes);
}
-/* Write an LZX aligned offset or verbatim block to the output. */
+/* Write an LZX aligned offset or verbatim block to the output bitstream. */
static void
lzx_write_compressed_block(int block_type,
- unsigned block_size,
- unsigned max_window_size,
+ u32 block_size,
+ unsigned window_order,
unsigned num_main_syms,
- struct lzx_match * chosen_matches,
- unsigned num_chosen_matches,
+ struct lzx_item * chosen_items,
+ u32 num_chosen_items,
const struct lzx_codes * codes,
- const struct lzx_codes * prev_codes,
- struct output_bitstream * ostream)
+ const struct lzx_lens * prev_lens,
+ struct lzx_output_bitstream * os)
{
- unsigned i;
-
LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
block_type == LZX_BLOCKTYPE_VERBATIM);
- lzx_assert_codes_valid(codes, num_main_syms);
/* The first three bits indicate the type of block and are one of the
* LZX_BLOCKTYPE_* constants. */
- bitstream_put_bits(ostream, block_type, 3);
+ lzx_write_bits(os, block_type, 3);
	/* Output the block size.
	 *
	 * A single bit indicates whether the block has the default size of
	 * 32768 bytes; if not, the size is output explicitly (16 bits,
	 * preceded by its high 8 bits when the window order is at least 16).
	 * This deviation from the specification doesn't matter much,
	 * because WIMs created with chunk size greater than 32768 can seemingly
	 * only be opened by wimlib anyway. */
if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
- bitstream_put_bits(ostream, 1, 1);
+ lzx_write_bits(os, 1, 1);
} else {
- bitstream_put_bits(ostream, 0, 1);
+ lzx_write_bits(os, 0, 1);
- if (max_window_size >= 65536)
- bitstream_put_bits(ostream, block_size >> 16, 8);
+ if (window_order >= 16)
+ lzx_write_bits(os, block_size >> 16, 8);
- bitstream_put_bits(ostream, block_size, 16);
+ lzx_write_bits(os, block_size & 0xFFFF, 16);
}
- /* Write out lengths of the main code. Note that the LZX specification
- * incorrectly states that the aligned offset code comes after the
- * length code, but in fact it is the very first code to be written
- * (before the main code). */
- if (block_type == LZX_BLOCKTYPE_ALIGNED)
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- bitstream_put_bits(ostream, codes->lens.aligned[i],
- LZX_ALIGNEDCODE_ELEMENT_SIZE);
-
- LZX_DEBUG("Writing main code...");
-
- /* Write the precode and lengths for the first LZX_NUM_CHARS symbols in
- * the main code, which are the codewords for literal bytes. */
- lzx_write_compressed_code(ostream,
- codes->lens.main,
- prev_codes->lens.main,
- LZX_NUM_CHARS);
+ /* If it's an aligned offset block, output the aligned offset code. */
+ if (block_type == LZX_BLOCKTYPE_ALIGNED) {
+ for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+ lzx_write_bits(os, codes->lens.aligned[i],
+ LZX_ALIGNEDCODE_ELEMENT_SIZE);
+ }
+ }
- /* Write the precode and lengths for the rest of the main code, which
- * are the codewords for match headers. */
- lzx_write_compressed_code(ostream,
- codes->lens.main + LZX_NUM_CHARS,
- prev_codes->lens.main + LZX_NUM_CHARS,
+ /* Output the main code (two parts). */
+ lzx_write_compressed_code(os, codes->lens.main,
+ prev_lens->main,
+ LZX_NUM_CHARS);
+ lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
+ prev_lens->main + LZX_NUM_CHARS,
num_main_syms - LZX_NUM_CHARS);
- LZX_DEBUG("Writing length code...");
-
- /* Write the precode and lengths for the length code. */
- lzx_write_compressed_code(ostream,
- codes->lens.len,
- prev_codes->lens.len,
+ /* Output the length code. */
+ lzx_write_compressed_code(os, codes->lens.len,
+ prev_lens->len,
LZX_LENCODE_NUM_SYMBOLS);
- LZX_DEBUG("Writing matches and literals...");
-
- /* Write the actual matches and literals. */
- lzx_write_matches_and_literals(ostream, block_type,
- chosen_matches, num_chosen_matches,
- codes);
-
- LZX_DEBUG("Done writing block.");
+ /* Output the compressed matches and literals. */
+ lzx_write_items(os, block_type, chosen_items, num_chosen_items, codes);
}
-/* Write out the LZX blocks that were computed. */
-static void
-lzx_write_all_blocks(struct lzx_compressor *ctx, struct output_bitstream *ostream)
+/* Don't allow matches to span the end of an LZX block. */
+static inline unsigned
+maybe_truncate_matches(struct lz_match matches[], unsigned num_matches,
+ struct lzx_compressor *c)
{
+ if (c->match_window_end < c->cur_window_size && num_matches != 0) {
+ u32 limit = c->match_window_end - c->match_window_pos;
+
+ if (limit >= LZX_MIN_MATCH_LEN) {
- const struct lzx_codes *prev_codes = &ctx->zero_codes;
- for (unsigned i = 0; i < ctx->num_blocks; i++) {
- const struct lzx_block_spec *spec = &ctx->block_specs[i];
-
- LZX_DEBUG("Writing block %u/%u (type=%d, size=%u, num_chosen_matches=%u)...",
- i + 1, ctx->num_blocks,
- spec->block_type, spec->block_size,
- spec->num_chosen_matches);
-
- lzx_write_compressed_block(spec->block_type,
- spec->block_size,
- ctx->max_window_size,
- ctx->num_main_syms,
- &ctx->chosen_matches[spec->chosen_matches_start_pos],
- spec->num_chosen_matches,
- &spec->codes,
- prev_codes,
- ostream);
-
- prev_codes = &spec->codes;
+ unsigned i = num_matches - 1;
+ do {
+ if (matches[i].len >= limit) {
+ matches[i].len = limit;
+
+ /* Truncation might produce multiple
+ * matches with length 'limit'. Keep at
+ * most 1. */
+ num_matches = i + 1;
+ }
+ } while (i--);
+ } else {
+ num_matches = 0;
+ }
}
+ return num_matches;
}
-/* Constructs an LZX match from a literal byte and updates the main code symbol
- * frequencies. */
-static u32
-lzx_tally_literal(u8 lit, struct lzx_freqs *freqs)
+static unsigned
+lzx_get_matches_fillcache_singleblock(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
{
- freqs->main[lit]++;
- return (u32)lit;
-}
+ struct lz_match *cache_ptr;
+ struct lz_match *matches;
+ unsigned num_matches;
-/* Constructs an LZX match from an offset and a length, and updates the LRU
- * queue and the frequency of symbols in the main, length, and aligned offset
- * alphabets. The return value is a 32-bit number that provides the match in an
- * intermediate representation documented below. */
-static u32
-lzx_tally_match(unsigned match_len, unsigned match_offset,
- struct lzx_freqs *freqs, struct lzx_lru_queue *queue)
-{
- unsigned position_slot;
- unsigned position_footer;
- u32 len_header;
- unsigned main_symbol;
- unsigned len_footer;
- unsigned adjusted_match_len;
-
- LZX_ASSERT(match_len >= LZX_MIN_MATCH_LEN && match_len <= LZX_MAX_MATCH_LEN);
-
- /* The match offset shall be encoded as a position slot (itself encoded
- * as part of the main symbol) and a position footer. */
- position_slot = lzx_get_position_slot(match_offset, queue);
- position_footer = (match_offset + LZX_OFFSET_OFFSET) &
- ((1U << lzx_get_num_extra_bits(position_slot)) - 1);
-
- /* The match length shall be encoded as a length header (itself encoded
- * as part of the main symbol) and an optional length footer. */
- adjusted_match_len = match_len - LZX_MIN_MATCH_LEN;
- if (adjusted_match_len < LZX_NUM_PRIMARY_LENS) {
- /* No length footer needed. */
- len_header = adjusted_match_len;
+ cache_ptr = c->cache_ptr;
+ matches = cache_ptr + 1;
+ if (likely(cache_ptr <= c->cache_limit)) {
+ num_matches = lz_mf_get_matches(c->mf, matches);
+ cache_ptr->len = num_matches;
+ c->cache_ptr = matches + num_matches;
} else {
- /* Length footer needed. It will be encoded using the length
- * code. */
- len_header = LZX_NUM_PRIMARY_LENS;
- len_footer = adjusted_match_len - LZX_NUM_PRIMARY_LENS;
- freqs->len[len_footer]++;
+ num_matches = 0;
}
-
- /* Account for the main symbol. */
- main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
-
- freqs->main[main_symbol]++;
-
- /* In an aligned offset block, 3 bits of the position footer are output
- * as an aligned offset symbol. Account for this, although we may
- * ultimately decide to output the block as verbatim. */
-
- /* The following check is equivalent to:
- *
- * if (lzx_extra_bits[position_slot] >= 3)
- *
- * Note that this correctly excludes position slots that correspond to
- * recent offsets. */
- if (position_slot >= 8)
- freqs->aligned[position_footer & 7]++;
-
- /* Pack the position slot, position footer, and match length into an
- * intermediate representation. See `struct lzx_match' for details.
- */
- LZX_ASSERT(LZX_MAX_POSITION_SLOTS <= 64);
- LZX_ASSERT(lzx_get_num_extra_bits(LZX_MAX_POSITION_SLOTS - 1) <= 17);
- LZX_ASSERT(LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1 <= 256);
-
- LZX_ASSERT(position_slot <= (1U << (31 - 25)) - 1);
- LZX_ASSERT(position_footer <= (1U << (25 - 8)) - 1);
- LZX_ASSERT(adjusted_match_len <= (1U << (8 - 0)) - 1);
- return 0x80000000 |
- (position_slot << 25) |
- (position_footer << 8) |
- (adjusted_match_len);
+ c->match_window_pos++;
+ *matches_ret = matches;
+ return num_matches;
}
-struct lzx_record_ctx {
- struct lzx_freqs freqs;
- struct lzx_lru_queue queue;
- struct lzx_match *matches;
-};
-
-static void
-lzx_record_match(unsigned len, unsigned offset, void *_ctx)
+static unsigned
+lzx_get_matches_fillcache_multiblock(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
{
- struct lzx_record_ctx *ctx = _ctx;
+ struct lz_match *cache_ptr;
+ struct lz_match *matches;
+ unsigned num_matches;
- (ctx->matches++)->data = lzx_tally_match(len, offset, &ctx->freqs, &ctx->queue);
+ cache_ptr = c->cache_ptr;
+ matches = cache_ptr + 1;
+ if (likely(cache_ptr <= c->cache_limit)) {
+ num_matches = lz_mf_get_matches(c->mf, matches);
+ num_matches = maybe_truncate_matches(matches, num_matches, c);
+ cache_ptr->len = num_matches;
+ c->cache_ptr = matches + num_matches;
+ } else {
+ num_matches = 0;
+ }
+ c->match_window_pos++;
+ *matches_ret = matches;
+ return num_matches;
}
-static void
-lzx_record_literal(u8 lit, void *_ctx)
+static unsigned
+lzx_get_matches_usecache(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
{
- struct lzx_record_ctx *ctx = _ctx;
+ struct lz_match *cache_ptr;
+ struct lz_match *matches;
+ unsigned num_matches;
- (ctx->matches++)->data = lzx_tally_literal(lit, &ctx->freqs);
+ cache_ptr = c->cache_ptr;
+ matches = cache_ptr + 1;
+ if (cache_ptr <= c->cache_limit) {
+ num_matches = cache_ptr->len;
+ c->cache_ptr = matches + num_matches;
+ } else {
+ num_matches = 0;
+ }
+ c->match_window_pos++;
+ *matches_ret = matches;
+ return num_matches;
}
-/* Returns the cost, in bits, to output a literal byte using the specified cost
- * model. */
static unsigned
-lzx_literal_cost(u8 c, const struct lzx_costs * costs)
+lzx_get_matches_usecache_nocheck(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
{
- return costs->main[c];
+ struct lz_match *cache_ptr;
+ struct lz_match *matches;
+ unsigned num_matches;
+
+ cache_ptr = c->cache_ptr;
+ matches = cache_ptr + 1;
+ num_matches = cache_ptr->len;
+ c->cache_ptr = matches + num_matches;
+ c->match_window_pos++;
+ *matches_ret = matches;
+ return num_matches;
}
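+
+/*
+ * The *_fillcache and *_usecache functions above share a simple cache
+ * format: for each window position, the cache holds one 'struct lz_match'
+ * header whose 'len' field gives the number of matches found at that
+ * position, immediately followed by that many match entries.  For example
+ * (illustrative):
+ *
+ *	[len=2][match][match] [len=0] [len=3][match][match][match] ...
+ */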
-/* Given a (length, offset) pair that could be turned into a valid LZX match as
- * well as costs for the codewords in the main, length, and aligned Huffman
- * codes, return the approximate number of bits it will take to represent this
- * match in the compressed output. Take into account the match offset LRU
- * queue and optionally update it. */
static unsigned
-lzx_match_cost(unsigned length, unsigned offset, const struct lzx_costs *costs,
- struct lzx_lru_queue *queue)
+lzx_get_matches_nocache_singleblock(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
{
- unsigned position_slot;
- unsigned len_header, main_symbol;
- unsigned cost = 0;
-
- position_slot = lzx_get_position_slot(offset, queue);
-
- len_header = min(length - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
- main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
+ struct lz_match *matches;
+ unsigned num_matches;
- /* Account for main symbol. */
- cost += costs->main[main_symbol];
-
- /* Account for extra position information. */
- unsigned num_extra_bits = lzx_get_num_extra_bits(position_slot);
- if (num_extra_bits >= 3) {
- cost += num_extra_bits - 3;
- cost += costs->aligned[(offset + LZX_OFFSET_OFFSET) & 7];
- } else {
- cost += num_extra_bits;
- }
-
- /* Account for extra length information. */
- if (len_header == LZX_NUM_PRIMARY_LENS)
- cost += costs->len[length - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
+ matches = c->cache_ptr;
+ num_matches = lz_mf_get_matches(c->mf, matches);
+ c->match_window_pos++;
+ *matches_ret = matches;
+ return num_matches;
+}
- return cost;
+static unsigned
+lzx_get_matches_nocache_multiblock(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
+{
+ struct lz_match *matches;
+ unsigned num_matches;
+ matches = c->cache_ptr;
+ num_matches = lz_mf_get_matches(c->mf, matches);
+ num_matches = maybe_truncate_matches(matches, num_matches, c);
+ c->match_window_pos++;
+ *matches_ret = matches;
+ return num_matches;
}
-/* Fast heuristic cost evaluation to use in the inner loop of the match-finder.
- * Unlike lzx_match_cost() which does a true cost evaluation, this simply
- * prioritize matches based on their offset. */
-static block_cost_t
-lzx_match_cost_fast(unsigned offset, const struct lzx_lru_queue *queue)
+/*
+ * Find matches at the next position in the window.
+ *
+ * This uses a wrapper function around the underlying match-finder.
+ *
+ * Returns the number of matches found and sets *matches_ret to point to the
+ * matches array. The matches will be sorted by strictly increasing length and
+ * offset.
+ */
+static inline unsigned
+lzx_get_matches(struct lzx_compressor *c, const struct lz_match **matches_ret)
{
- /* It seems well worth it to take the time to give priority to recently
- * used offsets. */
- for (unsigned i = 0; i < LZX_NUM_RECENT_OFFSETS; i++)
- if (offset == queue->R[i])
- return i;
-
- BUILD_BUG_ON(LZX_MAX_WINDOW_SIZE >= (block_cost_t)~0U);
- return offset;
+ return (*c->get_matches_func)(c, matches_ret);
}
-/* Set the cost model @ctx->costs from the Huffman codeword lengths specified in
- * @lens.
- *
- * The cost model and codeword lengths are almost the same thing, but the
- * Huffman codewords with length 0 correspond to symbols with zero frequency
- * that still need to be assigned actual costs. The specific values assigned
- * are arbitrary, but they should be fairly high (near the maximum codeword
- * length) to take into account the fact that uses of these symbols are expected
- * to be rare. */
static void
-lzx_set_costs(struct lzx_compressor * ctx, const struct lzx_lens * lens)
+lzx_skip_bytes_fillcache(struct lzx_compressor *c, unsigned n)
{
- unsigned i;
- unsigned num_main_syms = ctx->num_main_syms;
-
- /* Main code */
- for (i = 0; i < num_main_syms; i++) {
- ctx->costs.main[i] = lens->main[i];
- if (ctx->costs.main[i] == 0)
- ctx->costs.main[i] = ctx->params.alg_params.slow.main_nostat_cost;
- }
-
- /* Length code */
- for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
- ctx->costs.len[i] = lens->len[i];
- if (ctx->costs.len[i] == 0)
- ctx->costs.len[i] = ctx->params.alg_params.slow.len_nostat_cost;
+ struct lz_match *cache_ptr;
+
+ cache_ptr = c->cache_ptr;
+ c->match_window_pos += n;
+ lz_mf_skip_positions(c->mf, n);
+ if (cache_ptr <= c->cache_limit) {
+ do {
+ cache_ptr->len = 0;
+ cache_ptr += 1;
+ } while (--n && cache_ptr <= c->cache_limit);
}
+ c->cache_ptr = cache_ptr;
+}
- /* Aligned offset code */
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
- ctx->costs.aligned[i] = lens->aligned[i];
- if (ctx->costs.aligned[i] == 0)
- ctx->costs.aligned[i] = ctx->params.alg_params.slow.aligned_nostat_cost;
+static void
+lzx_skip_bytes_usecache(struct lzx_compressor *c, unsigned n)
+{
+ struct lz_match *cache_ptr;
+
+ cache_ptr = c->cache_ptr;
+ c->match_window_pos += n;
+ if (cache_ptr <= c->cache_limit) {
+ do {
+ cache_ptr += 1 + cache_ptr->len;
+ } while (--n && cache_ptr <= c->cache_limit);
}
+ c->cache_ptr = cache_ptr;
}
-/* Advance the suffix array match-finder to the next position. */
static void
-lzx_lz_update_salink(input_idx_t i,
- const input_idx_t SA[restrict],
- const input_idx_t ISA[restrict],
- struct salink link[restrict])
+lzx_skip_bytes_usecache_nocheck(struct lzx_compressor *c, unsigned n)
{
- /* r = Rank of the suffix at the current position. */
- const input_idx_t r = ISA[i];
-
- /* next = rank of LOWEST ranked suffix that is ranked HIGHER than the
- * current suffix AND has a LOWER position, or -1 if none exists. */
- const input_idx_t next = link[r].next;
-
- /* prev = rank of HIGHEST ranked suffix that is ranked LOWER than the
- * current suffix AND has a LOWER position, or -1 if none exists. */
- const input_idx_t prev = link[r].prev;
-
- /* Link the suffix at the current position into the linked list that
- * contains all suffixes in the suffix array that are appear at or
- * before the current position, sorted by rank.
- *
- * Save the values of all fields we overwrite so that rollback is
- * possible. */
- if (next != (input_idx_t)~0U) {
-
- link[next].prev = r;
- link[next].lcpprev = link[r].lcpnext;
- }
+ struct lz_match *cache_ptr;
- if (prev != (input_idx_t)~0U) {
+ cache_ptr = c->cache_ptr;
+ c->match_window_pos += n;
+ do {
+ cache_ptr += 1 + cache_ptr->len;
+ } while (--n);
+ c->cache_ptr = cache_ptr;
+}
- link[prev].next = r;
- link[prev].lcpnext = link[r].lcpprev;
- }
+static void
+lzx_skip_bytes_nocache(struct lzx_compressor *c, unsigned n)
+{
+ c->match_window_pos += n;
+ lz_mf_skip_positions(c->mf, n);
}
/*
- * Use the suffix array match-finder to retrieve a list of LZ matches at the
- * current position.
- *
- * [in] @i Current position in the window.
- * [in] @SA Suffix array for the window.
- * [in] @ISA Inverse suffix array for the window.
- * [inout] @link Suffix array links used internally by the match-finder.
- * [out] @matches The (length, offset) pairs of the resulting matches will
- * be written here, sorted in decreasing order by
- * length. All returned lengths will be unique.
- * [in] @queue Recently used match offsets, used when evaluating the
- * cost of matches.
- * [in] @min_match_len Minimum match length to return.
- * [in] @max_matches_to_consider Maximum number of matches to consider at
- * the position.
- * [in] @max_matches_to_return Maximum number of matches to return.
+ * Skip the specified number of positions in the window (don't search for
+ * matches at them).
*
- * The return value is the number of matches found and written to @matches.
+ * This uses a wrapper function around the underlying match-finder.
*/
-static unsigned
-lzx_lz_get_matches(const input_idx_t i,
- const input_idx_t SA[const restrict],
- const input_idx_t ISA[const restrict],
- struct salink link[const restrict],
- struct raw_match matches[const restrict],
- const struct lzx_lru_queue * const restrict queue,
- const unsigned min_match_len,
- const u32 max_matches_to_consider,
- const u32 max_matches_to_return)
+static inline void
+lzx_skip_bytes(struct lzx_compressor *c, unsigned n)
{
- /* r = Rank of the suffix at the current position. */
- const input_idx_t r = ISA[i];
-
- /* Prepare for searching the current position. */
- lzx_lz_update_salink(i, SA, ISA, link);
-
- /* L = rank of next suffix to the left;
- * R = rank of next suffix to the right;
- * lenL = length of match between current position and the suffix with rank L;
- * lenR = length of match between current position and the suffix with rank R.
- *
- * This is left and right relative to the rank of the current suffix.
- * Since the suffixes in the suffix array are sorted, the longest
- * matches are immediately to the left and right (using the linked list
- * to ignore all suffixes that occur later in the window). The match
- * length decreases the farther left and right we go. We shall keep the
- * length on both sides in sync in order to choose the lowest-cost match
- * of each length.
- */
- input_idx_t L = link[r].prev;
- input_idx_t R = link[r].next;
- input_idx_t lenL = link[r].lcpprev;
- input_idx_t lenR = link[r].lcpnext;
-
- /* nmatches = number of matches found so far. */
- unsigned nmatches = 0;
-
- /* best_cost = cost of lowest-cost match found so far.
- *
- * We keep track of this so that we can ignore shorter matches that do
- * not have lower costs than a longer matches already found.
- */
- block_cost_t best_cost = INFINITE_BLOCK_COST;
+ return (*c->skip_bytes_func)(c, n);
+}
- /* count_remaining = maximum number of possible matches remaining to be
- * considered. */
- u32 count_remaining = max_matches_to_consider;
+/* Tally, and optionally record, the specified literal byte. */
+static inline void
+lzx_declare_literal(struct lzx_compressor *c, unsigned literal,
+ struct lzx_item **next_chosen_item)
+{
+ unsigned main_symbol = literal;
- /* pending = match currently being considered for a specific length. */
- struct raw_match pending;
- block_cost_t pending_cost;
+ c->freqs.main[main_symbol]++;
- while (lenL >= min_match_len || lenR >= min_match_len)
- {
- pending.len = lenL;
- pending_cost = INFINITE_BLOCK_COST;
- block_cost_t cost;
+ if (next_chosen_item) {
+ *(*next_chosen_item)++ = (struct lzx_item) {
+ .data = main_symbol,
+ };
+ }
+}
- /* Extend left. */
- if (lenL >= min_match_len && lenL >= lenR) {
- for (;;) {
+/* Tally, and optionally record, the specified repeat offset match. */
+static inline void
+lzx_declare_repeat_offset_match(struct lzx_compressor *c,
+ unsigned len, unsigned rep_index,
+ struct lzx_item **next_chosen_item)
+{
+ unsigned len_header;
+ unsigned main_symbol;
+ unsigned len_symbol;
- if (--count_remaining == 0)
- goto out_save_pending;
+ if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
+ len_header = len - LZX_MIN_MATCH_LEN;
+ len_symbol = LZX_LENCODE_NUM_SYMBOLS;
+ } else {
+ len_header = LZX_NUM_PRIMARY_LENS;
+ len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
+ c->freqs.len[len_symbol]++;
+ }
- input_idx_t offset = i - SA[L];
+ main_symbol = LZX_NUM_CHARS + ((rep_index << 3) | len_header);
- /* Save match if it has smaller cost. */
- cost = lzx_match_cost_fast(offset, queue);
- if (cost < pending_cost) {
- pending.offset = offset;
- pending_cost = cost;
- }
+ c->freqs.main[main_symbol]++;
- if (link[L].lcpprev < lenL) {
- /* Match length decreased. */
-
- lenL = link[L].lcpprev;
-
- /* Save the pending match unless the
- * right side still may have matches of
- * this length to be scanned, or if a
- * previous (longer) match had lower
- * cost. */
- if (pending.len > lenR) {
- if (pending_cost < best_cost) {
- best_cost = pending_cost;
- matches[nmatches++] = pending;
- if (nmatches == max_matches_to_return)
- return nmatches;
- }
- pending.len = lenL;
- pending_cost = INFINITE_BLOCK_COST;
- }
- if (lenL < min_match_len || lenL < lenR)
- break;
- }
- L = link[L].prev;
- }
- }
+ if (next_chosen_item) {
+ *(*next_chosen_item)++ = (struct lzx_item) {
+ .data = (u64)main_symbol | ((u64)len_symbol << 10),
+ };
+ }
+}
- pending.len = lenR;
+/* Tally, and optionally record, the specified explicit offset match. */
+static inline void
+lzx_declare_explicit_offset_match(struct lzx_compressor *c, unsigned len, u32 offset,
+ struct lzx_item **next_chosen_item)
+{
+ unsigned len_header;
+ unsigned main_symbol;
+ unsigned len_symbol;
+ unsigned offset_slot;
+ unsigned num_extra_bits;
+ u32 extra_bits;
- /* Extend right. */
- if (lenR >= min_match_len && lenR > lenL) {
- for (;;) {
+ if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
+ len_header = len - LZX_MIN_MATCH_LEN;
+ len_symbol = LZX_LENCODE_NUM_SYMBOLS;
+ } else {
+ len_header = LZX_NUM_PRIMARY_LENS;
+ len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
+ c->freqs.len[len_symbol]++;
+ }
- if (--count_remaining == 0)
- goto out_save_pending;
+ offset_slot = lzx_get_offset_slot_raw(offset + LZX_OFFSET_OFFSET);
- input_idx_t offset = i - SA[R];
+ main_symbol = LZX_NUM_CHARS + ((offset_slot << 3) | len_header);
- /* Save match if it has smaller cost. */
- cost = lzx_match_cost_fast(offset, queue);
- if (cost < pending_cost) {
- pending.offset = offset;
- pending_cost = cost;
- }
+ c->freqs.main[main_symbol]++;
- if (link[R].lcpnext < lenR) {
- /* Match length decreased. */
+ if (offset_slot >= 8)
+ c->freqs.aligned[(offset + LZX_OFFSET_OFFSET) & 7]++;
- lenR = link[R].lcpnext;
+ if (next_chosen_item) {
- /* Save the pending match unless a
- * previous (longer) match had lower
- * cost. */
- if (pending_cost < best_cost) {
- matches[nmatches++] = pending;
- best_cost = pending_cost;
- if (nmatches == max_matches_to_return)
- return nmatches;
- }
+ num_extra_bits = lzx_extra_offset_bits[offset_slot];
- if (lenR < min_match_len || lenR <= lenL)
- break;
+ extra_bits = (offset + LZX_OFFSET_OFFSET) -
+ lzx_offset_slot_base[offset_slot];
- pending.len = lenR;
- pending_cost = INFINITE_BLOCK_COST;
- }
- R = link[R].next;
- }
- }
+ *(*next_chosen_item)++ = (struct lzx_item) {
+ .data = (u64)main_symbol |
+ ((u64)len_symbol << 10) |
+ ((u64)num_extra_bits << 18) |
+ ((u64)extra_bits << 23),
+ };
}
- goto out;
+}
-out_save_pending:
- if (pending_cost != INFINITE_BLOCK_COST)
- matches[nmatches++] = pending;
+/* Tally, and optionally record, the specified match or literal. */
+static inline void
+lzx_declare_item(struct lzx_compressor *c, u32 mc_item_data,
+ struct lzx_item **next_chosen_item)
+{
+ u32 len = mc_item_data & MC_LEN_MASK;
+ u32 offset_data = mc_item_data >> MC_OFFSET_SHIFT;
+
+ if (len == 1)
+ lzx_declare_literal(c, offset_data, next_chosen_item);
+ else if (offset_data < LZX_NUM_RECENT_OFFSETS)
+ lzx_declare_repeat_offset_match(c, len, offset_data,
+ next_chosen_item);
+ else
+ lzx_declare_explicit_offset_match(c, len,
+ offset_data - LZX_OFFSET_OFFSET,
+ next_chosen_item);
+}
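+
+/* For reference, the 32-bit match-chooser item encoding assumed above: the
+ * low bits (MC_LEN_MASK) hold the length and the bits above MC_OFFSET_SHIFT
+ * hold the offset data.  A length of 1 denotes a literal, with the offset
+ * data being the byte itself; offset data below LZX_NUM_RECENT_OFFSETS is a
+ * repeat offset index; anything else is the match offset plus
+ * LZX_OFFSET_OFFSET. */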
-out:
- return nmatches;
+static inline void
+lzx_record_item_list(struct lzx_compressor *c,
+ struct lzx_mc_pos_data *cur_optimum_ptr,
+ struct lzx_item **next_chosen_item)
+{
+ struct lzx_mc_pos_data *end_optimum_ptr;
+ u32 saved_item;
+ u32 item;
+
+ /* The list is currently in reverse order (last item to first item).
+ * Reverse it. */
+ end_optimum_ptr = cur_optimum_ptr;
+ saved_item = cur_optimum_ptr->mc_item_data;
+ do {
+ item = saved_item;
+ cur_optimum_ptr -= item & MC_LEN_MASK;
+ saved_item = cur_optimum_ptr->mc_item_data;
+ cur_optimum_ptr->mc_item_data = item;
+ } while (cur_optimum_ptr != c->optimum);
+
+ /* Walk the list of items from beginning to end, tallying and recording
+ * each item. */
+ do {
+ lzx_declare_item(c, cur_optimum_ptr->mc_item_data, next_chosen_item);
+ cur_optimum_ptr += (cur_optimum_ptr->mc_item_data) & MC_LEN_MASK;
+ } while (cur_optimum_ptr != end_optimum_ptr);
}
+static inline void
+lzx_tally_item_list(struct lzx_compressor *c, struct lzx_mc_pos_data *cur_optimum_ptr)
+{
+ /* Since we're just tallying the items, we don't need to reverse the
+ * list. Processing the items in reverse order is fine. */
+ do {
+ lzx_declare_item(c, cur_optimum_ptr->mc_item_data, NULL);
+ cur_optimum_ptr -= (cur_optimum_ptr->mc_item_data & MC_LEN_MASK);
+ } while (cur_optimum_ptr != c->optimum);
+}
-/* Tell the match-finder to skip the specified number of bytes (@n) in the
- * input. */
+/* Tally, and optionally (if next_chosen_item != NULL) record, in order, all
+ * items in the current list of items found by the match-chooser. */
static void
-lzx_lz_skip_bytes(struct lzx_compressor *ctx, unsigned n)
+lzx_declare_item_list(struct lzx_compressor *c, struct lzx_mc_pos_data *cur_optimum_ptr,
+ struct lzx_item **next_chosen_item)
{
- LZX_ASSERT(n <= ctx->match_window_end - ctx->match_window_pos);
- if (ctx->matches_cached) {
- ctx->match_window_pos += n;
- while (n--) {
- ctx->cached_matches_pos +=
- ctx->cached_matches[ctx->cached_matches_pos].len + 1;
- }
- } else {
- while (n--) {
- ctx->cached_matches[ctx->cached_matches_pos++].len = 0;
- lzx_lz_update_salink(ctx->match_window_pos++, ctx->SA,
- ctx->ISA, ctx->salink);
- }
- }
+ if (next_chosen_item)
+ lzx_record_item_list(c, cur_optimum_ptr, next_chosen_item);
+ else
+ lzx_tally_item_list(c, cur_optimum_ptr);
}
-/* Retrieve a list of matches available at the next position in the input.
+/* Set the cost model @c->costs from the Huffman codeword lengths specified in
+ * @lens.
*
- * The matches are written to ctx->matches in decreasing order of length, and
- * the return value is the number of matches found. */
-static unsigned
-lzx_lz_get_matches_caching(struct lzx_compressor *ctx,
- const struct lzx_lru_queue *queue,
- struct raw_match **matches_ret)
+ * The cost model and codeword lengths are almost the same thing, but the
+ * Huffman codewords with length 0 correspond to symbols with zero frequency
+ * that still need to be assigned actual costs. The specific values assigned
+ * are arbitrary, but they should be fairly high (near the maximum codeword
+ * length) to take into account the fact that uses of these symbols are expected
+ * to be rare. */
+static void
+lzx_set_costs(struct lzx_compressor *c, const struct lzx_lens * lens)
{
- unsigned num_matches;
- struct raw_match *matches;
+ unsigned i;
- LZX_ASSERT(ctx->match_window_pos <= ctx->match_window_end);
+ /* Main code */
+ for (i = 0; i < c->num_main_syms; i++)
+ c->costs.main[i] = lens->main[i] ? lens->main[i] : 15;
- matches = &ctx->cached_matches[ctx->cached_matches_pos + 1];
+ /* Length code */
+ for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
+ c->costs.len[i] = lens->len[i] ? lens->len[i] : 15;
- if (ctx->matches_cached) {
- num_matches = matches[-1].len;
- } else {
- unsigned min_match_len = LZX_MIN_MATCH_LEN;
- if (!ctx->params.alg_params.slow.use_len2_matches)
- min_match_len = max(min_match_len, 3);
- const u32 max_search_depth = ctx->params.alg_params.slow.max_search_depth;
- const u32 max_matches_per_pos = ctx->params.alg_params.slow.max_matches_per_pos;
+ /* Aligned offset code */
+ for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
+ c->costs.aligned[i] = lens->aligned[i] ? lens->aligned[i] : 7;
+}
- if (unlikely(max_search_depth == 0 || max_matches_per_pos == 0))
- num_matches = 0;
- else
- num_matches = lzx_lz_get_matches(ctx->match_window_pos,
- ctx->SA,
- ctx->ISA,
- ctx->salink,
- matches,
- queue,
- min_match_len,
- max_search_depth,
- max_matches_per_pos);
- matches[-1].len = num_matches;
- }
- ctx->cached_matches_pos += num_matches + 1;
- *matches_ret = matches;
+/* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
+ * algorithm. */
+static void
+lzx_set_default_costs(struct lzx_costs * costs, unsigned num_main_syms)
+{
+ unsigned i;
- /* Cap the length of returned matches to the number of bytes remaining,
- * if it is not the whole window. */
- if (ctx->match_window_end < ctx->window_size) {
- unsigned maxlen = ctx->match_window_end - ctx->match_window_pos;
- for (unsigned i = 0; i < num_matches; i++)
- if (matches[i].len > maxlen)
- matches[i].len = maxlen;
- }
-#if 0
- fprintf(stderr, "Pos %u/%u: %u matches\n",
- ctx->match_window_pos, ctx->match_window_end, num_matches);
- for (unsigned i = 0; i < num_matches; i++)
- fprintf(stderr, "\tLen %u Offset %u\n", matches[i].len, matches[i].offset);
-#endif
+ /* Main code (part 1): Literal symbols */
+ for (i = 0; i < LZX_NUM_CHARS; i++)
+ costs->main[i] = 8;
-#ifdef ENABLE_LZX_DEBUG
- for (unsigned i = 0; i < num_matches; i++) {
- LZX_ASSERT(matches[i].len >= LZX_MIN_MATCH_LEN);
- LZX_ASSERT(matches[i].len <= LZX_MAX_MATCH_LEN);
- LZX_ASSERT(matches[i].len <= ctx->match_window_end - ctx->match_window_pos);
- LZX_ASSERT(matches[i].offset > 0);
- LZX_ASSERT(matches[i].offset <= ctx->match_window_pos);
- LZX_ASSERT(!memcmp(&ctx->window[ctx->match_window_pos],
- &ctx->window[ctx->match_window_pos - matches[i].offset],
- matches[i].len));
- }
-#endif
+ /* Main code (part 2): Match header symbols */
+ for (; i < num_main_syms; i++)
+ costs->main[i] = 10;
- ctx->match_window_pos++;
- return num_matches;
+ /* Length code */
+ for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
+ costs->len[i] = 8;
+
+ /* Aligned offset code */
+ for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
+ costs->aligned[i] = 3;
}
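+
+/* Rough rationale for the defaults above: 8 bits per literal treats
+ * literals like raw bytes, 10 bits makes match headers slightly more
+ * expensive than literals, and 3 bits per aligned symbol corresponds to a
+ * uniform code over the 8 aligned offset symbols (log2(8) = 3). */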
-/*
- * Reverse the linked list of near-optimal matches so that they can be returned
- * in forwards order.
- *
- * Returns the first match in the list.
- */
-static struct raw_match
-lzx_lz_reverse_near_optimal_match_list(struct lzx_compressor *ctx,
- unsigned cur_pos)
+/* Return the cost, in bits, to output a literal byte using the specified cost
+ * model. */
+static inline u32
+lzx_literal_cost(unsigned literal, const struct lzx_costs * costs)
{
- unsigned prev_link, saved_prev_link;
- unsigned prev_match_offset, saved_prev_match_offset;
+ return costs->main[literal];
+}
+
+/* Return the cost, in bits, to output a match of the specified length and
+ * offset slot using the specified cost model. Does not take into account
+ * extra offset bits. */
+static inline u32
+lzx_match_cost_raw(unsigned len, unsigned offset_slot,
+ const struct lzx_costs *costs)
+{
+ u32 cost;
+ unsigned len_header;
+ unsigned main_symbol;
- ctx->optimum_end_idx = cur_pos;
+ if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
+ len_header = len - LZX_MIN_MATCH_LEN;
+ cost = 0;
+ } else {
+ len_header = LZX_NUM_PRIMARY_LENS;
- saved_prev_link = ctx->optimum[cur_pos].prev.link;
- saved_prev_match_offset = ctx->optimum[cur_pos].prev.match_offset;
+ /* Account for length symbol. */
+ cost = costs->len[len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
+ }
- do {
- prev_link = saved_prev_link;
- prev_match_offset = saved_prev_match_offset;
+ /* Account for main symbol. */
+ main_symbol = LZX_NUM_CHARS + ((offset_slot << 3) | len_header);
+ cost += costs->main[main_symbol];
- saved_prev_link = ctx->optimum[prev_link].prev.link;
- saved_prev_match_offset = ctx->optimum[prev_link].prev.match_offset;
+ return cost;
+}
- ctx->optimum[prev_link].next.link = cur_pos;
- ctx->optimum[prev_link].next.match_offset = prev_match_offset;
+/* Equivalent to lzx_match_cost_raw(), but assumes the length is small enough
+ * that it doesn't require a length symbol. */
+static inline u32
+lzx_match_cost_raw_smalllen(unsigned len, unsigned offset_slot,
+ const struct lzx_costs *costs)
+{
+ LZX_ASSERT(len < LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS);
+ return costs->main[LZX_NUM_CHARS +
+ ((offset_slot << 3) | (len - LZX_MIN_MATCH_LEN))];
+}
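+
+/* Worked example (illustrative): for len = 5 and offset_slot = 3, the
+ * adjusted length 5 - LZX_MIN_MATCH_LEN = 3 is below LZX_NUM_PRIMARY_LENS,
+ * so no length symbol is needed and the cost is simply
+ * costs->main[LZX_NUM_CHARS + ((3 << 3) | 3)], i.e. that of main symbol
+ * 283 when LZX_NUM_CHARS is 256. */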
- cur_pos = prev_link;
- } while (cur_pos != 0);
+/*
+ * Consider coding the match at repeat offset index @rep_idx, trying each
+ * length from the minimum (2) to the full match length (@rep_len).
+ */
+static inline void
+lzx_consider_repeat_offset_match(struct lzx_compressor *c,
+ struct lzx_mc_pos_data *cur_optimum_ptr,
+ unsigned rep_len, unsigned rep_idx)
+{
+ u32 base_cost = cur_optimum_ptr->cost;
+ u32 cost;
+ unsigned len;
+
+#if 1 /* Optimized version */
+
+ if (rep_len < LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS) {
+ /* All lengths being considered are small. */
+ len = 2;
+ do {
+ cost = base_cost +
+ lzx_match_cost_raw_smalllen(len, rep_idx, &c->costs);
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->mc_item_data =
+ (rep_idx << MC_OFFSET_SHIFT) | len;
+ (cur_optimum_ptr + len)->cost = cost;
+ }
+ } while (++len <= rep_len);
+ } else {
+ /* Some lengths being considered are small, and some are big.
+ * Start with the optimized loop for small lengths, then switch
+ * to the optimized loop for big lengths. */
+ len = 2;
+ do {
+ cost = base_cost +
+ lzx_match_cost_raw_smalllen(len, rep_idx, &c->costs);
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->mc_item_data =
+ (rep_idx << MC_OFFSET_SHIFT) | len;
+ (cur_optimum_ptr + len)->cost = cost;
+ }
+ } while (++len < LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS);
+
+ /* The main symbol is now fixed. */
+ base_cost += c->costs.main[LZX_NUM_CHARS +
+ ((rep_idx << 3) | LZX_NUM_PRIMARY_LENS)];
+ do {
+ cost = base_cost +
+ c->costs.len[len - LZX_MIN_MATCH_LEN -
+ LZX_NUM_PRIMARY_LENS];
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->mc_item_data =
+ (rep_idx << MC_OFFSET_SHIFT) | len;
+ (cur_optimum_ptr + len)->cost = cost;
+ }
+ } while (++len <= rep_len);
+ }
- ctx->optimum_cur_idx = ctx->optimum[0].next.link;
+#else /* Unoptimized version */
- return (struct raw_match)
- { .len = ctx->optimum_cur_idx,
- .offset = ctx->optimum[0].next.match_offset,
- };
+ len = 2;
+ do {
+ cost = base_cost +
+ lzx_match_cost_raw(len, rep_idx, &c->costs);
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->mc_item_data =
+ (rep_idx << MC_OFFSET_SHIFT) | len;
+ (cur_optimum_ptr + len)->cost = cost;
+ }
+ } while (++len <= rep_len);
+#endif
}
/*
- * lzx_lz_get_near_optimal_match() -
- *
- * Choose the optimal match or literal to use at the next position in the input.
- *
- * Unlike a greedy parser that always takes the longest match, or even a
- * parser with one match/literal look-ahead like zlib, the algorithm used here
- * may look ahead many matches/literals to determine the optimal match/literal to
- * output next. The motivation is that the compression ratio is improved if the
- * compressor can do things like use a shorter-than-possible match in order to
- * allow a longer match later, and also take into account the Huffman code cost
- * model rather than simply assuming that longer is better.
- *
- * Still, this is not truly an optimal parser because very long matches are
- * taken immediately, and the raw match-finder takes some shortcuts. This is
- * done to avoid considering many different alternatives that are unlikely to
- * be significantly better.
- *
- * This algorithm is based on that used in 7-Zip's DEFLATE encoder.
- *
- * Each call to this function does one of two things:
+ * Consider coding each match in @matches as an explicit offset match.
*
- * 1. Build a near-optimal sequence of matches/literals, up to some point, that
- * will be returned by subsequent calls to this function, then return the
- * first one.
+ * @matches must be sorted by strictly increasing length and strictly
+ * increasing offset. This is guaranteed by the match-finder.
*
- * OR
- *
- * 2. Return the next match/literal previously computed by a call to this
- * function;
- *
- * This function relies on the following state in the compressor context:
- *
- * ctx->window (read-only: preprocessed data being compressed)
- * ctx->cost (read-only: cost model to use)
- * ctx->optimum (internal state; leave uninitialized)
- * ctx->optimum_cur_idx (must set to 0 before first call)
- * ctx->optimum_end_idx (must set to 0 before first call)
- *
- * Plus any state used by the raw match-finder.
- *
- * The return value is a (length, offset) pair specifying the match or literal
- * chosen. For literals, the length is less than LZX_MIN_MATCH_LEN and the
- * offset is meaningless.
+ * We consider each length from the minimum (2) to the longest
+ * (matches[num_matches - 1].len). For each length, we consider only
+ * the smallest offset for which that length is available. Although
+ * this is not guaranteed to be optimal due to the possibility of a
+ * larger offset costing less than a smaller offset to code, this is a
+ * very useful heuristic.
*/
-static struct raw_match
-lzx_lz_get_near_optimal_match(struct lzx_compressor * ctx)
+static inline void
+lzx_consider_explicit_offset_matches(struct lzx_compressor *c,
+ struct lzx_mc_pos_data *cur_optimum_ptr,
+ const struct lz_match matches[],
+ unsigned num_matches)
{
- unsigned num_possible_matches;
- struct raw_match *possible_matches;
- struct raw_match match;
- unsigned longest_match_len;
-
- if (ctx->optimum_cur_idx != ctx->optimum_end_idx) {
- /* Case 2: Return the next match/literal already found. */
- match.len = ctx->optimum[ctx->optimum_cur_idx].next.link -
- ctx->optimum_cur_idx;
- match.offset = ctx->optimum[ctx->optimum_cur_idx].next.match_offset;
-
- ctx->optimum_cur_idx = ctx->optimum[ctx->optimum_cur_idx].next.link;
- return match;
+ LZX_ASSERT(num_matches > 0);
+
+ unsigned i;
+ unsigned len;
+ unsigned offset_slot;
+ u32 position_cost;
+ u32 cost;
+ u32 offset_data;
+
+#if 1 /* Optimized version */
+
+ if (matches[num_matches - 1].offset < LZX_NUM_FAST_OFFSETS) {
+
+ /*
+ * Offset is small; the offset slot can be looked up directly in
+ * c->offset_slot_fast.
+ *
+ * Additional optimizations:
+ *
+ * - Since the offset is small, it falls in the exponential part
+ * of the offset slot bases and the number of extra offset
+ * bits can be calculated directly as (offset_slot >> 1) - 1.
+ *
+ * - Just consider the number of extra offset bits; don't
+ * account for the aligned offset code. Usually this has
+ * almost no effect on the compression ratio.
+ *
+ * - Start out in a loop optimized for small lengths. When the
+ * length becomes high enough that a length symbol will be
+ * needed, jump into a loop optimized for big lengths.
+ */
+
+ len = 2;
+ i = 0;
+ do {
+			offset_slot = c->offset_slot_fast[matches[i].offset];
+			LZX_ASSERT(offset_slot <= 37); /* for extra bits formula */
+ position_cost = cur_optimum_ptr->cost +
+ ((offset_slot >> 1) - 1);
+ offset_data = matches[i].offset + LZX_OFFSET_OFFSET;
+ do {
+ if (len >= LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS)
+ goto biglen;
+ cost = position_cost +
+ lzx_match_cost_raw_smalllen(len, offset_slot,
+ &c->costs);
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->cost = cost;
+ (cur_optimum_ptr + len)->mc_item_data =
+ (offset_data << MC_OFFSET_SHIFT) | len;
+ }
+ } while (++len <= matches[i].len);
+ } while (++i != num_matches);
+
+ return;
+
+ do {
+ offset_slot = c->offset_slot_fast[matches[i].offset];
+ biglen:
+ position_cost = cur_optimum_ptr->cost +
+ ((offset_slot >> 1) - 1) +
+ c->costs.main[LZX_NUM_CHARS +
+ ((offset_slot << 3) |
+ LZX_NUM_PRIMARY_LENS)];
+ offset_data = matches[i].offset + LZX_OFFSET_OFFSET;
+ do {
+ cost = position_cost +
+ c->costs.len[len - LZX_MIN_MATCH_LEN -
+ LZX_NUM_PRIMARY_LENS];
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->cost = cost;
+ (cur_optimum_ptr + len)->mc_item_data =
+ (offset_data << MC_OFFSET_SHIFT) | len;
+ }
+ } while (++len <= matches[i].len);
+ } while (++i != num_matches);
+ } else {
+ len = 2;
+ i = 0;
+ do {
+ offset_data = matches[i].offset + LZX_OFFSET_OFFSET;
+ offset_slot = lzx_get_offset_slot_raw(offset_data);
+ position_cost = cur_optimum_ptr->cost +
+ lzx_extra_offset_bits[offset_slot];
+ do {
+ cost = position_cost +
+ lzx_match_cost_raw(len, offset_slot, &c->costs);
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->cost = cost;
+ (cur_optimum_ptr + len)->mc_item_data =
+ (offset_data << MC_OFFSET_SHIFT) | len;
+ }
+ } while (++len <= matches[i].len);
+ } while (++i != num_matches);
}
- /* Case 1: Compute a new list of matches/literals to return. */
+#else /* Unoptimized version */
- ctx->optimum_cur_idx = 0;
- ctx->optimum_end_idx = 0;
+ unsigned num_extra_bits;
- /* Get matches at this position. */
- num_possible_matches = lzx_lz_get_matches_caching(ctx, &ctx->queue, &possible_matches);
+ len = 2;
+ i = 0;
+ do {
+ offset_data = matches[i].offset + LZX_OFFSET_OFFSET;
+ position_cost = cur_optimum_ptr->cost;
+ offset_slot = lzx_get_offset_slot_raw(offset_data);
+ num_extra_bits = lzx_extra_offset_bits[offset_slot];
+ if (num_extra_bits >= 3) {
+ position_cost += num_extra_bits - 3;
+ position_cost += c->costs.aligned[offset_data & 7];
+ } else {
+ position_cost += num_extra_bits;
+ }
+ do {
+ cost = position_cost +
+ lzx_match_cost_raw(len, offset_slot, &c->costs);
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->cost = cost;
+ (cur_optimum_ptr + len)->mc_item_data =
+ (offset_data << MC_OFFSET_SHIFT) | len;
+ }
+ } while (++len <= matches[i].len);
+ } while (++i != num_matches);
+#endif
+}
- /* If no matches found, return literal. */
- if (num_possible_matches == 0)
- return (struct raw_match){ .len = 0 };
+/*
+ * Search for repeat offset matches with the current position.
+ */
+static inline unsigned
+lzx_repsearch(const u8 * const strptr, const u32 bytes_remaining,
+ const struct lzx_lru_queue *queue, unsigned *rep_max_idx_ret)
+{
+ BUILD_BUG_ON(LZX_NUM_RECENT_OFFSETS != 3);
+ return lz_repsearch3(strptr, min(bytes_remaining, LZX_MAX_MATCH_LEN),
+ queue->R, rep_max_idx_ret);
+}
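+
+/* Note: lz_repsearch3() returns the length of the longest match between
+ * 'strptr' and the three recent offsets in queue->R, storing the index of
+ * the winning offset in *rep_max_idx_ret; the search length is capped at
+ * min(bytes_remaining, LZX_MAX_MATCH_LEN) by the call above. */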
- /* The matches that were found are sorted in decreasing order by length.
- * Get the length of the longest one. */
- longest_match_len = possible_matches[0].len;
+/*
+ * The main near-optimal parsing routine.
+ *
+ * Briefly, the algorithm does an approximate minimum-cost path search to find a
+ * "near-optimal" sequence of matches and literals to output, based on the
+ * current cost model. The algorithm steps forward, position by position (byte
+ * by byte), and updates the minimum cost path to reach each later position that
+ * can be reached using a match or literal from the current position. This is
+ * essentially Dijkstra's algorithm in disguise: the graph nodes are positions,
+ * the graph edges are possible matches/literals to code, and the cost of each
+ * edge is the estimated number of bits that will be required to output the
+ * corresponding match or literal. But one difference is that we actually
+ * compute the lowest-cost path in pieces, where each piece is terminated when
+ * there are no choices to be made.
+ *
+ * This function will run this algorithm on the portion of the window from
+ * &c->cur_window[c->match_window_pos] to &c->cur_window[c->match_window_end].
+ *
+ * On entry, c->queue must be the current state of the match offset LRU queue,
+ * and c->costs must be the current cost model to use for Huffman symbols.
+ *
+ * On exit, c->queue will be the state that the LRU queue would be in if the
+ * chosen items were to be coded.
+ *
+ * If next_chosen_item != NULL, then all items chosen will be recorded (saved in
+ * the chosen_items array). Otherwise, all items chosen will only be tallied
+ * (symbol frequencies tallied in c->freqs).
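+ *
+ * Illustrative example of one relaxation step: if the path to position p
+ * costs 100 bits and a length-3 match with an estimated cost of 20 bits is
+ * available at p, then position p + 3 becomes reachable for 120 bits, and
+ * its stored cost and mc_item_data are updated if that beats the best cost
+ * found so far for p + 3.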
+ */
+static void
+lzx_optim_pass(struct lzx_compressor *c, struct lzx_item **next_chosen_item)
+{
+ const u8 *block_end;
+ struct lzx_lru_queue *begin_queue;
+ const u8 *window_ptr;
+ struct lzx_mc_pos_data *cur_optimum_ptr;
+ struct lzx_mc_pos_data *end_optimum_ptr;
+ const struct lz_match *matches;
+ unsigned num_matches;
+ unsigned longest_len;
+ unsigned rep_max_len;
+ unsigned rep_max_idx;
+ unsigned literal;
+ unsigned len;
+ u32 cost;
+ u32 offset_data;
+
+ block_end = &c->cur_window[c->match_window_end];
+ begin_queue = &c->queue;
+begin:
+ /* Start building a new list of items, which will correspond to the next
+ * piece of the overall minimum-cost path.
+ *
+ * *begin_queue is the current state of the match offset LRU queue. */
- /* Greedy heuristic: if the longest match that was found is greater
- * than the number of fast bytes, return it immediately; don't both
- * doing more work. */
- if (longest_match_len > ctx->params.alg_params.slow.num_fast_bytes) {
- lzx_lz_skip_bytes(ctx, longest_match_len - 1);
- return possible_matches[0];
- }
+ window_ptr = &c->cur_window[c->match_window_pos];
- /* Calculate the cost to reach the next position by outputting a
- * literal. */
- ctx->optimum[0].queue = ctx->queue;
- ctx->optimum[1].queue = ctx->optimum[0].queue;
- ctx->optimum[1].cost = lzx_literal_cost(ctx->window[ctx->match_window_pos],
- &ctx->costs);
- ctx->optimum[1].prev.link = 0;
-
- /* Calculate the cost to reach any position up to and including that
- * reached by the longest match, using the shortest (i.e. closest) match
- * that reaches each position. */
- BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2);
- for (unsigned len = LZX_MIN_MATCH_LEN, match_idx = num_possible_matches - 1;
- len <= longest_match_len; len++) {
-
- LZX_ASSERT(match_idx < num_possible_matches);
-
- ctx->optimum[len].queue = ctx->optimum[0].queue;
- ctx->optimum[len].prev.link = 0;
- ctx->optimum[len].prev.match_offset = possible_matches[match_idx].offset;
- ctx->optimum[len].cost = lzx_match_cost(len,
- possible_matches[match_idx].offset,
- &ctx->costs,
- &ctx->optimum[len].queue);
- if (len == possible_matches[match_idx].len)
- match_idx--;
+ if (window_ptr == block_end) {
+ c->queue = *begin_queue;
+ return;
}
- unsigned cur_pos = 0;
+ cur_optimum_ptr = c->optimum;
+ cur_optimum_ptr->cost = 0;
+ cur_optimum_ptr->queue = *begin_queue;
- /* len_end: greatest index forward at which costs have been calculated
- * so far */
- unsigned len_end = longest_match_len;
+ end_optimum_ptr = cur_optimum_ptr;
+	/* The following loop runs once per byte in the window, except in a
+	 * couple of shortcut cases. */
for (;;) {
- /* Advance to next position. */
- cur_pos++;
-
- if (cur_pos == len_end || cur_pos == LZX_OPTIM_ARRAY_SIZE)
- return lzx_lz_reverse_near_optimal_match_list(ctx, cur_pos);
- /* retrieve the number of matches available at this position */
- num_possible_matches = lzx_lz_get_matches_caching(ctx, &ctx->optimum[cur_pos].queue,
- &possible_matches);
+ /* Find explicit offset matches with the current position. */
+ num_matches = lzx_get_matches(c, &matches);
- unsigned new_len = 0;
-
- if (num_possible_matches != 0) {
- new_len = possible_matches[0].len;
+ if (num_matches) {
+ /*
+ * Find the longest repeat offset match with the current
+ * position.
+ *
+ * Heuristics:
+ *
+ * - Only search for repeat offset matches if the
+ * match-finder already found at least one match.
+ *
+ * - Only consider the longest repeat offset match. It
+ * seems to be rare for the optimal parse to include a
+ * repeat offset match that doesn't have the longest
+ * length (allowing for the possibility that not all
+ * of that length is actually used).
+ */
+ rep_max_len = lzx_repsearch(window_ptr,
+ block_end - window_ptr,
+ &cur_optimum_ptr->queue,
+ &rep_max_idx);
+
+ if (rep_max_len) {
+ /* If there's a very long repeat offset match,
+ * choose it immediately. */
+ if (rep_max_len >= c->params.nice_match_length) {
+
+ swap(cur_optimum_ptr->queue.R[0],
+ cur_optimum_ptr->queue.R[rep_max_idx]);
+ begin_queue = &cur_optimum_ptr->queue;
+
+ cur_optimum_ptr += rep_max_len;
+ cur_optimum_ptr->mc_item_data =
+ (rep_max_idx << MC_OFFSET_SHIFT) |
+ rep_max_len;
+
+ lzx_skip_bytes(c, rep_max_len - 1);
+ break;
+ }
- /* Greedy heuristic: if we found a match greater than
- * the number of fast bytes, stop immediately. */
- if (new_len > ctx->params.alg_params.slow.num_fast_bytes) {
+ /* If reaching any positions for the first time,
+ * initialize their costs to "infinity". */
+ while (end_optimum_ptr < cur_optimum_ptr + rep_max_len)
+ (++end_optimum_ptr)->cost = MC_INFINITE_COST;
- /* Build the list of matches to return and get
- * the first one. */
- match = lzx_lz_reverse_near_optimal_match_list(ctx, cur_pos);
+ /* Consider coding a repeat offset match. */
+ lzx_consider_repeat_offset_match(c,
+ cur_optimum_ptr,
+ rep_max_len,
+ rep_max_idx);
+ }
- /* Append the long match to the end of the list. */
- ctx->optimum[cur_pos].next.match_offset =
- possible_matches[0].offset;
- ctx->optimum[cur_pos].next.link = cur_pos + new_len;
- ctx->optimum_end_idx = cur_pos + new_len;
+ longest_len = matches[num_matches - 1].len;
+
+ /* If there's a very long explicit offset match, choose
+ * it immediately. */
+ if (longest_len >= c->params.nice_match_length) {
+
+ cur_optimum_ptr->queue.R[2] =
+ cur_optimum_ptr->queue.R[1];
+ cur_optimum_ptr->queue.R[1] =
+ cur_optimum_ptr->queue.R[0];
+ cur_optimum_ptr->queue.R[0] =
+ matches[num_matches - 1].offset;
+ begin_queue = &cur_optimum_ptr->queue;
+
+ offset_data = matches[num_matches - 1].offset +
+ LZX_OFFSET_OFFSET;
+ cur_optimum_ptr += longest_len;
+ cur_optimum_ptr->mc_item_data =
+ (offset_data << MC_OFFSET_SHIFT) |
+ longest_len;
+
+ lzx_skip_bytes(c, longest_len - 1);
+ break;
+ }
- /* Skip over the remaining bytes of the long match. */
- lzx_lz_skip_bytes(ctx, new_len - 1);
+ /* If reaching any positions for the first time,
+ * initialize their costs to "infinity". */
+ while (end_optimum_ptr < cur_optimum_ptr + longest_len)
+ (++end_optimum_ptr)->cost = MC_INFINITE_COST;
- /* Return first match in the list */
- return match;
+ /* Consider coding an explicit offset match. */
+ lzx_consider_explicit_offset_matches(c, cur_optimum_ptr,
+ matches, num_matches);
+ } else {
+ /* No matches found. The only choice at this position
+ * is to code a literal. */
+
+ if (end_optimum_ptr == cur_optimum_ptr) {
+ #if 1
+ /* Optimization for single literals. */
+ if (likely(cur_optimum_ptr == c->optimum)) {
+ lzx_declare_literal(c, *window_ptr++,
+ next_chosen_item);
+ if (window_ptr == block_end) {
+ c->queue = cur_optimum_ptr->queue;
+ return;
+ }
+ continue;
+ }
+ #endif
+ (++end_optimum_ptr)->cost = MC_INFINITE_COST;
}
}
- /* Consider proceeding with a literal byte. */
- block_cost_t cur_cost = ctx->optimum[cur_pos].cost;
- block_cost_t cur_plus_literal_cost = cur_cost +
- lzx_literal_cost(ctx->window[ctx->match_window_pos - 1],
- &ctx->costs);
- if (cur_plus_literal_cost < ctx->optimum[cur_pos + 1].cost) {
- ctx->optimum[cur_pos + 1].cost = cur_plus_literal_cost;
- ctx->optimum[cur_pos + 1].prev.link = cur_pos;
- ctx->optimum[cur_pos + 1].queue = ctx->optimum[cur_pos].queue;
- }
+ /* Consider coding a literal.
- if (num_possible_matches == 0)
- continue;
+	 * To avoid an extra unpredictable branch, the check of whether
+	 * coding a literal is preferable is integrated into the queue
+	 * update code below. */
+ literal = *window_ptr++;
+ cost = cur_optimum_ptr->cost + lzx_literal_cost(literal, &c->costs);
+
+ /* Advance to the next position. */
+ cur_optimum_ptr++;
+
+ /* The lowest-cost path to the current position is now known.
+ * Finalize the recent offsets queue that results from taking
+ * this lowest-cost path. */
- /* Consider proceeding with a match. */
-
- while (len_end < cur_pos + new_len)
- ctx->optimum[++len_end].cost = INFINITE_BLOCK_COST;
-
- for (unsigned len = LZX_MIN_MATCH_LEN, match_idx = num_possible_matches - 1;
- len <= new_len; len++) {
- LZX_ASSERT(match_idx < num_possible_matches);
- struct lzx_lru_queue q = ctx->optimum[cur_pos].queue;
- block_cost_t cost = cur_cost + lzx_match_cost(len,
- possible_matches[match_idx].offset,
- &ctx->costs,
- &q);
-
- if (cost < ctx->optimum[cur_pos + len].cost) {
- ctx->optimum[cur_pos + len].cost = cost;
- ctx->optimum[cur_pos + len].prev.link = cur_pos;
- ctx->optimum[cur_pos + len].prev.match_offset =
- possible_matches[match_idx].offset;
- ctx->optimum[cur_pos + len].queue = q;
+ if (cost < cur_optimum_ptr->cost) {
+ /* Literal: queue remains unchanged. */
+ cur_optimum_ptr->cost = cost;
+ cur_optimum_ptr->mc_item_data = (literal << MC_OFFSET_SHIFT) | 1;
+ cur_optimum_ptr->queue = (cur_optimum_ptr - 1)->queue;
+ } else {
+ /* Match: queue update is needed. */
+ len = cur_optimum_ptr->mc_item_data & MC_LEN_MASK;
+ offset_data = cur_optimum_ptr->mc_item_data >> MC_OFFSET_SHIFT;
+ if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
+ /* Explicit offset match: offset is inserted at front */
+ cur_optimum_ptr->queue.R[0] = offset_data - LZX_OFFSET_OFFSET;
+ cur_optimum_ptr->queue.R[1] = (cur_optimum_ptr - len)->queue.R[0];
+ cur_optimum_ptr->queue.R[2] = (cur_optimum_ptr - len)->queue.R[1];
+ } else {
+ /* Repeat offset match: offset is swapped to front */
+ cur_optimum_ptr->queue = (cur_optimum_ptr - len)->queue;
+ swap(cur_optimum_ptr->queue.R[0],
+ cur_optimum_ptr->queue.R[offset_data]);
}
+ }
- if (len == possible_matches[match_idx].len)
- match_idx--;
+ /*
+ * This loop will terminate when either of the following
+ * conditions is true:
+ *
+ * (1) cur_optimum_ptr == end_optimum_ptr
+ *
+ * There are no paths that extend beyond the current
+ * position. In this case, any path to a later position
+ * must pass through the current position, so we can go
+ * ahead and choose the list of items that led to this
+ * position.
+ *
+ * (2) cur_optimum_ptr == &c->optimum[LZX_OPTIM_ARRAY_LENGTH]
+ *
+ * This bounds the number of times the algorithm can step
+ * forward before it is guaranteed to start choosing items.
+ * This limits the memory usage. But
+ * LZX_OPTIM_ARRAY_LENGTH is high enough that on most
+ * inputs this limit is never reached.
+ *
+ * Note: no check for end-of-block is needed because
+ * end-of-block will trigger condition (1).
+ */
+ if (cur_optimum_ptr == end_optimum_ptr ||
+ cur_optimum_ptr == &c->optimum[LZX_OPTIM_ARRAY_LENGTH])
+ {
+ begin_queue = &cur_optimum_ptr->queue;
+ break;
}
}
+
+ /* Choose the current list of items that constitute the minimum-cost
+ * path to the current position. */
+ lzx_declare_item_list(c, cur_optimum_ptr, next_chosen_item);
+ goto begin;
}
-/*
- * Set default symbol costs.
- */
-static void
-lzx_set_default_costs(struct lzx_costs * costs, unsigned num_main_syms)
+/* Fast heuristic scoring for lazy parsing: how "good" is this match? */
+static inline unsigned
+lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
{
- unsigned i;
+ unsigned score = len;
- /* Literal symbols */
- for (i = 0; i < LZX_NUM_CHARS; i++)
- costs->main[i] = 8;
+ if (adjusted_offset < 2048)
+ score++;
- /* Match header symbols */
- for (; i < num_main_syms; i++)
- costs->main[i] = 10;
+ if (adjusted_offset < 1024)
+ score++;
- /* Length symbols */
- for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
- costs->len[i] = 8;
-
- /* Aligned offset symbols */
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- costs->aligned[i] = 3;
+ return score;
}
-/* Given the frequencies of symbols in a compressed block and the corresponding
- * Huffman codes, return LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM if an
- * aligned offset or verbatim block, respectively, will take fewer bits to
- * output. */
-static int
-lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
- const struct lzx_codes * codes)
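+/* Heuristic score for a repeat offset match.  The +3 bias ensures that, at
+ * equal length, a repeat offset match is always preferred over an explicit
+ * offset match, since it is cheaper to encode; the repeat slot itself is
+ * not taken into account. */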
+static inline unsigned
+lzx_repeat_offset_match_score(unsigned len, unsigned slot)
{
- unsigned aligned_cost = 0;
- unsigned verbatim_cost = 0;
-
- /* Verbatim blocks have a constant 3 bits per position footer. Aligned
- * offset blocks have an aligned offset symbol per position footer, plus
- * an extra 24 bits to output the lengths necessary to reconstruct the
- * aligned offset code itself. */
- for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
- verbatim_cost += 3 * freqs->aligned[i];
- aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
- }
- aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
- if (aligned_cost < verbatim_cost)
- return LZX_BLOCKTYPE_ALIGNED;
- else
- return LZX_BLOCKTYPE_VERBATIM;
+ return len + 3;
}
-/* Find a near-optimal sequence of matches/literals with which to output the
- * specified LZX block, then set its type to that which has the minimum cost to
- * output. */
-static void
-lzx_optimize_block(struct lzx_compressor *ctx, struct lzx_block_spec *spec,
- unsigned num_passes)
+/* Lazy parsing: choose the matches/literals for a block using greedy
+ * parsing with one position of look-ahead.  A match found at the current
+ * position is output only if no higher-scoring match is found at the next
+ * position; otherwise a literal is output in its place and the new match
+ * becomes the pending one. */
+static u32
+lzx_choose_lazy_items_for_block(struct lzx_compressor *c,
+ u32 block_start_pos, u32 block_size)
{
- const struct lzx_lru_queue orig_queue = ctx->queue;
- struct lzx_freqs freqs;
-
- unsigned orig_window_pos = spec->window_pos;
- unsigned orig_cached_pos = ctx->cached_matches_pos;
-
- LZX_ASSERT(ctx->match_window_pos == spec->window_pos);
+ const u8 *window_ptr;
+ const u8 *block_end;
+ struct lz_mf *mf;
+ struct lz_match *matches;
+ unsigned num_matches;
+ unsigned cur_len;
+ u32 cur_offset_data;
+ unsigned cur_score;
+ unsigned rep_max_len;
+ unsigned rep_max_idx;
+ unsigned rep_score;
+ unsigned prev_len;
+ unsigned prev_score;
+ u32 prev_offset_data;
+ unsigned skip_len;
+ struct lzx_item *next_chosen_item;
+
+ window_ptr = &c->cur_window[block_start_pos];
+ block_end = window_ptr + block_size;
+ matches = c->cached_matches;
+ mf = c->mf;
+ next_chosen_item = c->chosen_items;
+
+ prev_len = 0;
+ prev_offset_data = 0;
+ prev_score = 0;
+
+ while (window_ptr != block_end) {
+
+ /* Find explicit offset matches with the current position. */
+ num_matches = lz_mf_get_matches(mf, matches);
+ window_ptr++;
+
+ if (num_matches == 0 ||
+ (matches[num_matches - 1].len == 3 &&
+ matches[num_matches - 1].offset >= 8192 - LZX_OFFSET_OFFSET &&
+ matches[num_matches - 1].offset != c->queue.R[0] &&
+ matches[num_matches - 1].offset != c->queue.R[1] &&
+ matches[num_matches - 1].offset != c->queue.R[2]))
+ {
+ /* No match found, or the only match found was a distant
+ * length 3 match. Output the previous match if there
+ * is one; otherwise output a literal. */
- ctx->match_window_end = spec->window_pos + spec->block_size;
- spec->chosen_matches_start_pos = spec->window_pos;
+ no_match_found:
- LZX_ASSERT(num_passes >= 1);
+ if (prev_len) {
+ skip_len = prev_len - 2;
+ goto output_prev_match;
+ } else {
+ lzx_declare_literal(c, *(window_ptr - 1),
+ &next_chosen_item);
+ continue;
+ }
+ }
- /* The first optimal parsing pass is done using the cost model already
- * set in ctx->costs. Each later pass is done using a cost model
- * computed from the previous pass. */
- for (unsigned pass = 0; pass < num_passes; pass++) {
+ /* Find the longest repeat offset match with the current
+ * position. */
+ if (likely(block_end - (window_ptr - 1) >= 2)) {
+ rep_max_len = lzx_repsearch((window_ptr - 1),
+ block_end - (window_ptr - 1),
+ &c->queue, &rep_max_idx);
+ } else {
+ rep_max_len = 0;
+ }
- ctx->match_window_pos = orig_window_pos;
- ctx->cached_matches_pos = orig_cached_pos;
- ctx->queue = orig_queue;
- spec->num_chosen_matches = 0;
- memset(&freqs, 0, sizeof(freqs));
+ cur_len = matches[num_matches - 1].len;
+ cur_offset_data = matches[num_matches - 1].offset + LZX_OFFSET_OFFSET;
+ cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
- for (unsigned i = spec->window_pos; i < spec->window_pos + spec->block_size; ) {
- struct raw_match raw_match;
- struct lzx_match lzx_match;
+ /* Select the better of the explicit and repeat offset matches. */
+ if (rep_max_len >= 3 &&
+ (rep_score = lzx_repeat_offset_match_score(rep_max_len,
+ rep_max_idx)) >= cur_score)
+ {
+ cur_len = rep_max_len;
+ cur_offset_data = rep_max_idx;
+ cur_score = rep_score;
+ }
- raw_match = lzx_lz_get_near_optimal_match(ctx);
- if (raw_match.len >= LZX_MIN_MATCH_LEN) {
- lzx_match.data = lzx_tally_match(raw_match.len, raw_match.offset,
- &freqs, &ctx->queue);
- i += raw_match.len;
- } else {
- lzx_match.data = lzx_tally_literal(ctx->window[i], &freqs);
- i += 1;
- }
- ctx->chosen_matches[spec->chosen_matches_start_pos +
- spec->num_chosen_matches++] = lzx_match;
+ if (unlikely(cur_len > block_end - (window_ptr - 1))) {
+ /* Nearing end of block. */
+ cur_len = block_end - (window_ptr - 1);
+ if (cur_len < 3)
+ goto no_match_found;
}
- lzx_make_huffman_codes(&freqs, &spec->codes,
- ctx->num_main_syms);
- if (pass < num_passes - 1)
- lzx_set_costs(ctx, &spec->codes.lens);
- ctx->matches_cached = true;
- }
- spec->block_type = lzx_choose_verbatim_or_aligned(&freqs, &spec->codes);
- ctx->matches_cached = false;
-}
+ if (prev_len == 0 || cur_score > prev_score) {
+ /* No previous match, or the current match is better
+ * than the previous match.
+ *
+ * If there's a previous match, then output a literal in
+ * its place.
+ *
+ * In both cases, if the current match is very long,
+ * then output it immediately. Otherwise, attempt a
+ * lazy match by waiting to see if there's a better
+ * match at the next position. */
-static void
-lzx_optimize_blocks(struct lzx_compressor *ctx)
-{
- lzx_lru_queue_init(&ctx->queue);
- ctx->optimum_cur_idx = 0;
- ctx->optimum_end_idx = 0;
+ if (prev_len)
+ lzx_declare_literal(c, *(window_ptr - 2), &next_chosen_item);
- const unsigned num_passes = ctx->params.alg_params.slow.num_optim_passes;
+ prev_len = cur_len;
+ prev_offset_data = cur_offset_data;
+ prev_score = cur_score;
- for (unsigned i = 0; i < ctx->num_blocks; i++)
- lzx_optimize_block(ctx, &ctx->block_specs[i], num_passes);
-}
+ if (prev_len >= c->params.nice_match_length) {
+ skip_len = prev_len - 1;
+ goto output_prev_match;
+ }
+ continue;
+ }
-/* Initialize the suffix array match-finder for the specified input. */
-static void
-lzx_lz_init_matchfinder(const u8 T[const restrict],
- const input_idx_t n,
- input_idx_t SA[const restrict],
- input_idx_t ISA[const restrict],
- input_idx_t LCP[const restrict],
- struct salink link[const restrict],
- const unsigned max_match_len)
-{
- /* Compute SA (Suffix Array). */
+ /* Current match is not better than the previous match, so
+ * output the previous match. */
- {
- /* ISA and link are used as temporary space. */
- BUILD_BUG_ON(LZX_MIN_WINDOW_SIZE * sizeof(ISA[0]) < 256 * sizeof(saidx_t));
- BUILD_BUG_ON(LZX_MIN_WINDOW_SIZE * 2 * sizeof(link[0]) < 256 * 256 * sizeof(saidx_t));
+ skip_len = prev_len - 2;
- if (sizeof(input_idx_t) == sizeof(saidx_t)) {
- divsufsort(T, SA, n, (saidx_t*)ISA, (saidx_t*)link);
+ output_prev_match:
+ if (prev_offset_data < LZX_NUM_RECENT_OFFSETS) {
+ lzx_declare_repeat_offset_match(c, prev_len,
+ prev_offset_data,
+ &next_chosen_item);
+ swap(c->queue.R[0], c->queue.R[prev_offset_data]);
} else {
- saidx_t sa[n];
- divsufsort(T, sa, n, (saidx_t*)ISA, (saidx_t*)link);
- for (input_idx_t i = 0; i < n; i++)
- SA[i] = sa[i];
+ lzx_declare_explicit_offset_match(c, prev_len,
+ prev_offset_data - LZX_OFFSET_OFFSET,
+ &next_chosen_item);
+ c->queue.R[2] = c->queue.R[1];
+ c->queue.R[1] = c->queue.R[0];
+ c->queue.R[0] = prev_offset_data - LZX_OFFSET_OFFSET;
}
+ lz_mf_skip_positions(mf, skip_len);
+ window_ptr += skip_len;
+ prev_len = 0;
}
-#ifdef ENABLE_LZX_DEBUG
+ return next_chosen_item - c->chosen_items;
+}
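+
+/*
+ * Illustrative sketch (not part of the compressor): the recent offsets queue
+ * update rule used by the lazy parser above, isolated into a standalone
+ * helper. A repeat offset match moves the matched queue slot to the front,
+ * while an explicit offset match pushes the new offset and evicts the oldest
+ * entry. The helper name is hypothetical; the queue layout mirrors 'struct
+ * lzx_lru_queue'.
+ */
+static inline void
+lzx_example_update_queue(struct lzx_lru_queue *queue, bool is_repeat, u32 value)
+{
+ if (is_repeat) {
+ /* 'value' is the index (0, 1, or 2) of the matched slot. */
+ swap(queue->R[0], queue->R[value]);
+ } else {
+ /* 'value' is the new explicit offset. */
+ queue->R[2] = queue->R[1];
+ queue->R[1] = queue->R[0];
+ queue->R[0] = value;
+ }
+}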
- LZX_ASSERT(n > 0);
+/* Given the frequencies of symbols in an LZX-compressed block and the
+ * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
+ * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
+ * will take fewer bits to output. */
+static int
+lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
+ const struct lzx_codes * codes)
+{
+ u32 aligned_cost = 0;
+ u32 verbatim_cost = 0;
- /* Verify suffix array. */
- {
- bool found[n];
- ZERO_ARRAY(found);
- for (input_idx_t r = 0; r < n; r++) {
- input_idx_t i = SA[r];
- LZX_ASSERT(i < n);
- LZX_ASSERT(!found[i]);
- found[i] = true;
- }
+ /* A verbatim block requires 3 bits in each place where an aligned
+ * offset block would use an aligned offset symbol. */
+ for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+ verbatim_cost += 3 * freqs->aligned[i];
+ aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
}
- for (input_idx_t r = 0; r < n - 1; r++) {
-
- input_idx_t i1 = SA[r];
- input_idx_t i2 = SA[r + 1];
+ /* Account for output of the aligned offset code. */
+ aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
- input_idx_t n1 = n - i1;
- input_idx_t n2 = n - i2;
+ if (aligned_cost < verbatim_cost)
+ return LZX_BLOCKTYPE_ALIGNED;
+ else
+ return LZX_BLOCKTYPE_VERBATIM;
+}
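+
+/*
+ * Worked example of the decision above (frequencies hypothetical): with
+ * LZX_ALIGNEDCODE_NUM_SYMBOLS == 8 and LZX_ALIGNEDCODE_ELEMENT_SIZE == 3,
+ * suppose the aligned symbols occur 1000 times in total and their Huffman
+ * codewords average 2.5 bits. Then:
+ *
+ * verbatim_cost = 3 * 1000 = 3000 bits
+ * aligned_cost = 2500 + (3 * 8) = 2524 bits
+ *
+ * so the aligned offset block type is chosen. The 24-bit term is the cost
+ * of writing the aligned code's codeword lengths in the block header.
+ */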
- LZX_ASSERT(memcmp(&T[i1], &T[i2], min(n1, n2)) <= 0);
+/* Near-optimal parsing */
+static u32
+lzx_choose_near_optimal_items_for_block(struct lzx_compressor *c,
+ u32 block_start_pos, u32 block_size)
+{
+ u32 num_passes_remaining = c->params.num_optim_passes;
+ struct lzx_lru_queue orig_queue;
+ struct lzx_item *next_chosen_item;
+ struct lzx_item **next_chosen_item_ptr;
+
+ /* Choose appropriate match-finder wrapper functions. */
+ if (num_passes_remaining > 1) {
+ if (block_size == c->cur_window_size)
+ c->get_matches_func = lzx_get_matches_fillcache_singleblock;
+ else
+ c->get_matches_func = lzx_get_matches_fillcache_multiblock;
+ c->skip_bytes_func = lzx_skip_bytes_fillcache;
+ } else {
+ if (block_size == c->cur_window_size)
+ c->get_matches_func = lzx_get_matches_nocache_singleblock;
+ else
+ c->get_matches_func = lzx_get_matches_nocache_multiblock;
+ c->skip_bytes_func = lzx_skip_bytes_nocache;
}
- LZX_DEBUG("Verified SA (len %u)", n);
-#endif /* ENABLE_LZX_DEBUG */
- /* Compute ISA (Inverse Suffix Array) */
- for (input_idx_t r = 0; r < n; r++)
- ISA[SA[r]] = r;
+ /* No matches will extend beyond the end of the block. */
+ c->match_window_end = block_start_pos + block_size;
- /* Compute LCP (longest common prefix) array.
+ /* The first optimization pass will use a default cost model. Each
+ * additional optimization pass will use a cost model computed from the
+ * previous pass.
*
- * Algorithm adapted from Kasai et al. 2001: "Linear-Time
- * Longest-Common-Prefix Computation in Suffix Arrays and Its
- * Applications". */
- {
- input_idx_t h = 0;
- for (input_idx_t i = 0; i < n; i++) {
- input_idx_t r = ISA[i];
- if (r > 0) {
- input_idx_t j = SA[r - 1];
-
- input_idx_t lim = min(n - i, n - j);
-
- while (h < lim && T[i + h] == T[j + h])
- h++;
- LCP[r] = h;
- if (h > 0)
- h--;
- }
+ * To improve performance we only generate the array containing the
+ * matches and literals in intermediate form on the final pass. For
+ * earlier passes, tallying symbol frequencies is sufficient. */
+ lzx_set_default_costs(&c->costs, c->num_main_syms);
+
+ next_chosen_item_ptr = NULL;
+ orig_queue = c->queue;
+ do {
+ /* Reset the match-finder wrapper. */
+ c->match_window_pos = block_start_pos;
+ c->cache_ptr = c->cached_matches;
+
+ if (num_passes_remaining == 1) {
+ /* Last pass: actually generate the items. */
+ next_chosen_item = c->chosen_items;
+ next_chosen_item_ptr = &next_chosen_item;
}
- }
-#ifdef ENABLE_LZX_DEBUG
- /* Verify LCP array. */
- for (input_idx_t r = 0; r < n - 1; r++) {
- LZX_ASSERT(ISA[SA[r]] == r);
- LZX_ASSERT(ISA[SA[r + 1]] == r + 1);
+ /* Choose the items. */
+ lzx_optim_pass(c, next_chosen_item_ptr);
- input_idx_t i1 = SA[r];
- input_idx_t i2 = SA[r + 1];
- input_idx_t lcp = LCP[r + 1];
+ if (num_passes_remaining > 1) {
+ /* This isn't the last pass. */
- input_idx_t n1 = n - i1;
- input_idx_t n2 = n - i2;
+ /* Make the Huffman codes from the symbol frequencies. */
+ lzx_make_huffman_codes(&c->freqs, &c->codes[c->codes_index],
+ c->num_main_syms);
- LZX_ASSERT(lcp <= min(n1, n2));
+ /* Update symbol costs. */
+ lzx_set_costs(c, &c->codes[c->codes_index].lens);
- LZX_ASSERT(memcmp(&T[i1], &T[i2], lcp) == 0);
- if (lcp < min(n1, n2))
- LZX_ASSERT(T[i1 + lcp] != T[i2 + lcp]);
- }
-#endif /* ENABLE_LZX_DEBUG */
+ /* Reset symbol frequencies. */
+ memset(&c->freqs, 0, sizeof(c->freqs));
- /* Compute salink.next and salink.lcpnext.
- *
- * Algorithm adapted from Crochemore et al. 2009:
- * "LPF computation revisited".
- *
- * Note: we cap lcpnext to the maximum match length so that the
- * match-finder need not worry about it later. */
- link[n - 1].next = (input_idx_t)~0U;
- link[n - 1].prev = (input_idx_t)~0U;
- link[n - 1].lcpnext = 0;
- link[n - 1].lcpprev = 0;
- for (input_idx_t r = n - 2; r != (input_idx_t)~0U; r--) {
- input_idx_t t = r + 1;
- input_idx_t l = LCP[t];
- while (t != (input_idx_t)~0 && SA[t] > SA[r]) {
- l = min(l, link[t].lcpnext);
- t = link[t].next;
- }
- link[r].next = t;
- link[r].lcpnext = min(l, max_match_len);
- LZX_ASSERT(t == (input_idx_t)~0U || l <= n - SA[t]);
- LZX_ASSERT(l <= n - SA[r]);
- LZX_ASSERT(memcmp(&T[SA[r]], &T[SA[t]], l) == 0);
- }
+ /* Reset the match offset LRU queue to what it was at
+ * the beginning of the block. */
+ c->queue = orig_queue;
- /* Compute salink.prev and salink.lcpprev.
- *
- * Algorithm adapted from Crochemore et al. 2009:
- * "LPF computation revisited".
- *
- * Note: we cap lcpprev to the maximum match length so that the
- * match-finder need not worry about it later. */
- link[0].prev = (input_idx_t)~0;
- link[0].next = (input_idx_t)~0;
- link[0].lcpprev = 0;
- link[0].lcpnext = 0;
- for (input_idx_t r = 1; r < n; r++) {
- input_idx_t t = r - 1;
- input_idx_t l = LCP[r];
- while (t != (input_idx_t)~0 && SA[t] > SA[r]) {
- l = min(l, link[t].lcpprev);
- t = link[t].prev;
+ /* Choose appropriate match-finder wrapper functions. */
+ if (c->cache_ptr <= c->cache_limit) {
+ c->get_matches_func = lzx_get_matches_usecache_nocheck;
+ c->skip_bytes_func = lzx_skip_bytes_usecache_nocheck;
+ } else {
+ c->get_matches_func = lzx_get_matches_usecache;
+ c->skip_bytes_func = lzx_skip_bytes_usecache;
+ }
}
- link[r].prev = t;
- link[r].lcpprev = min(l, max_match_len);
- LZX_ASSERT(t == (input_idx_t)~0 || l <= n - SA[t]);
- LZX_ASSERT(l <= n - SA[r]);
- LZX_ASSERT(memcmp(&T[SA[r]], &T[SA[t]], l) == 0);
- }
-}
-
-/* Prepare the input window into one or more LZX blocks ready to be output. */
-static void
-lzx_prepare_blocks(struct lzx_compressor * ctx)
-{
- /* Initialize the match-finder. */
- lzx_lz_init_matchfinder(ctx->window, ctx->window_size,
- ctx->SA, ctx->ISA, ctx->LCP, ctx->salink,
- LZX_MAX_MATCH_LEN);
- ctx->cached_matches_pos = 0;
- ctx->matches_cached = false;
- ctx->match_window_pos = 0;
-
- /* Set up a default cost model. */
- lzx_set_default_costs(&ctx->costs, ctx->num_main_syms);
-
- ctx->num_blocks = DIV_ROUND_UP(ctx->window_size, LZX_DIV_BLOCK_SIZE);
- for (unsigned i = 0; i < ctx->num_blocks; i++) {
- unsigned pos = LZX_DIV_BLOCK_SIZE * i;
- ctx->block_specs[i].window_pos = pos;
- ctx->block_specs[i].block_size = min(ctx->window_size - pos, LZX_DIV_BLOCK_SIZE);
- }
+ } while (--num_passes_remaining);
- /* Determine sequence of matches/literals to output for each block. */
- lzx_optimize_blocks(ctx);
+ /* Return the number of items chosen. */
+ return next_chosen_item - c->chosen_items;
}
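+
+/*
+ * The multi-pass scheme above, reduced to a sketch (illustrative only; the
+ * real passes go through the match-finder wrapper function pointers):
+ *
+ * set default costs;
+ * for each pass:
+ * parse the block, tallying symbol frequencies
+ * (first pass: run the match-finder and fill the cache;
+ * later passes: replay matches from the cache);
+ * if not the last pass:
+ * build Huffman codes from the tallied frequencies;
+ * recompute symbol costs from the codeword lengths;
+ * reset the frequencies and the LRU queue;
+ * on the last pass, also record the chosen items.
+ */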
/*
- * This is the fast version of lzx_prepare_blocks(). This version "quickly"
- * prepares a single compressed block containing the entire input. See the
- * description of the "Fast algorithm" at the beginning of this file for more
- * information.
- *
- * Input --- the preprocessed data:
+ * Choose the matches/literals with which to output the block of data beginning
+ * at '&c->cur_window[block_start_pos]' and extending for 'block_size' bytes.
*
- * ctx->window[]
- * ctx->window_size
+ * The frequencies of the Huffman symbols in the block will be tallied in
+ * 'c->freqs'.
*
- * Output --- the block specification and the corresponding match/literal data:
+ * 'c->queue' must specify the state of the queue at the beginning of this block.
+ * This function will update it to the state of the queue at the end of this
+ * block.
*
- * ctx->block_specs[]
- * ctx->num_blocks
- * ctx->chosen_matches[]
+ * Returns the number of matches/literals that were chosen and written to
+ * 'c->chosen_items' in the 'struct lzx_item' intermediate representation.
*/
-static void
-lzx_prepare_block_fast(struct lzx_compressor * ctx)
+static u32
+lzx_choose_items_for_block(struct lzx_compressor *c,
+ u32 block_start_pos, u32 block_size)
{
- struct lzx_record_ctx record_ctx;
- struct lzx_block_spec *spec;
-
- /* Parameters to hash chain LZ match finder
- * (lazy with 1 match lookahead) */
- static const struct lz_params lzx_lz_params = {
- /* Although LZX_MIN_MATCH_LEN == 2, length 2 matches typically
- * aren't worth choosing when using greedy or lazy parsing. */
- .min_match = 3,
- .max_match = LZX_MAX_MATCH_LEN,
- .max_offset = 32768,
- .good_match = LZX_MAX_MATCH_LEN,
- .nice_match = LZX_MAX_MATCH_LEN,
- .max_chain_len = LZX_MAX_MATCH_LEN,
- .max_lazy_match = LZX_MAX_MATCH_LEN,
- .too_far = 4096,
- };
-
- /* Initialize symbol frequencies and match offset LRU queue. */
- memset(&record_ctx.freqs, 0, sizeof(struct lzx_freqs));
- lzx_lru_queue_init(&record_ctx.queue);
- record_ctx.matches = ctx->chosen_matches;
-
- /* Determine series of matches/literals to output. */
- lz_analyze_block(ctx->window,
- ctx->window_size,
- lzx_record_match,
- lzx_record_literal,
- &record_ctx,
- &lzx_lz_params,
- ctx->prev_tab);
-
- /* Set up block specification. */
- spec = &ctx->block_specs[0];
- spec->block_type = LZX_BLOCKTYPE_ALIGNED;
- spec->window_pos = 0;
- spec->block_size = ctx->window_size;
- spec->num_chosen_matches = (record_ctx.matches - ctx->chosen_matches);
- spec->chosen_matches_start_pos = 0;
- lzx_make_huffman_codes(&record_ctx.freqs, &spec->codes,
- ctx->num_main_syms);
- ctx->num_blocks = 1;
+ return (*c->params.choose_items_for_block)(c, block_start_pos, block_size);
}
+/* Initialize c->offset_slot_fast. */
static void
-do_call_insn_translation(u32 *call_insn_target, int input_pos,
- s32 file_size)
+lzx_init_offset_slot_fast(struct lzx_compressor *c)
{
- s32 abs_offset;
- s32 rel_offset;
-
- rel_offset = le32_to_cpu(*call_insn_target);
- if (rel_offset >= -input_pos && rel_offset < file_size) {
- if (rel_offset < file_size - input_pos) {
- /* "good translation" */
- abs_offset = rel_offset + input_pos;
- } else {
- /* "compensating translation" */
- abs_offset = rel_offset - file_size;
- }
- *call_insn_target = cpu_to_le32(abs_offset);
- }
-}
+ u8 slot = 0;
-/* This is the reverse of undo_call_insn_preprocessing() in lzx-decompress.c.
- * See the comment above that function for more information. */
-static void
-do_call_insn_preprocessing(u8 data[], int size)
-{
- for (int i = 0; i < size - 10; i++) {
- if (data[i] == 0xe8) {
- do_call_insn_translation((u32*)&data[i + 1], i,
- LZX_WIM_MAGIC_FILESIZE);
- i += 4;
- }
- }
-}
+ for (u32 offset = 0; offset < LZX_NUM_FAST_OFFSETS; offset++) {
-/* API function documented in wimlib.h */
-WIMLIBAPI unsigned
-wimlib_lzx_compress2(const void * const restrict uncompressed_data,
- unsigned const uncompressed_len,
- void * const restrict compressed_data,
- struct wimlib_lzx_context * const restrict lzx_ctx)
-{
- struct lzx_compressor *ctx = (struct lzx_compressor*)lzx_ctx;
- struct output_bitstream ostream;
- input_idx_t compressed_len;
+ while (offset + LZX_OFFSET_OFFSET >= lzx_offset_slot_base[slot + 1])
+ slot++;
- if (uncompressed_len < 100) {
- LZX_DEBUG("Too small to bother compressing.");
- return 0;
+ c->offset_slot_fast[offset] = slot;
}
+}
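+
+/*
+ * Illustrative lookup using the table built above (this standalone helper is
+ * hypothetical; the compressor performs the equivalent lookup where match
+ * costs are computed). Small offsets take the O(1) table path; larger
+ * offsets fall back to searching lzx_offset_slot_base[].
+ */
+static inline unsigned
+lzx_example_get_offset_slot(const struct lzx_compressor *c, u32 offset)
+{
+ if (offset < LZX_NUM_FAST_OFFSETS)
+ return c->offset_slot_fast[offset];
+
+ /* Fallback: linear search, as in lzx_init_offset_slot_fast(). */
+ unsigned slot = 0;
+ while (offset + LZX_OFFSET_OFFSET >= lzx_offset_slot_base[slot + 1])
+ slot++;
+ return slot;
+}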
- if (uncompressed_len > ctx->max_window_size) {
- LZX_DEBUG("Can't compress %u bytes using window of %u bytes!",
- uncompressed_len, ctx->max_window_size);
- return 0;
- }
+/* Set internal compression parameters for the specified compression level and
+ * maximum window size. */
+static void
+lzx_build_params(unsigned int compression_level, u32 max_window_size,
+ struct lzx_compressor_params *lzx_params)
+{
+ if (compression_level < 25) {
- LZX_DEBUG("Attempting to compress %u bytes...", uncompressed_len);
+ /* Fast compression: Use lazy parsing. */
- /* The input data must be preprocessed. To avoid changing the original
- * input, copy it to a temporary buffer. */
- memcpy(ctx->window, uncompressed_data, uncompressed_len);
- ctx->window_size = uncompressed_len;
+ lzx_params->choose_items_for_block = lzx_choose_lazy_items_for_block;
+ lzx_params->num_optim_passes = 1;
- /* This line is unnecessary; it just avoids inconsequential accesses of
- * uninitialized memory that would show up in memory-checking tools such
- * as valgrind. */
- memset(&ctx->window[ctx->window_size], 0, 12);
+ /* When lazy parsing, the hash chain match-finding algorithm is
+ * fastest unless the window is too large.
+ *
+ * TODO: something like hash arrays would actually be better
+ * than binary trees on large windows. */
+ if (max_window_size <= 262144)
+ lzx_params->mf_algo = LZ_MF_HASH_CHAINS;
+ else
+ lzx_params->mf_algo = LZ_MF_BINARY_TREES;
- LZX_DEBUG("Preprocessing data...");
+ /* When lazy parsing, don't bother with length 2 matches. */
+ lzx_params->min_match_length = 3;
- /* Before doing any actual compression, do the call instruction (0xe8
- * byte) translation on the uncompressed data. */
- do_call_insn_preprocessing(ctx->window, ctx->window_size);
+ /* Scale nice_match_length and max_search_depth with the
+ * compression level. */
+ lzx_params->nice_match_length = 25 + compression_level * 2;
+ lzx_params->max_search_depth = 25 + compression_level;
+ } else {
- LZX_DEBUG("Preparing blocks...");
+ /* Normal / high compression: Use near-optimal parsing. */
- /* Prepare the compressed data. */
- if (ctx->params.algorithm == WIMLIB_LZX_ALGORITHM_FAST)
- lzx_prepare_block_fast(ctx);
- else
- lzx_prepare_blocks(ctx);
+ lzx_params->choose_items_for_block = lzx_choose_near_optimal_items_for_block;
- LZX_DEBUG("Writing compressed blocks...");
+ /* Set a number of optimization passes appropriate for the
+ * compression level. */
- /* Generate the compressed data. */
- init_output_bitstream(&ostream, compressed_data, ctx->window_size - 1);
- lzx_write_all_blocks(ctx, &ostream);
+ lzx_params->num_optim_passes = 1;
- LZX_DEBUG("Flushing bitstream...");
- compressed_len = flush_output_bitstream(&ostream);
- if (compressed_len == ~(input_idx_t)0) {
- LZX_DEBUG("Data did not compress to less than original length!");
- return 0;
- }
+ if (compression_level >= 40)
+ lzx_params->num_optim_passes++;
- LZX_DEBUG("Done: compressed %u => %u bytes.",
- uncompressed_len, compressed_len);
-
- /* Verify that we really get the same thing back when decompressing.
- * Although this could be disabled by default in all cases, it only
- * takes around 2-3% of the running time of the slow algorithm to do the
- * verification. */
- if (ctx->params.algorithm == WIMLIB_LZX_ALGORITHM_SLOW
- #if defined(ENABLE_LZX_DEBUG) || defined(ENABLE_VERIFY_COMPRESSION)
- || 1
- #endif
- )
- {
- /* The decompression buffer can be any temporary space that's no
- * longer needed. */
- u8 *buf = (u8*)(ctx->SA ? ctx->SA : ctx->prev_tab);
-
- if (wimlib_lzx_decompress2(compressed_data, compressed_len,
- buf, uncompressed_len, ctx->max_window_size))
- {
- ERROR("Failed to decompress data we "
- "compressed using LZX algorithm");
- wimlib_assert(0);
- return 0;
+ /* Use more optimization passes for higher compression levels.
+ * But the more passes there are, the less they help --- so
+ * don't add them linearly. */
+ if (compression_level >= 70) {
+ lzx_params->num_optim_passes++;
+ if (compression_level >= 100)
+ lzx_params->num_optim_passes++;
+ if (compression_level >= 150)
+ lzx_params->num_optim_passes++;
+ if (compression_level >= 200)
+ lzx_params->num_optim_passes++;
+ if (compression_level >= 300)
+ lzx_params->num_optim_passes++;
}
- if (memcmp(uncompressed_data, buf, uncompressed_len)) {
- ERROR("Data we compressed using LZX algorithm "
- "didn't decompress to original");
- wimlib_assert(0);
- return 0;
- }
+ /* When doing near-optimal parsing, the hash chain match-finding
+ * algorithm is good if the window size is small and we're only
+ * doing one optimization pass. Otherwise, the binary tree
+ * algorithm is the way to go. */
+ if (max_window_size <= 32768 && lzx_params->num_optim_passes == 1)
+ lzx_params->mf_algo = LZ_MF_HASH_CHAINS;
+ else
+ lzx_params->mf_algo = LZ_MF_BINARY_TREES;
+
+ /* When doing near-optimal parsing, allow length 2 matches if
+ * the compression level is sufficiently high. */
+ if (compression_level >= 45)
+ lzx_params->min_match_length = 2;
+ else
+ lzx_params->min_match_length = 3;
+
+ /* Scale nice_match_length and max_search_depth with the
+ * compression level. */
+ lzx_params->nice_match_length = min(((u64)compression_level * 32) / 50,
+ LZX_MAX_MATCH_LEN);
+ lzx_params->max_search_depth = min(((u64)compression_level * 50) / 50,
+ LZX_MAX_MATCH_LEN);
}
- return compressed_len;
}
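+
+/*
+ * For reference, the number of optimization passes chosen by the logic above
+ * for each compression level range:
+ *
+ * level < 25 lazy parsing (single pass)
+ * 25 <= level < 40 1 pass
+ * 40 <= level < 70 2 passes
+ * 70 <= level < 100 3 passes
+ * 100 <= level < 150 4 passes
+ * 150 <= level < 200 5 passes
+ * 200 <= level < 300 6 passes
+ * level >= 300 7 passes
+ */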
-static bool
-lzx_params_compatible(const struct wimlib_lzx_params *oldparams,
- const struct wimlib_lzx_params *newparams)
+/* Given the internal compression parameters and maximum window size, build the
+ * Lempel-Ziv match-finder parameters. */
+static void
+lzx_build_mf_params(const struct lzx_compressor_params *lzx_params,
+ u32 max_window_size, struct lz_mf_params *mf_params)
{
- return 0 == memcmp(oldparams, newparams, sizeof(struct wimlib_lzx_params));
+ memset(mf_params, 0, sizeof(*mf_params));
+
+ mf_params->algorithm = lzx_params->mf_algo;
+ mf_params->max_window_size = max_window_size;
+ mf_params->min_match_len = lzx_params->min_match_length;
+ mf_params->max_match_len = LZX_MAX_MATCH_LEN;
+ mf_params->max_search_depth = lzx_params->max_search_depth;
+ mf_params->nice_match_len = lzx_params->nice_match_length;
}
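+
+/*
+ * Worked example for the two functions above, assuming the default
+ * compression level of 50 and a 32768-byte window (and LZX_MAX_MATCH_LEN ==
+ * 257):
+ *
+ * choose_items_for_block near-optimal parser (level >= 25)
+ * num_optim_passes 2 (level >= 40, < 70)
+ * min_match_length 2 (level >= 45)
+ * mf_algo LZ_MF_BINARY_TREES (2 passes, so hash
+ * chains are not used even at 32768 bytes)
+ * nice_match_length min(50 * 32 / 50, 257) = 32
+ * max_search_depth min(50 * 50 / 50, 257) = 50
+ */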
-static struct wimlib_lzx_params lzx_user_default_params;
-static struct wimlib_lzx_params *lzx_user_default_params_ptr;
+static void
+lzx_free_compressor(void *_c);
-static bool
-lzx_params_valid(const struct wimlib_lzx_params *params)
+static u64
+lzx_get_needed_memory(size_t max_block_size, unsigned int compression_level)
{
- /* Validate parameters. */
- if (params->size_of_this != sizeof(struct wimlib_lzx_params)) {
- LZX_DEBUG("Invalid parameter structure size!");
- return false;
- }
+ struct lzx_compressor_params params;
+ u64 size = 0;
+ unsigned window_order;
+ u32 max_window_size;
- if (params->algorithm != WIMLIB_LZX_ALGORITHM_SLOW &&
- params->algorithm != WIMLIB_LZX_ALGORITHM_FAST)
- {
- LZX_DEBUG("Invalid algorithm.");
- return false;
- }
+ window_order = lzx_get_window_order(max_block_size);
+ if (window_order == 0)
+ return 0;
+ max_window_size = max_block_size;
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
- if (params->alg_params.slow.num_optim_passes < 1)
- {
- LZX_DEBUG("Invalid number of optimization passes!");
- return false;
- }
+ lzx_build_params(compression_level, max_window_size, &params);
- if (params->alg_params.slow.main_nostat_cost < 1 ||
- params->alg_params.slow.main_nostat_cost > 16)
- {
- LZX_DEBUG("Invalid main_nostat_cost!");
- return false;
- }
+ size += sizeof(struct lzx_compressor);
- if (params->alg_params.slow.len_nostat_cost < 1 ||
- params->alg_params.slow.len_nostat_cost > 16)
- {
- LZX_DEBUG("Invalid len_nostat_cost!");
- return false;
- }
+ /* cur_window */
+ size += max_window_size;
- if (params->alg_params.slow.aligned_nostat_cost < 1 ||
- params->alg_params.slow.aligned_nostat_cost > 8)
- {
- LZX_DEBUG("Invalid aligned_nostat_cost!");
- return false;
- }
- }
- return true;
-}
+ /* mf */
+ size += lz_mf_get_needed_memory(params.mf_algo, max_window_size);
-/* API function documented in wimlib.h */
-WIMLIBAPI int
-wimlib_lzx_set_default_params(const struct wimlib_lzx_params * params)
-{
- if (params) {
- if (!lzx_params_valid(params))
- return WIMLIB_ERR_INVALID_PARAM;
- lzx_user_default_params = *params;
- lzx_user_default_params_ptr = &lzx_user_default_params;
- } else {
- lzx_user_default_params_ptr = NULL;
- }
- return 0;
+ /* cached_matches */
+ if (params.num_optim_passes > 1)
+ size += LZX_CACHE_LEN * sizeof(struct lz_match);
+ else
+ size += LZX_MAX_MATCHES_PER_POS * sizeof(struct lz_match);
+ return size;
}
-/* API function documented in wimlib.h */
-WIMLIBAPI int
-wimlib_lzx_alloc_context(u32 window_size,
- const struct wimlib_lzx_params *params,
- struct wimlib_lzx_context **ctx_pp)
+static int
+lzx_create_compressor(size_t max_block_size, unsigned int compression_level,
+ void **c_ret)
{
-
- LZX_DEBUG("Allocating LZX context...");
-
- if (!lzx_window_size_valid(window_size))
+ struct lzx_compressor *c;
+ struct lzx_compressor_params params;
+ struct lz_mf_params mf_params;
+ unsigned window_order;
+ u32 max_window_size;
+
+ window_order = lzx_get_window_order(max_block_size);
+ if (window_order == 0)
return WIMLIB_ERR_INVALID_PARAM;
+ max_window_size = max_block_size;
- struct lzx_compressor *ctx;
-
- static const struct wimlib_lzx_params fast_default = {
- .size_of_this = sizeof(struct wimlib_lzx_params),
- .algorithm = WIMLIB_LZX_ALGORITHM_FAST,
- .use_defaults = 0,
- .alg_params = {
- .fast = {
- },
- },
- };
- static const struct wimlib_lzx_params slow_default = {
- .size_of_this = sizeof(struct wimlib_lzx_params),
- .algorithm = WIMLIB_LZX_ALGORITHM_SLOW,
- .use_defaults = 0,
- .alg_params = {
- .slow = {
- .use_len2_matches = 1,
- .num_fast_bytes = 32,
- .num_optim_passes = 2,
- .max_search_depth = 50,
- .max_matches_per_pos = 3,
- .main_nostat_cost = 15,
- .len_nostat_cost = 15,
- .aligned_nostat_cost = 7,
- },
- },
- };
-
- if (params) {
- if (!lzx_params_valid(params))
- return WIMLIB_ERR_INVALID_PARAM;
- } else {
- LZX_DEBUG("Using default algorithm and parameters.");
- if (lzx_user_default_params_ptr)
- params = lzx_user_default_params_ptr;
- else
- params = &slow_default;
- }
-
- if (params->use_defaults) {
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW)
- params = &slow_default;
- else
- params = &fast_default;
- }
-
- if (ctx_pp) {
- ctx = *(struct lzx_compressor**)ctx_pp;
+ lzx_build_params(compression_level, max_window_size, &params);
+ lzx_build_mf_params(&params, max_window_size, &mf_params);
+ if (!lz_mf_params_valid(&mf_params))
+ return WIMLIB_ERR_INVALID_PARAM;
- if (ctx &&
- lzx_params_compatible(&ctx->params, params) &&
- ctx->max_window_size == window_size)
- return 0;
+ c = CALLOC(1, sizeof(struct lzx_compressor));
+ if (!c)
+ goto oom;
+
+ c->params = params;
+ c->num_main_syms = lzx_get_num_main_syms(window_order);
+ c->window_order = window_order;
+
+ /* The window is allocated with 16-byte alignment to speed up memcpy()
+ * and to enable the lzx_e8_filter() optimization on x86_64. */
+ c->cur_window = ALIGNED_MALLOC(max_window_size, 16);
+ if (!c->cur_window)
+ goto oom;
+
+ c->mf = lz_mf_alloc(&mf_params);
+ if (!c->mf)
+ goto oom;
+
+ if (params.num_optim_passes > 1) {
+ c->cached_matches = MALLOC(LZX_CACHE_LEN *
+ sizeof(struct lz_match));
+ if (!c->cached_matches)
+ goto oom;
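+ /* Stop caching once fewer than LZX_MAX_MATCHES_PER_POS + 1
+ * entries remain, so that one more position's matches plus its
+ * count entry always fit (this describes the cache layout
+ * assumed by the fillcache/usecache wrappers). */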
+ c->cache_limit = c->cached_matches + LZX_CACHE_LEN -
+ (LZX_MAX_MATCHES_PER_POS + 1);
} else {
- LZX_DEBUG("Check parameters only.");
- return 0;
+ c->cached_matches = MALLOC(LZX_MAX_MATCHES_PER_POS *
+ sizeof(struct lz_match));
+ if (!c->cached_matches)
+ goto oom;
}
- LZX_DEBUG("Allocating memory.");
-
- ctx = CALLOC(1, sizeof(struct lzx_compressor));
- if (ctx == NULL)
- goto err;
-
- ctx->num_main_syms = lzx_get_num_main_syms(window_size);
- ctx->max_window_size = window_size;
- ctx->window = MALLOC(window_size + 12);
- if (ctx->window == NULL)
- goto err;
+ lzx_init_offset_slot_fast(c);
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_FAST) {
- ctx->prev_tab = MALLOC(window_size * sizeof(ctx->prev_tab[0]));
- if (ctx->prev_tab == NULL)
- goto err;
- }
-
- size_t block_specs_length = DIV_ROUND_UP(window_size, LZX_DIV_BLOCK_SIZE);
- ctx->block_specs = MALLOC(block_specs_length * sizeof(ctx->block_specs[0]));
- if (ctx->block_specs == NULL)
- goto err;
-
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
- ctx->SA = MALLOC(3U * window_size * sizeof(ctx->SA[0]));
- if (ctx->SA == NULL)
- goto err;
- ctx->ISA = ctx->SA + window_size;
- ctx->LCP = ctx->ISA + window_size;
-
- ctx->salink = MALLOC(window_size * sizeof(ctx->salink[0]));
- if (ctx->salink == NULL)
- goto err;
- }
+ *c_ret = c;
+ return 0;
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
- ctx->optimum = MALLOC((LZX_OPTIM_ARRAY_SIZE + LZX_MAX_MATCH_LEN) *
- sizeof(ctx->optimum[0]));
- if (ctx->optimum == NULL)
- goto err;
- }
+oom:
+ lzx_free_compressor(c);
+ return WIMLIB_ERR_NOMEM;
+}
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
- u32 cache_per_pos;
+static size_t
+lzx_compress(const void *uncompressed_data, size_t uncompressed_size,
+ void *compressed_data, size_t compressed_size_avail, void *_c)
+{
+ struct lzx_compressor *c = _c;
+ struct lzx_output_bitstream os;
+ u32 num_chosen_items;
+ const struct lzx_lens *prev_lens;
+ u32 block_start_pos;
+ u32 block_size;
+ int block_type;
- cache_per_pos = params->alg_params.slow.max_matches_per_pos;
- if (cache_per_pos > LZX_MAX_CACHE_PER_POS)
- cache_per_pos = LZX_MAX_CACHE_PER_POS;
+ /* Don't bother compressing very small inputs. */
+ if (uncompressed_size < 100)
+ return 0;
- ctx->cached_matches = MALLOC(window_size * (cache_per_pos + 1) *
- sizeof(ctx->cached_matches[0]));
- if (ctx->cached_matches == NULL)
- goto err;
- }
+ /* The input data must be preprocessed. To avoid changing the original
+ * input data, copy it to a temporary buffer. */
+ memcpy(c->cur_window, uncompressed_data, uncompressed_size);
+ c->cur_window_size = uncompressed_size;
- ctx->chosen_matches = MALLOC(window_size * sizeof(ctx->chosen_matches[0]));
- if (ctx->chosen_matches == NULL)
- goto err;
+ /* Preprocess the data. */
+ lzx_do_e8_preprocessing(c->cur_window, c->cur_window_size);
- memcpy(&ctx->params, params, sizeof(struct wimlib_lzx_params));
- memset(&ctx->zero_codes, 0, sizeof(ctx->zero_codes));
+ /* Load the window into the match-finder. */
+ lz_mf_load_window(c->mf, c->cur_window, c->cur_window_size);
- LZX_DEBUG("Successfully allocated new LZX context.");
+ /* Initialize the match offset LRU queue. */
+ lzx_lru_queue_init(&c->queue);
- wimlib_lzx_free_context(*ctx_pp);
- *ctx_pp = (struct wimlib_lzx_context*)ctx;
- return 0;
+ /* Initialize the output bitstream. */
+ lzx_init_output(&os, compressed_data, compressed_size_avail);
-err:
- wimlib_lzx_free_context((struct wimlib_lzx_context*)ctx);
- LZX_DEBUG("Ran out of memory.");
- return WIMLIB_ERR_NOMEM;
+ /* Compress the data block by block.
+ *
+ * TODO: The compression ratio could be slightly improved by performing
+ * data-dependent block splitting instead of using fixed-size blocks.
+ * However, doing this well is a computationally hard problem. */
+ block_start_pos = 0;
+ c->codes_index = 0;
+ prev_lens = &c->zero_lens;
+ do {
+ /* Compute the block size. */
+ block_size = min(LZX_DIV_BLOCK_SIZE,
+ uncompressed_size - block_start_pos);
+
+ /* Reset symbol frequencies. */
+ memset(&c->freqs, 0, sizeof(c->freqs));
+
+ /* Prepare the matches/literals for the block. */
+ num_chosen_items = lzx_choose_items_for_block(c,
+ block_start_pos,
+ block_size);
+
+ /* Make the Huffman codes from the symbol frequencies. */
+ lzx_make_huffman_codes(&c->freqs, &c->codes[c->codes_index],
+ c->num_main_syms);
+
+ /* Choose the best block type.
+ *
+ * Note: we currently don't consider uncompressed blocks. */
+ block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
+ &c->codes[c->codes_index]);
+
+ /* Write the compressed block to the output buffer. */
+ lzx_write_compressed_block(block_type,
+ block_size,
+ c->window_order,
+ c->num_main_syms,
+ c->chosen_items,
+ num_chosen_items,
+ &c->codes[c->codes_index],
+ prev_lens,
+ &os);
+
+ /* The current codeword lengths become the previous lengths. */
+ prev_lens = &c->codes[c->codes_index].lens;
+ c->codes_index ^= 1;
+
+ block_start_pos += block_size;
+
+ } while (block_start_pos != uncompressed_size);
+
+ return lzx_flush_output(&os);
}
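+
+/*
+ * Note on the two-entry c->codes[] array used above: LZX encodes each block's
+ * codeword lengths as deltas against the previous block's lengths, so block
+ * N's lengths must remain valid while block N+1 is written. Toggling
+ * c->codes_index between 0 and 1 achieves this without copying; the first
+ * block deltas against c->zero_lens (all zeroes).
+ */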
-/* API function documented in wimlib.h */
-WIMLIBAPI void
-wimlib_lzx_free_context(struct wimlib_lzx_context *_ctx)
+static void
+lzx_free_compressor(void *_c)
{
- struct lzx_compressor *ctx = (struct lzx_compressor*)_ctx;
-
- if (ctx) {
- FREE(ctx->chosen_matches);
- FREE(ctx->cached_matches);
- FREE(ctx->optimum);
- FREE(ctx->salink);
- FREE(ctx->SA);
- FREE(ctx->block_specs);
- FREE(ctx->prev_tab);
- FREE(ctx->window);
- FREE(ctx);
- }
-}
+ struct lzx_compressor *c = _c;
-/* API function documented in wimlib.h */
-WIMLIBAPI unsigned
-wimlib_lzx_compress(const void * const restrict uncompressed_data,
- unsigned const uncompressed_len,
- void * const restrict compressed_data)
-{
- int ret;
- struct wimlib_lzx_context *ctx = NULL;
- unsigned compressed_len;
-
- ret = wimlib_lzx_alloc_context(32768, NULL, &ctx);
- if (ret) {
- wimlib_assert(ret != WIMLIB_ERR_INVALID_PARAM);
- WARNING("Couldn't allocate LZX compression context: %"TS"",
- wimlib_get_error_string(ret));
- return 0;
+ if (c) {
+ ALIGNED_FREE(c->cur_window);
+ lz_mf_free(c->mf);
+ FREE(c->cached_matches);
+ FREE(c);
}
-
- compressed_len = wimlib_lzx_compress2(uncompressed_data,
- uncompressed_len,
- compressed_data,
- ctx);
-
- wimlib_lzx_free_context(ctx);
-
- return compressed_len;
}
+
+const struct compressor_ops lzx_compressor_ops = {
+ .get_needed_memory = lzx_get_needed_memory,
+ .create_compressor = lzx_create_compressor,
+ .compress = lzx_compress,
+ .free_compressor = lzx_free_compressor,
+};
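+
+/*
+ * Minimal usage sketch for the ops table above, assuming the generic wimlib
+ * compressor interface (the driver code below is hypothetical):
+ *
+ * void *c;
+ * size_t csize;
+ * static u8 cbuf[32768];
+ *
+ * if (lzx_compressor_ops.create_compressor(32768, 50, &c) == 0) {
+ * csize = lzx_compressor_ops.compress(udata, usize, cbuf,
+ * sizeof(cbuf), c);
+ * lzx_compressor_ops.free_compressor(c);
+ * }
+ *
+ * A return value of 0 from compress() means the data did not compress to
+ * less than its original size.
+ */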