/*
* Copyright (C) 2012, 2013, 2014 Eric Biggers
*
- * This file is part of wimlib, a library for working with WIM files.
- *
- * wimlib is free software; you can redistribute it and/or modify it under the
- * terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 3 of the License, or (at your option)
- * any later version.
- *
- * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
- * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * This file is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
*
- * You should have received a copy of the GNU General Public License
- * along with wimlib; if not, see http://www.gnu.org/licenses/.
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this file; if not, see http://www.gnu.org/licenses/.
*/
/*
- * This file contains a compressor for the LZX ("Lempel-Ziv eXtended"?)
- * compression format, as used in the WIM (Windows IMaging) file format. This
- * code may need some slight modifications to be used outside of the WIM format.
- * In particular, in other situations the LZX block header might be slightly
- * different, and a sliding window rather than a fixed-size window might be
- * required.
+ * This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
+ * compression format, as used in the WIM (Windows IMaging) file format.
*
- * ----------------------------------------------------------------------------
+ * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
+ * "Near-optimal" is significantly slower than "lazy", but results in a better
+ * compression ratio. The "near-optimal" algorithm is used at the default
+ * compression level.
*
- * Format Overview
+ * This file may need some slight modifications to be used outside of the WIM
+ * format. In particular, in other situations the LZX block header might be
+ * slightly different, and a sliding window rather than a fixed-size window
+ * might be required.
*
- * The primary reference for LZX is the specification released by Microsoft.
- * However, the comments in lzx-decompress.c provide more information about LZX
- * and note some errors in the Microsoft specification.
- *
- * LZX shares many similarities with DEFLATE, the format used by zlib and gzip.
- * Both LZX and DEFLATE use LZ77 matching and Huffman coding. Certain details
- * are quite similar, such as the method for storing Huffman codes. However,
- * the main differences are:
+ * Note: LZX is a compression format closely related to DEFLATE, the format
+ * used by zlib and gzip.  Both LZX and DEFLATE use LZ77 matching and Huffman
+ * coding.
+ * Certain details are quite similar, such as the method for storing Huffman
+ * codes. However, the main differences are:
*
* - LZX preprocesses the data to attempt to make x86 machine code slightly more
* compressible before attempting to compress it further.
*
* - LZX uses a "main" alphabet which combines literals and matches, with the
* match symbols containing a "length header" (giving all or part of the match
- * length) and a "position slot" (giving, roughly speaking, the order of
+ * length) and an "offset slot" (giving, roughly speaking, the order of
* magnitude of the match offset).
*
* - LZX does not have static Huffman blocks (that is, the kind with preset
* Huffman codes); however it does have two types of dynamic Huffman blocks
* ("verbatim" and "aligned").
*
- * - LZX has a minimum match length of 2 rather than 3.
- *
- * - In LZX, match offsets 0 through 2 actually represent entries in an LRU
- * queue of match offsets. This is very useful for certain types of files,
- * such as binary files that have repeating records.
- *
- * ----------------------------------------------------------------------------
- *
- * Algorithmic Overview
- *
- * At a high level, any implementation of LZX compression must operate as
- * follows:
- *
- * 1. Preprocess the input data to translate the targets of 32-bit x86 call
- * instructions to absolute offsets. (Actually, this is required for WIM,
- * but might not be in other places LZX is used.)
- *
- * 2. Find a sequence of LZ77-style matches and literal bytes that expands to
- * the preprocessed data.
- *
- * 3. Divide the match/literal sequence into one or more LZX blocks, each of
- * which may be "uncompressed", "verbatim", or "aligned".
- *
- * 4. Output each LZX block.
- *
- * Step (1) is fairly straightforward. It requires looking for 0xe8 bytes in
- * the input data and performing a translation on the 4 bytes following each
- * one.
- *
- * Step (4) is complicated, but it is mostly determined by the LZX format. The
- * only real choice we have is what algorithm to use to build the length-limited
- * canonical Huffman codes. See lzx_write_all_blocks() for details.
- *
- * That leaves steps (2) and (3) as where all the hard stuff happens. Focusing
- * on step (2), we need to do LZ77-style parsing on the input data, or "window",
- * to divide it into a sequence of matches and literals. Each position in the
- * window might have multiple matches associated with it, and we need to choose
- * which one, if any, to actually use. Therefore, the problem can really be
- * divided into two areas of concern: (a) finding matches at a given position,
- * which we shall call "match-finding", and (b) choosing whether to use a
- * match or a literal at a given position, and if using a match, which one (if
- * there is more than one available). We shall call this "match-choosing". We
- * first consider match-finding, then match-choosing.
- *
- * ----------------------------------------------------------------------------
- *
- * Match-finding
+ * - LZX has a minimum match length of 2 rather than 3. Length 2 matches can be
+ * useful, but generally only if the parser is smart about choosing them.
*
- * Given a position in the window, we want to find LZ77-style "matches" with
- * that position at previous positions in the window. With LZX, the minimum
- * match length is 2 and the maximum match length is 257. The only restriction
- * on offsets is that LZX does not allow the last 2 bytes of the window to match
- * the the beginning of the window.
- *
- * There are a number of algorithms that can be used for this, including hash
- * chains, binary trees, and suffix arrays. Binary trees generally work well
- * for LZX compression since it uses medium-size windows (2^15 to 2^21 bytes).
- * However, when compressing in a fast mode where many positions are skipped
- * (not searched for matches), hash chains are faster.
- *
- * Since the match-finders are not specific to LZX, I will not explain them in
- * detail here. Instead, see lz_hash_chains.c and lz_binary_trees.c.
- *
- * ----------------------------------------------------------------------------
- *
- * Match-choosing
- *
- * Usually, choosing the longest match is best because it encodes the most data
- * in that one item. However, sometimes the longest match is not optimal
- * because (a) choosing a long match now might prevent using an even longer
- * match later, or (b) more generally, what we actually care about is the number
- * of bits it will ultimately take to output each match or literal, which is
- * actually dependent on the entropy encoding using by the underlying
- * compression format. Consequently, a longer match usually, but not always,
- * takes fewer bits to encode than multiple shorter matches or literals that
- * cover the same data.
- *
- * This problem of choosing the truly best match/literal sequence is probably
- * impossible to solve efficiently when combined with entropy encoding. If we
- * knew how many bits it takes to output each match/literal, then we could
- * choose the optimal sequence using shortest-path search a la Dijkstra's
- * algorithm. However, with entropy encoding, the chosen match/literal sequence
- * affects its own encoding. Therefore, we can't know how many bits it will
- * take to actually output any one match or literal until we have actually
- * chosen the full sequence of matches and literals.
- *
- * Notwithstanding the entropy encoding problem, we also aren't guaranteed to
- * choose the optimal match/literal sequence unless the match-finder (see
- * section "Match-finder") provides the match-chooser with all possible matches
- * at each position. However, this is not computationally efficient. For
- * example, there might be many matches of the same length, and usually (but not
- * always) the best choice is the one with the smallest offset. So in practice,
- * it's fine to only consider the smallest offset for a given match length at a
- * given position. (Actually, for LZX, it's also worth considering repeat
- * offsets.)
- *
- * In addition, as mentioned earlier, in LZX we have the choice of using
- * multiple blocks, each of which resets the Huffman codes. This expands the
- * search space even further. Therefore, to simplify the problem, we currently
- * we don't attempt to actually choose the LZX blocks based on the data.
- * Instead, we just divide the data into fixed-size blocks of LZX_DIV_BLOCK_SIZE
- * bytes each, and always use verbatim or aligned blocks (never uncompressed).
- * A previous version of this code recursively split the input data into
- * equal-sized blocks, up to a maximum depth, and chose the lowest-cost block
- * divisions. However, this made compression much slower and did not actually
- * help very much. It remains an open question whether a sufficiently fast and
- * useful block-splitting algorithm is possible for LZX. Essentially the same
- * problem also applies to DEFLATE. The Microsoft LZX compressor seemingly does
- * do block splitting, although I don't know how fast or useful it is,
- * specifically.
- *
- * Now, back to the entropy encoding problem. The "solution" is to use an
- * iterative approach to compute a good, but not necessarily optimal,
- * match/literal sequence. Start with a fixed assignment of symbol costs and
- * choose an "optimal" match/literal sequence based on those costs, using
- * shortest-path seach a la Dijkstra's algorithm. Then, for each iteration of
- * the optimization, update the costs based on the entropy encoding of the
- * current match/literal sequence, then choose a new match/literal sequence
- * based on the updated costs. Usually, the actual cost to output the current
- * match/literal sequence will decrease in each iteration until it converges on
- * a fixed point. This result may not be the truly optimal match/literal
- * sequence, but it usually is much better than one chosen by doing a "greedy"
- * parse where we always chooe the longest match.
- *
- * An alternative to both greedy parsing and iterative, near-optimal parsing is
- * "lazy" parsing. Briefly, "lazy" parsing considers just the longest match at
- * each position, but it waits to choose that match until it has also examined
- * the next position. This is actually a useful approach; it's used by zlib,
- * for example. Therefore, for fast compression we combine lazy parsing with
- * the hash chain max-finder. For normal/high compression we combine
- * near-optimal parsing with the binary tree match-finder.
+ * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
+ * of match offsets. This is very useful for certain types of files, such as
+ * binary files that have repeating records.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
-#include "wimlib/compressor_ops.h"
#include "wimlib/compress_common.h"
+#include "wimlib/compressor_ops.h"
+#include "wimlib/endianness.h"
#include "wimlib/error.h"
#include "wimlib/lz_mf.h"
+#include "wimlib/lz_repsearch.h"
#include "wimlib/lzx.h"
#include "wimlib/util.h"
+
#include <string.h>
+#include <limits.h>
#define LZX_OPTIM_ARRAY_LENGTH 4096
#define LZX_CACHE_LEN (LZX_DIV_BLOCK_SIZE * (LZX_CACHE_PER_POS + 1))
-/* Codewords for the LZX main, length, and aligned offset Huffman codes */
+struct lzx_compressor;
+
+/* Codewords for the LZX Huffman codes. */
struct lzx_codewords {
u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
u32 len[LZX_LENCODE_NUM_SYMBOLS];
u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
-/* Codeword lengths (in bits) for the LZX main, length, and aligned offset
- * Huffman codes.
- *
- * A 0 length means the codeword has zero frequency.
- */
+/* Codeword lengths (in bits) for the LZX Huffman codes.
+ * A zero length means the corresponding codeword has zero frequency. */
struct lzx_lens {
u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
u8 len[LZX_LENCODE_NUM_SYMBOLS];
u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
-/* Costs for the LZX main, length, and aligned offset Huffman symbols.
- *
- * If a codeword has zero frequency, it must still be assigned some nonzero cost
- * --- generally a high cost, since even if it gets used in the next iteration,
- * it probably will not be used very many times. */
+/* Estimated cost, in bits, to output each symbol in the LZX Huffman codes. */
struct lzx_costs {
u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
u8 len[LZX_LENCODE_NUM_SYMBOLS];
u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
-/* The LZX main, length, and aligned offset Huffman codes */
+/* Codewords and lengths for the LZX Huffman codes. */
struct lzx_codes {
struct lzx_codewords codewords;
struct lzx_lens lens;
};
-/* Tables for tallying symbol frequencies in the three LZX alphabets */
+/* Symbol frequency counters for the LZX Huffman codes. */
struct lzx_freqs {
u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
u32 len[LZX_LENCODE_NUM_SYMBOLS];
u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
-/* LZX intermediate match/literal format */
+/* Intermediate LZX match/literal format */
struct lzx_item {
- /* Bit Description
- *
- * 31 1 if a match, 0 if a literal.
- *
- * 30-25 position slot. This can be at most 50, so it will fit in 6
- * bits.
- *
- * 8-24 position footer. This is the offset of the real formatted
- * offset from the position base. This can be at most 17 bits
- * (since lzx_extra_bits[LZX_MAX_POSITION_SLOTS - 1] is 17).
- *
- * 0-7 length of match, minus 2. This can be at most
- * (LZX_MAX_MATCH_LEN - 2) == 255, so it will fit in 8 bits. */
- u32 data;
-};
-
-/* Specification for an LZX block. */
-struct lzx_block_spec {
-
- /* One of the LZX_BLOCKTYPE_* constants indicating which type of this
- * block. */
- int block_type;
-
- /* 0-based position in the window at which this block starts. */
- u32 window_pos;
-
- /* The number of bytes of uncompressed data this block represents. */
- u32 block_size;
-
- /* The match/literal sequence for this block. */
- struct lzx_item *chosen_items;
-
- /* The length of the @chosen_items sequence. */
- u32 num_chosen_items;
- /* Huffman codes for this block. */
- struct lzx_codes codes;
+ /* Bits 0 - 9: Main symbol
+ * Bits 10 - 17: Length symbol
+ * Bits 18 - 22: Number of extra offset bits
+ * Bits 23+ : Extra offset bits */
+ u64 data;
};
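+
+/* For example (illustrative only; the variable names are placeholders), a
+ * literal is stored with just its main symbol, data == main_symbol, while a
+ * match is packed per the bit layout documented above:
+ *
+ *	item.data = (u64)main_symbol |
+ *		    ((u64)len_symbol << 10) |
+ *		    ((u64)num_extra_bits << 18) |
+ *		    ((u64)extra_bits << 23);
+ */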
-struct lzx_compressor;
-
+/* Internal compression parameters */
struct lzx_compressor_params {
- struct lz_match (*choose_item_func)(struct lzx_compressor *);
- enum lz_mf_algo mf_algo;
+ u32 (*choose_items_for_block)(struct lzx_compressor *, u32, u32);
u32 num_optim_passes;
+ enum lz_mf_algo mf_algo;
u32 min_match_length;
u32 nice_match_length;
u32 max_search_depth;
};
-/* State of the LZX compressor. */
-struct lzx_compressor {
+/*
+ * Match chooser position data:
+ *
+ * An array of these structures is used during the near-optimal match-choosing
+ * algorithm. They correspond to consecutive positions in the window and are
+ * used to keep track of the cost to reach each position, and the match/literal
+ * choices that need to be chosen to reach that position.
+ */
+struct lzx_mc_pos_data {
+
+ /* The cost, in bits, of the lowest-cost path that has been found to
+ * reach this position. This can change as progressively lower cost
+ * paths are found to reach this position. */
+ u32 cost;
+#define MC_INFINITE_COST UINT32_MAX
- /* The buffer of data to be compressed.
+ /* The match or literal that was taken to reach this position. This can
+ * change as progressively lower cost paths are found to reach this
+ * position.
+ *
+ * This variable is divided into two bitfields.
+ *
+ * Literals:
+ * Low bits are 1, high bits are the literal.
+ *
+ * Explicit offset matches:
+ * Low bits are the match length, high bits are the offset plus 2.
*
- * 0xe8 byte preprocessing is done directly on the data here before
- * further compression.
+ * Repeat offset matches:
+ * Low bits are the match length, high bits are the queue index.
+ */
+ u32 mc_item_data;
+#define MC_OFFSET_SHIFT 9
+#define MC_LEN_MASK ((1 << MC_OFFSET_SHIFT) - 1)
+
+ /* The state of the LZX recent match offsets queue at this position.
+ * This is filled in lazily, only after the minimum-cost path to this
+ * position is found.
*
- * Note that this compressor does *not* use a real sliding window!!!!
- * It's not needed in the WIM format, since every chunk is compressed
- * independently. This is by design, to allow random access to the
- * chunks. */
+ * Note: the way we handle this adaptive state in the "minimum-cost"
+ * parse is actually only an approximation. It's possible for the
+ * globally optimal, minimum cost path to contain a prefix, ending at a
+ * position, where that path prefix is *not* the minimum cost path to
+ * that position. This can happen if such a path prefix results in a
+ * different adaptive state which results in lower costs later. We do
+ * not solve this problem; we only consider the lowest cost to reach
+ * each position, which seems to be an acceptable approximation. */
+ struct lzx_lru_queue queue _aligned_attribute(16);
+
+} _aligned_attribute(16);
+
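+/* For example, following the mc_item_data layout described above:
+ *
+ *	literal:                mc_item_data = (literal << MC_OFFSET_SHIFT) | 1;
+ *	explicit offset match:  mc_item_data = ((offset + 2) << MC_OFFSET_SHIFT) | len;
+ *	repeat offset match:    mc_item_data = (queue_idx << MC_OFFSET_SHIFT) | len;
+ *
+ * (Illustrative only; 'literal', 'offset', 'len', and 'queue_idx' stand for
+ * whatever values the parser chose.)
+ */
+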
+/* State of the LZX compressor */
+struct lzx_compressor {
+
+ /* Internal compression parameters */
+ struct lzx_compressor_params params;
+
+ /* The preprocessed buffer of data being compressed */
u8 *cur_window;
/* Number of bytes of data to be compressed, which is the number of
* bytes of data in @cur_window that are actually valid. */
u32 cur_window_size;
- /* Allocated size of @cur_window. */
- u32 max_window_size;
+ /* log2 order of the LZX window size for LZ match offset encoding
+ * purposes. Will be >= LZX_MIN_WINDOW_ORDER and <=
+ * LZX_MAX_WINDOW_ORDER.
+ *
+ * Note: 1 << @window_order is normally equal to @max_window_size,
+ * a.k.a. the allocated size of @cur_window, but it will be greater than
+ * @max_window_size in the event that the compressor was created with a
+ * non-power-of-2 block size. (See lzx_get_window_order().) */
+ unsigned window_order;
+
+ /* Number of symbols in the main alphabet. This depends on
+ * @window_order, since @window_order determines the maximum possible
+ * offset. It does not, however, depend on the *actual* size of the
+ * current data buffer being processed, which might be less than 1 <<
+ * @window_order. */
+ unsigned num_main_syms;
- /* Compression parameters. */
- struct lzx_compressor_params params;
+ /* Lempel-Ziv match-finder */
+ struct lz_mf *mf;
+ /* Match-finder wrapper functions and data for near-optimal parsing.
+ *
+ * When doing more than one match-choosing pass over the data, matches
+ * found by the match-finder are cached to achieve a slight speedup when
+ * the same matches are needed on subsequent passes. This is suboptimal
+ * because different matches may be preferred with different cost
+ * models, but it is a very worthwhile speedup. */
unsigned (*get_matches_func)(struct lzx_compressor *, const struct lz_match **);
void (*skip_bytes_func)(struct lzx_compressor *, unsigned n);
+ u32 match_window_pos;
+ u32 match_window_end;
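+
+	/* The match cache.  Each position's record is one 'struct lz_match'
+	 * whose 'len' field holds the number of matches found at that
+	 * position, followed by that many matches.  (See
+	 * lzx_get_matches_fillcache_singleblock() below.) */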
+ struct lz_match *cached_matches;
+ struct lz_match *cache_ptr;
+ struct lz_match *cache_limit;
- /* Number of symbols in the main alphabet (depends on the
- * @max_window_size since it determines the maximum allowed offset). */
- unsigned num_main_syms;
+ /* Position data for near-optimal parsing. */
+ struct lzx_mc_pos_data optimum[LZX_OPTIM_ARRAY_LENGTH + LZX_MAX_MATCH_LEN];
+
+ /* The cost model currently being used for near-optimal parsing. */
+ struct lzx_costs costs;
/* The current match offset LRU queue. */
struct lzx_lru_queue queue;
- /* Space for the sequences of matches/literals that were chosen for each
- * block. */
- struct lzx_item *chosen_items;
+ /* Frequency counters for the current block. */
+ struct lzx_freqs freqs;
- /* Information about the LZX blocks the preprocessed input was divided
- * into. */
- struct lzx_block_spec *block_specs;
+ /* The Huffman codes for the current and previous blocks. */
+ struct lzx_codes codes[2];
- /* Number of LZX blocks the input was divided into; a.k.a. the number of
- * elements of @block_specs that are valid. */
- unsigned num_blocks;
+ /* Which 'struct lzx_codes' is being used for the current block. The
+ * other was used for the previous block (if this isn't the first
+ * block). */
+ unsigned int codes_index;
- /* This is simply filled in with zeroes and used to avoid special-casing
- * the output of the first compressed Huffman code, which conceptually
- * has a delta taken from a code with all symbols having zero-length
- * codewords. */
- struct lzx_codes zero_codes;
+ /* Dummy lengths that are always 0. */
+ struct lzx_lens zero_lens;
- /* The current cost model. */
- struct lzx_costs costs;
+ /* Matches/literals that were chosen for the current block. */
+ struct lzx_item chosen_items[LZX_DIV_BLOCK_SIZE];
- /* Lempel-Ziv match-finder. */
- struct lz_mf *mf;
+ /* Table mapping match offset => offset slot for small offsets */
+#define LZX_NUM_FAST_OFFSETS 32768
+ u8 offset_slot_fast[LZX_NUM_FAST_OFFSETS];
+};
- /* Position in window of next match to return. */
- u32 match_window_pos;
+/*
+ * Structure to keep track of the current state of sending bits to the
+ * compressed output buffer.
+ *
+ * The LZX bitstream is encoded as a sequence of 16-bit coding units.
+ */
+struct lzx_output_bitstream {
- /* The end-of-block position. We can't allow any matches to span this
- * position. */
- u32 match_window_end;
+ /* Bits that haven't yet been written to the output buffer. */
+ u32 bitbuf;
- /* When doing more than one match-choosing pass over the data, matches
- * found by the match-finder are cached in the following array to
- * achieve a slight speedup when the same matches are needed on
- * subsequent passes. This is suboptimal because different matches may
- * be preferred with different cost models, but seems to be a worthwhile
- * speedup. */
- struct lz_match *cached_matches;
- struct lz_match *cache_ptr;
- struct lz_match *cache_limit;
+ /* Number of bits currently held in @bitbuf. */
+ u32 bitcount;
- /* Match-chooser state, used when doing near-optimal parsing.
- *
- * When matches have been chosen, optimum_cur_idx is set to the position
- * in the window of the next match/literal to return and optimum_end_idx
- * is set to the position in the window at the end of the last
- * match/literal to return. */
- struct lzx_mc_pos_data *optimum;
- unsigned optimum_cur_idx;
- unsigned optimum_end_idx;
-
- /* Previous match, used when doing lazy parsing. */
- struct lz_match prev_match;
+ /* Pointer to the start of the output buffer. */
+ le16 *start;
+
+ /* Pointer to the position in the output buffer at which the next coding
+ * unit should be written. */
+ le16 *next;
+
+ /* Pointer past the end of the output buffer. */
+ le16 *end;
};
/*
- * Match chooser position data:
- *
- * An array of these structures is used during the match-choosing algorithm.
- * They correspond to consecutive positions in the window and are used to keep
- * track of the cost to reach each position, and the match/literal choices that
- * need to be chosen to reach that position.
+ * Initialize the output bitstream.
+ *
+ * @os
+ * The output bitstream structure to initialize.
+ * @buffer
+ * The buffer being written to.
+ * @size
+ * Size of @buffer, in bytes.
*/
-struct lzx_mc_pos_data {
- /* The approximate minimum cost, in bits, to reach this position in the
- * window which has been found so far. */
- u32 cost;
-#define MC_INFINITE_COST ((u32)~0UL)
-
- /* The union here is just for clarity, since the fields are used in two
- * slightly different ways. Initially, the @prev structure is filled in
- * first, and links go from later in the window to earlier in the
- * window. Later, @next structure is filled in and links go from
- * earlier in the window to later in the window. */
- union {
- struct {
- /* Position of the start of the match or literal that
- * was taken to get to this position in the approximate
- * minimum-cost parse. */
- u32 link;
-
- /* Offset (as in an LZ (length, offset) pair) of the
- * match or literal that was taken to get to this
- * position in the approximate minimum-cost parse. */
- u32 match_offset;
- } prev;
- struct {
- /* Position at which the match or literal starting at
- * this position ends in the minimum-cost parse. */
- u32 link;
-
- /* Offset (as in an LZ (length, offset) pair) of the
- * match or literal starting at this position in the
- * approximate minimum-cost parse. */
- u32 match_offset;
- } next;
- };
-
- /* Adaptive state that exists after an approximate minimum-cost path to
- * reach this position is taken. */
- struct lzx_lru_queue queue;
-};
+static void
+lzx_init_output(struct lzx_output_bitstream *os, void *buffer, u32 size)
+{
+ os->bitbuf = 0;
+ os->bitcount = 0;
+ os->start = buffer;
+ os->next = os->start;
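+	/* Round the buffer size down to a whole number of 16-bit coding
+	 * units; an odd trailing byte, if any, is never written. */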
+ os->end = os->start + size / sizeof(le16);
+}
-/* Returns the LZX position slot that corresponds to a given match offset,
- * taking into account the recent offset queue and updating it if the offset is
- * found in it. */
-static unsigned
-lzx_get_position_slot(u32 offset, struct lzx_lru_queue *queue)
+/*
+ * Write some bits to the output bitstream.
+ *
+ * The bits are given by the low-order @num_bits bits of @bits. Higher-order
+ * bits in @bits cannot be set. At most 17 bits can be written at once.
+ *
+ * @max_num_bits is a compile-time constant that specifies the maximum number of
+ * bits that can ever be written at the call site. Currently, it is used to
+ * optimize away the conditional code for writing a second 16-bit coding unit
+ * when writing fewer than 17 bits.
+ *
+ * If the output buffer space is exhausted, then the bits will be ignored, and
+ * lzx_flush_output() will return 0 when it gets called.
+ */
+static inline void
+lzx_write_varbits(struct lzx_output_bitstream *os,
+ const u32 bits, const unsigned int num_bits,
+ const unsigned int max_num_bits)
{
- unsigned position_slot;
-
- /* See if the offset was recently used. */
- for (int i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
- if (offset == queue->R[i]) {
- /* Found it. */
-
- /* Bring the repeat offset to the front of the
- * queue. Note: this is, in fact, not a real
- * LRU queue because repeat matches are simply
- * swapped to the front. */
- swap(queue->R[0], queue->R[i]);
-
- /* The resulting position slot is simply the first index
- * at which the offset was found in the queue. */
- return i;
+ /* This code is optimized for LZX, which never needs to write more than
+ * 17 bits at once. */
+ LZX_ASSERT(num_bits <= 17);
+ LZX_ASSERT(num_bits <= max_num_bits);
+ LZX_ASSERT(os->bitcount <= 15);
+
+ /* Add the bits to the bit buffer variable. @bitcount will be at most
+ * 15, so there will be just enough space for the maximum possible
+ * @num_bits of 17. */
+ os->bitcount += num_bits;
+ os->bitbuf = (os->bitbuf << num_bits) | bits;
+
+ /* Check whether any coding units need to be written. */
+ if (os->bitcount >= 16) {
+
+ os->bitcount -= 16;
+
+ /* Write a coding unit, unless it would overflow the buffer. */
+ if (os->next != os->end)
+ put_unaligned_u16_le(os->bitbuf >> os->bitcount, os->next++);
+
+ /* If writing 17 bits, a second coding unit might need to be
+ * written. But because 'max_num_bits' is a compile-time
+ * constant, the compiler will optimize away this code at most
+ * call sites. */
+ if (max_num_bits == 17 && os->bitcount == 16) {
+ if (os->next != os->end)
+ put_unaligned_u16_le(os->bitbuf, os->next++);
+ os->bitcount = 0;
}
}
+}
+
+/* Use when @num_bits is a compile-time constant. Otherwise use
+ * lzx_write_varbits(). */
+static inline void
+lzx_write_bits(struct lzx_output_bitstream *os,
+ const u32 bits, const unsigned int num_bits)
+{
+ lzx_write_varbits(os, bits, num_bits, num_bits);
+}
- /* The offset was not recently used; look up its real position slot. */
- position_slot = lzx_get_position_slot_raw(offset + LZX_OFFSET_OFFSET);
+/*
+ * Flush the last coding unit to the output buffer if needed. Return the total
+ * number of bytes written to the output buffer, or 0 if an overflow occurred.
+ */
+static u32
+lzx_flush_output(struct lzx_output_bitstream *os)
+{
+ if (os->next == os->end)
+ return 0;
- /* Bring the new offset to the front of the queue. */
- for (int i = LZX_NUM_RECENT_OFFSETS - 1; i > 0; i--)
- queue->R[i] = queue->R[i - 1];
- queue->R[0] = offset;
+ if (os->bitcount != 0)
+ put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next++);
- return position_slot;
+ return (const u8 *)os->next - (const u8 *)os->start;
}
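+
+/*
+ * Example usage of the output bitstream (an illustrative sketch only; the
+ * buffer names are placeholders, and the real callers are the block output
+ * functions later in this file):
+ *
+ *	struct lzx_output_bitstream os;
+ *	u32 compressed_size;
+ *
+ *	lzx_init_output(&os, compressed_data, compressed_size_avail);
+ *	lzx_write_bits(&os, LZX_BLOCKTYPE_VERBATIM, 3);
+ *	... more lzx_write_bits() / lzx_write_varbits() calls ...
+ *	compressed_size = lzx_flush_output(&os);
+ *
+ * A return value of 0 from lzx_flush_output() indicates that the output
+ * buffer overflowed.
+ */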
/* Build the main, length, and aligned offset Huffman codes used in LZX.
* This takes as input the frequency tables for each code and produces as output
* a set of tables that map symbols to codewords and codeword lengths. */
static void
-lzx_make_huffman_codes(const struct lzx_freqs *freqs,
- struct lzx_codes *codes,
+lzx_make_huffman_codes(const struct lzx_freqs *freqs, struct lzx_codes *codes,
unsigned num_main_syms)
{
make_canonical_huffman_code(num_main_syms,
codes->codewords.aligned);
}
-/*
- * Output a precomputed LZX match.
- *
- * @out:
- * The bitstream to which to write the match.
- * @block_type:
- * The type of the LZX block (LZX_BLOCKTYPE_ALIGNED or
- * LZX_BLOCKTYPE_VERBATIM)
- * @match:
- * The match data.
- * @codes:
- * Pointer to a structure that contains the codewords for the main, length,
- * and aligned offset Huffman codes for the current LZX compressed block.
- */
-static void
-lzx_write_match(struct output_bitstream *out, int block_type,
- struct lzx_item match, const struct lzx_codes *codes)
+static unsigned
+lzx_compute_precode_items(const u8 lens[restrict],
+ const u8 prev_lens[restrict],
+ const unsigned num_lens,
+ u32 precode_freqs[restrict],
+ unsigned precode_items[restrict])
{
- /* low 8 bits are the match length minus 2 */
- unsigned match_len_minus_2 = match.data & 0xff;
- /* Next 17 bits are the position footer */
- unsigned position_footer = (match.data >> 8) & 0x1ffff; /* 17 bits */
- /* Next 6 bits are the position slot. */
- unsigned position_slot = (match.data >> 25) & 0x3f; /* 6 bits */
- unsigned len_header;
- unsigned len_footer;
- unsigned main_symbol;
- unsigned num_extra_bits;
- unsigned verbatim_bits;
- unsigned aligned_bits;
-
- /* If the match length is less than MIN_MATCH_LEN (= 2) +
- * NUM_PRIMARY_LENS (= 7), the length header contains
- * the match length minus MIN_MATCH_LEN, and there is no
- * length footer.
- *
- * Otherwise, the length header contains
- * NUM_PRIMARY_LENS, and the length footer contains
- * the match length minus NUM_PRIMARY_LENS minus
- * MIN_MATCH_LEN. */
- if (match_len_minus_2 < LZX_NUM_PRIMARY_LENS) {
- len_header = match_len_minus_2;
- } else {
- len_header = LZX_NUM_PRIMARY_LENS;
- len_footer = match_len_minus_2 - LZX_NUM_PRIMARY_LENS;
- }
+ unsigned *itemptr;
+ unsigned run_start;
+ unsigned run_end;
+ unsigned extra_bits;
+ int delta;
+ u8 len;
+
+ itemptr = precode_items;
+ run_start = 0;
+ do {
+ /* Find the next run of codeword lengths. */
- /* Combine the position slot with the length header into a single symbol
- * that will be encoded with the main code.
- *
- * The actual main symbol is offset by LZX_NUM_CHARS because values
- * under LZX_NUM_CHARS are used to indicate a literal byte rather than a
- * match. */
- main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
-
- /* Output main symbol. */
- bitstream_put_bits(out, codes->codewords.main[main_symbol],
- codes->lens.main[main_symbol]);
-
- /* If there is a length footer, output it using the
- * length Huffman code. */
- if (len_header == LZX_NUM_PRIMARY_LENS)
- bitstream_put_bits(out, codes->codewords.len[len_footer],
- codes->lens.len[len_footer]);
-
- num_extra_bits = lzx_get_num_extra_bits(position_slot);
-
- /* For aligned offset blocks with at least 3 extra bits, output the
- * verbatim bits literally, then the aligned bits encoded using the
- * aligned offset code. Otherwise, only the verbatim bits need to be
- * output. */
- if ((block_type == LZX_BLOCKTYPE_ALIGNED) && (num_extra_bits >= 3)) {
-
- verbatim_bits = position_footer >> 3;
- bitstream_put_bits(out, verbatim_bits,
- num_extra_bits - 3);
-
- aligned_bits = (position_footer & 7);
- bitstream_put_bits(out,
- codes->codewords.aligned[aligned_bits],
- codes->lens.aligned[aligned_bits]);
- } else {
- /* verbatim bits is the same as the position
- * footer, in this case. */
- bitstream_put_bits(out, position_footer, num_extra_bits);
- }
-}
+ /* len = the length being repeated */
+ len = lens[run_start];
-/* Output an LZX literal (encoded with the main Huffman code). */
-static void
-lzx_write_literal(struct output_bitstream *out, u8 literal,
- const struct lzx_codes *codes)
-{
- bitstream_put_bits(out,
- codes->codewords.main[literal],
- codes->lens.main[literal]);
-}
+ run_end = run_start + 1;
-static unsigned
-lzx_build_precode(const u8 lens[restrict],
- const u8 prev_lens[restrict],
- const unsigned num_syms,
- u32 precode_freqs[restrict LZX_PRECODE_NUM_SYMBOLS],
- u8 output_syms[restrict num_syms],
- u8 precode_lens[restrict LZX_PRECODE_NUM_SYMBOLS],
- u32 precode_codewords[restrict LZX_PRECODE_NUM_SYMBOLS],
- unsigned *num_additional_bits_ret)
-{
- memset(precode_freqs, 0,
- LZX_PRECODE_NUM_SYMBOLS * sizeof(precode_freqs[0]));
-
- /* Since the code word lengths use a form of RLE encoding, the goal here
- * is to find each run of identical lengths when going through them in
- * symbol order (including runs of length 1). For each run, as many
- * lengths are encoded using RLE as possible, and the rest are output
- * literally.
- *
- * output_syms[] will be filled in with the length symbols that will be
- * output, including RLE codes, not yet encoded using the precode.
- *
- * cur_run_len keeps track of how many code word lengths are in the
- * current run of identical lengths. */
- unsigned output_syms_idx = 0;
- unsigned cur_run_len = 1;
- unsigned num_additional_bits = 0;
- for (unsigned i = 1; i <= num_syms; i++) {
-
- if (i != num_syms && lens[i] == lens[i - 1]) {
- /* Still in a run--- keep going. */
- cur_run_len++;
+ /* Fast case for a single length. */
+ if (likely(run_end == num_lens || len != lens[run_end])) {
+ delta = prev_lens[run_start] - len;
+ if (delta < 0)
+ delta += 17;
+ precode_freqs[delta]++;
+ *itemptr++ = delta;
+ run_start++;
continue;
}
- /* Run ended! Check if it is a run of zeroes or a run of
- * nonzeroes. */
-
- /* The symbol that was repeated in the run--- not to be confused
- * with the length *of* the run (cur_run_len) */
- unsigned len_in_run = lens[i - 1];
-
- if (len_in_run == 0) {
- /* A run of 0's. Encode it in as few length
- * codes as we can. */
+ /* Extend the run. */
+ do {
+ run_end++;
+ } while (run_end != num_lens && len == lens[run_end]);
- /* The magic length 18 indicates a run of 20 + n zeroes,
- * where n is an uncompressed literal 5-bit integer that
- * follows the magic length. */
- while (cur_run_len >= 20) {
- unsigned additional_bits;
+ if (len == 0) {
+ /* Run of zeroes. */
- additional_bits = min(cur_run_len - 20, 0x1f);
- num_additional_bits += 5;
+ /* Symbol 18: RLE 20 to 51 zeroes at a time. */
+ while ((run_end - run_start) >= 20) {
+ extra_bits = min((run_end - run_start) - 20, 0x1f);
precode_freqs[18]++;
- output_syms[output_syms_idx++] = 18;
- output_syms[output_syms_idx++] = additional_bits;
- cur_run_len -= 20 + additional_bits;
+ *itemptr++ = 18 | (extra_bits << 5);
+ run_start += 20 + extra_bits;
}
- /* The magic length 17 indicates a run of 4 + n zeroes,
- * where n is an uncompressed literal 4-bit integer that
- * follows the magic length. */
- while (cur_run_len >= 4) {
- unsigned additional_bits;
-
- additional_bits = min(cur_run_len - 4, 0xf);
- num_additional_bits += 4;
+ /* Symbol 17: RLE 4 to 19 zeroes at a time. */
+ if ((run_end - run_start) >= 4) {
+ extra_bits = min((run_end - run_start) - 4, 0xf);
precode_freqs[17]++;
- output_syms[output_syms_idx++] = 17;
- output_syms[output_syms_idx++] = additional_bits;
- cur_run_len -= 4 + additional_bits;
+ *itemptr++ = 17 | (extra_bits << 5);
+ run_start += 4 + extra_bits;
}
-
} else {
/* A run of nonzero lengths. */
- /* The magic length 19 indicates a run of 4 + n
- * nonzeroes, where n is a literal bit that follows the
- * magic length, and where the value of the lengths in
- * the run is given by an extra length symbol, encoded
- * with the precode, that follows the literal bit.
- *
- * The extra length symbol is encoded as a difference
- * from the length of the codeword for the first symbol
- * in the run in the previous code.
- * */
- while (cur_run_len >= 4) {
- unsigned additional_bits;
- signed char delta;
-
- additional_bits = (cur_run_len > 4);
- num_additional_bits += 1;
- delta = (signed char)prev_lens[i - cur_run_len] -
- (signed char)len_in_run;
+ /* Symbol 19: RLE 4 to 5 of any length at a time. */
+ while ((run_end - run_start) >= 4) {
+ extra_bits = (run_end - run_start) > 4;
+ delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
precode_freqs[19]++;
- precode_freqs[(unsigned char)delta]++;
- output_syms[output_syms_idx++] = 19;
- output_syms[output_syms_idx++] = additional_bits;
- output_syms[output_syms_idx++] = delta;
- cur_run_len -= 4 + additional_bits;
+ precode_freqs[delta]++;
+ *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
+ run_start += 4 + extra_bits;
}
}
- /* Any remaining lengths in the run are outputted without RLE,
- * as a difference from the length of that codeword in the
- * previous code. */
- while (cur_run_len > 0) {
- signed char delta;
-
- delta = (signed char)prev_lens[i - cur_run_len] -
- (signed char)len_in_run;
+ /* Output any remaining lengths without RLE. */
+ while (run_start != run_end) {
+ delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
-
- precode_freqs[(unsigned char)delta]++;
- output_syms[output_syms_idx++] = delta;
- cur_run_len--;
+ precode_freqs[delta]++;
+ *itemptr++ = delta;
+ run_start++;
}
+ } while (run_start != num_lens);
- cur_run_len = 1;
- }
-
- /* Build the precode from the frequencies of the length symbols. */
-
- make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
- LZX_MAX_PRE_CODEWORD_LEN,
- precode_freqs, precode_lens,
- precode_codewords);
-
- *num_additional_bits_ret = num_additional_bits;
-
- return output_syms_idx;
+ return itemptr - precode_items;
}
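+
+/* Worked example (illustrative): a run of 25 zero lengths is emitted as the
+ * single item '18 | (5 << 5)', meaning "20 + 5 zeroes", while a run of only 3
+ * identical lengths is too short for symbols 17-19 and falls through to three
+ * plain delta items. */
+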
/*
* as deltas from the codeword lengths of the corresponding code in the previous
* block.
*
- * @out:
+ * @os:
* Bitstream to which to write the compressed Huffman code.
* @lens:
* The codeword lengths, indexed by symbol, in the Huffman code.
* @prev_lens:
* The codeword lengths, indexed by symbol, in the corresponding Huffman
* code in the previous block, or all zeroes if this is the first block.
- * @num_syms:
+ * @num_lens:
* The number of symbols in the Huffman code.
*/
static void
-lzx_write_compressed_code(struct output_bitstream *out,
+lzx_write_compressed_code(struct lzx_output_bitstream *os,
const u8 lens[restrict],
const u8 prev_lens[restrict],
- unsigned num_syms)
+ unsigned num_lens)
{
u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
- u8 output_syms[num_syms];
u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
+ unsigned precode_items[num_lens];
+ unsigned num_precode_items;
+ unsigned precode_item;
+ unsigned precode_sym;
unsigned i;
- unsigned num_output_syms;
- u8 precode_sym;
- unsigned dummy;
-
- num_output_syms = lzx_build_precode(lens,
- prev_lens,
- num_syms,
- precode_freqs,
- output_syms,
- precode_lens,
- precode_codewords,
- &dummy);
-
- /* Write the lengths of the precode codes to the output. */
+
for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
- bitstream_put_bits(out, precode_lens[i],
- LZX_PRECODE_ELEMENT_SIZE);
+ precode_freqs[i] = 0;
- /* Write the length symbols, encoded with the precode, to the output. */
+ /* Compute the "items" (RLE / literal tokens and extra bits) with which
+ * the codeword lengths in the larger code will be output. */
+ num_precode_items = lzx_compute_precode_items(lens,
+ prev_lens,
+ num_lens,
+ precode_freqs,
+ precode_items);
- for (i = 0; i < num_output_syms; ) {
- precode_sym = output_syms[i++];
+ /* Build the precode. */
+ make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
+ LZX_MAX_PRE_CODEWORD_LEN,
+ precode_freqs, precode_lens,
+ precode_codewords);
- bitstream_put_bits(out, precode_codewords[precode_sym],
- precode_lens[precode_sym]);
- switch (precode_sym) {
- case 17:
- bitstream_put_bits(out, output_syms[i++], 4);
- break;
- case 18:
- bitstream_put_bits(out, output_syms[i++], 5);
- break;
- case 19:
- bitstream_put_bits(out, output_syms[i++], 1);
- bitstream_put_bits(out,
- precode_codewords[output_syms[i]],
- precode_lens[output_syms[i]]);
- i++;
- break;
- default:
- break;
+ /* Output the lengths of the codewords in the precode. */
+ for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
+ lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
+
+ /* Output the encoded lengths of the codewords in the larger code. */
+ for (i = 0; i < num_precode_items; i++) {
+ precode_item = precode_items[i];
+ precode_sym = precode_item & 0x1F;
+ lzx_write_varbits(os, precode_codewords[precode_sym],
+ precode_lens[precode_sym],
+ LZX_MAX_PRE_CODEWORD_LEN);
+ if (precode_sym >= 17) {
+ if (precode_sym == 17) {
+ lzx_write_bits(os, precode_item >> 5, 4);
+ } else if (precode_sym == 18) {
+ lzx_write_bits(os, precode_item >> 5, 5);
+ } else {
+ lzx_write_bits(os, (precode_item >> 5) & 1, 1);
+ precode_sym = precode_item >> 6;
+ lzx_write_varbits(os, precode_codewords[precode_sym],
+ precode_lens[precode_sym],
+ LZX_MAX_PRE_CODEWORD_LEN);
+ }
}
}
}
+/* Output a match or literal. */
+static inline void
+lzx_write_item(struct lzx_output_bitstream *os, struct lzx_item item,
+ unsigned ones_if_aligned, const struct lzx_codes *codes)
+{
+ u64 data = item.data;
+ unsigned main_symbol;
+ unsigned len_symbol;
+ unsigned num_extra_bits;
+ u32 extra_bits;
+
+ main_symbol = data & 0x3FF;
+
+ lzx_write_varbits(os, codes->codewords.main[main_symbol],
+ codes->lens.main[main_symbol],
+ LZX_MAX_MAIN_CODEWORD_LEN);
+
+ if (main_symbol < LZX_NUM_CHARS) /* Literal? */
+ return;
+
+ len_symbol = (data >> 10) & 0xFF;
+
+ if (len_symbol != LZX_LENCODE_NUM_SYMBOLS) {
+ lzx_write_varbits(os, codes->codewords.len[len_symbol],
+ codes->lens.len[len_symbol],
+ LZX_MAX_LEN_CODEWORD_LEN);
+ }
+
+ num_extra_bits = (data >> 18) & 0x1F;
+ if (num_extra_bits == 0) /* Small offset or repeat offset match? */
+ return;
+
+ extra_bits = data >> 23;
+
+ /*if (block_type == LZX_BLOCKTYPE_ALIGNED && num_extra_bits >= 3) {*/
+ if ((num_extra_bits & ones_if_aligned) >= 3) {
+
+ /* Aligned offset blocks: The low 3 bits of the extra offset
+ * bits are Huffman-encoded using the aligned offset code. The
+ * remaining bits are output literally. */
+
+ lzx_write_varbits(os, extra_bits >> 3, num_extra_bits - 3, 14);
+
+ lzx_write_varbits(os, codes->codewords.aligned[extra_bits & 7],
+ codes->lens.aligned[extra_bits & 7],
+ LZX_MAX_ALIGNED_CODEWORD_LEN);
+ } else {
+ /* Verbatim blocks, or fewer than 3 extra bits: All extra
+ * offset bits are output literally. */
+ lzx_write_varbits(os, extra_bits, num_extra_bits, 17);
+ }
+}
+
/*
* Write all matches and literal bytes (which were precomputed) in an LZX
* compressed block to the output bitstream in the final compressed
* representation.
*
- * @ostream
+ * @os
* The output bitstream.
* @block_type
* The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
* LZX compressed block.
*/
static void
-lzx_write_items(struct output_bitstream *ostream, int block_type,
+lzx_write_items(struct lzx_output_bitstream *os, int block_type,
const struct lzx_item items[], u32 num_items,
const struct lzx_codes *codes)
{
- for (u32 i = 0; i < num_items; i++) {
- /* The high bit of the 32-bit intermediate representation
- * indicates whether the item is an actual LZ-style match (1) or
- * a literal byte (0). */
- if (items[i].data & 0x80000000)
- lzx_write_match(ostream, block_type, items[i], codes);
- else
- lzx_write_literal(ostream, items[i].data, codes);
- }
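+	/* This mask is all 1's for aligned offset blocks and all 0's for
+	 * verbatim blocks, which lets lzx_write_item() test for the aligned
+	 * case without branching on the block type. */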
+ unsigned ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
+
+ for (u32 i = 0; i < num_items; i++)
+ lzx_write_item(os, items[i], ones_if_aligned, codes);
}
-/* Write an LZX aligned offset or verbatim block to the output. */
+/* Write an LZX aligned offset or verbatim block to the output bitstream. */
static void
lzx_write_compressed_block(int block_type,
- unsigned block_size,
- unsigned max_window_size,
+ u32 block_size,
+ unsigned window_order,
unsigned num_main_syms,
struct lzx_item * chosen_items,
- unsigned num_chosen_items,
+ u32 num_chosen_items,
const struct lzx_codes * codes,
- const struct lzx_codes * prev_codes,
- struct output_bitstream * ostream)
+ const struct lzx_lens * prev_lens,
+ struct lzx_output_bitstream * os)
{
- unsigned i;
-
LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
block_type == LZX_BLOCKTYPE_VERBATIM);
/* The first three bits indicate the type of block and are one of the
* LZX_BLOCKTYPE_* constants. */
- bitstream_put_bits(ostream, block_type, 3);
+ lzx_write_bits(os, block_type, 3);
/* Output the block size.
*
* because WIMs created with chunk size greater than 32768 can seemingly
* only be opened by wimlib anyway. */
if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
- bitstream_put_bits(ostream, 1, 1);
+ lzx_write_bits(os, 1, 1);
} else {
- bitstream_put_bits(ostream, 0, 1);
+ lzx_write_bits(os, 0, 1);
- if (max_window_size >= 65536)
- bitstream_put_bits(ostream, block_size >> 16, 8);
+ if (window_order >= 16)
+ lzx_write_bits(os, block_size >> 16, 8);
- bitstream_put_bits(ostream, block_size, 16);
+ lzx_write_bits(os, block_size & 0xFFFF, 16);
}
- /* Write out lengths of the main code. Note that the LZX specification
- * incorrectly states that the aligned offset code comes after the
- * length code, but in fact it is the very first code to be written
- * (before the main code). */
- if (block_type == LZX_BLOCKTYPE_ALIGNED)
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- bitstream_put_bits(ostream, codes->lens.aligned[i],
- LZX_ALIGNEDCODE_ELEMENT_SIZE);
-
- /* Write the precode and lengths for the first LZX_NUM_CHARS symbols in
- * the main code, which are the codewords for literal bytes. */
- lzx_write_compressed_code(ostream,
- codes->lens.main,
- prev_codes->lens.main,
- LZX_NUM_CHARS);
+ /* If it's an aligned offset block, output the aligned offset code. */
+ if (block_type == LZX_BLOCKTYPE_ALIGNED) {
+ for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+ lzx_write_bits(os, codes->lens.aligned[i],
+ LZX_ALIGNEDCODE_ELEMENT_SIZE);
+ }
+ }
- /* Write the precode and lengths for the rest of the main code, which
- * are the codewords for match headers. */
- lzx_write_compressed_code(ostream,
- codes->lens.main + LZX_NUM_CHARS,
- prev_codes->lens.main + LZX_NUM_CHARS,
+ /* Output the main code (two parts). */
+ lzx_write_compressed_code(os, codes->lens.main,
+ prev_lens->main,
+ LZX_NUM_CHARS);
+ lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
+ prev_lens->main + LZX_NUM_CHARS,
num_main_syms - LZX_NUM_CHARS);
- /* Write the precode and lengths for the length code. */
- lzx_write_compressed_code(ostream,
- codes->lens.len,
- prev_codes->lens.len,
+ /* Output the length code. */
+ lzx_write_compressed_code(os, codes->lens.len,
+ prev_lens->len,
LZX_LENCODE_NUM_SYMBOLS);
- /* Write the actual matches and literals. */
- lzx_write_items(ostream, block_type,
- chosen_items, num_chosen_items, codes);
+ /* Output the compressed matches and literals. */
+ lzx_write_items(os, block_type, chosen_items, num_chosen_items, codes);
}
-/* Write out the LZX blocks that were computed. */
-static void
-lzx_write_all_blocks(struct lzx_compressor *c, struct output_bitstream *ostream)
+/* Don't allow matches to span the end of an LZX block. */
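+/*
+ * For example (illustrative), if only 5 bytes remain before the block end,
+ * a match list with lengths {3, 7, 9} is truncated to {3, 5}: both longer
+ * matches are clamped to length 5, and only one of them is kept.
+ */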
+static inline unsigned
+maybe_truncate_matches(struct lz_match matches[], unsigned num_matches,
+ struct lzx_compressor *c)
{
+ if (c->match_window_end < c->cur_window_size && num_matches != 0) {
+ u32 limit = c->match_window_end - c->match_window_pos;
- const struct lzx_codes *prev_codes = &c->zero_codes;
- for (unsigned i = 0; i < c->num_blocks; i++) {
- const struct lzx_block_spec *spec = &c->block_specs[i];
-
- LZX_DEBUG("Writing block %u/%u (type=%d, size=%u, num_chosen_items=%u)...",
- i + 1, c->num_blocks,
- spec->block_type, spec->block_size,
- spec->num_chosen_items);
+ if (limit >= LZX_MIN_MATCH_LEN) {
- lzx_write_compressed_block(spec->block_type,
- spec->block_size,
- c->max_window_size,
- c->num_main_syms,
- spec->chosen_items,
- spec->num_chosen_items,
- &spec->codes,
- prev_codes,
- ostream);
+ unsigned i = num_matches - 1;
+ do {
+ if (matches[i].len >= limit) {
+ matches[i].len = limit;
- prev_codes = &spec->codes;
+ /* Truncation might produce multiple
+ * matches with length 'limit'. Keep at
+ * most 1. */
+ num_matches = i + 1;
+ }
+ } while (i--);
+ } else {
+ num_matches = 0;
+ }
}
+ return num_matches;
}
-/* Constructs an LZX match from a literal byte and updates the main code symbol
- * frequencies. */
-static inline u32
-lzx_tally_literal(u8 lit, struct lzx_freqs *freqs)
+static unsigned
+lzx_get_matches_fillcache_singleblock(struct lzx_compressor *c,
+ const struct lz_match **matches_ret)
{
- freqs->main[lit]++;
- return (u32)lit;
-}
+ struct lz_match *cache_ptr;
+ struct lz_match *matches;
+ unsigned num_matches;
-/* Constructs an LZX match from an offset and a length, and updates the LRU
- * queue and the frequency of symbols in the main, length, and aligned offset
- * alphabets. The return value is a 32-bit number that provides the match in an
- * intermediate representation documented below. */
-static inline u32
-lzx_tally_match(unsigned match_len, u32 match_offset,
- struct lzx_freqs *freqs, struct lzx_lru_queue *queue)
-{
- unsigned position_slot;
- unsigned position_footer;
- u32 len_header;
- unsigned main_symbol;
- unsigned len_footer;
- unsigned adjusted_match_len;
-
- LZX_ASSERT(match_len >= LZX_MIN_MATCH_LEN && match_len <= LZX_MAX_MATCH_LEN);
-
- /* The match offset shall be encoded as a position slot (itself encoded
- * as part of the main symbol) and a position footer. */
- position_slot = lzx_get_position_slot(match_offset, queue);
- position_footer = (match_offset + LZX_OFFSET_OFFSET) &
- ((1U << lzx_get_num_extra_bits(position_slot)) - 1);
-
- /* The match length shall be encoded as a length header (itself encoded
- * as part of the main symbol) and an optional length footer. */
- adjusted_match_len = match_len - LZX_MIN_MATCH_LEN;
- if (adjusted_match_len < LZX_NUM_PRIMARY_LENS) {
- /* No length footer needed. */
- len_header = adjusted_match_len;
+ cache_ptr = c->cache_ptr;
+ matches = cache_ptr + 1;
+ if (likely(cache_ptr <= c->cache_limit)) {
+ num_matches = lz_mf_get_matches(c->mf, matches);
+ cache_ptr->len = num_matches;
+ c->cache_ptr = matches + num_matches;
} else {
- /* Length footer needed. It will be encoded using the length
- * code. */
- len_header = LZX_NUM_PRIMARY_LENS;
- len_footer = adjusted_match_len - LZX_NUM_PRIMARY_LENS;
- freqs->len[len_footer]++;
+ num_matches = 0;
}
-
- /* Account for the main symbol. */
- main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
-
- freqs->main[main_symbol]++;
-
- /* In an aligned offset block, 3 bits of the position footer are output
- * as an aligned offset symbol. Account for this, although we may
- * ultimately decide to output the block as verbatim. */
-
- /* The following check is equivalent to:
- *
- * if (lzx_extra_bits[position_slot] >= 3)
- *
- * Note that this correctly excludes position slots that correspond to
- * recent offsets. */
- if (position_slot >= 8)
- freqs->aligned[position_footer & 7]++;
-
- /* Pack the position slot, position footer, and match length into an
- * intermediate representation. See `struct lzx_item' for details.
- */
- LZX_ASSERT(LZX_MAX_POSITION_SLOTS <= 64);
- LZX_ASSERT(lzx_get_num_extra_bits(LZX_MAX_POSITION_SLOTS - 1) <= 17);
- LZX_ASSERT(LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1 <= 256);
-
- LZX_ASSERT(position_slot <= (1U << (31 - 25)) - 1);
- LZX_ASSERT(position_footer <= (1U << (25 - 8)) - 1);
- LZX_ASSERT(adjusted_match_len <= (1U << (8 - 0)) - 1);
- return 0x80000000 |
- (position_slot << 25) |
- (position_footer << 8) |
- (adjusted_match_len);
-}
-
-/* Returns the cost, in bits, to output a literal byte using the specified cost
- * model. */
-static u32
-lzx_literal_cost(u8 c, const struct lzx_costs * costs)
-{
- return costs->main[c];
-}
-
-/* Given a (length, offset) pair that could be turned into a valid LZX match as
- * well as costs for the codewords in the main, length, and aligned Huffman
- * codes, return the approximate number of bits it will take to represent this
- * match in the compressed output. Take into account the match offset LRU
- * queue and also updates it. */
-static u32
-lzx_match_cost(unsigned length, u32 offset, const struct lzx_costs *costs,
- struct lzx_lru_queue *queue)
-{
- unsigned position_slot;
- unsigned len_header, main_symbol;
- unsigned num_extra_bits;
- u32 cost = 0;
-
- position_slot = lzx_get_position_slot(offset, queue);
-
- len_header = min(length - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
- main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
-
- /* Account for main symbol. */
- cost += costs->main[main_symbol];
-
- /* Account for extra position information. */
- num_extra_bits = lzx_get_num_extra_bits(position_slot);
- if (num_extra_bits >= 3) {
- cost += num_extra_bits - 3;
- cost += costs->aligned[(offset + LZX_OFFSET_OFFSET) & 7];
- } else {
- cost += num_extra_bits;
- }
-
- /* Account for extra length information. */
- if (len_header == LZX_NUM_PRIMARY_LENS)
- cost += costs->len[length - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
-
- return cost;
-
-}
-
-
-/* Set the cost model @c->costs from the Huffman codeword lengths specified in
- * @lens.
- *
- * The cost model and codeword lengths are almost the same thing, but the
- * Huffman codewords with length 0 correspond to symbols with zero frequency
- * that still need to be assigned actual costs. The specific values assigned
- * are arbitrary, but they should be fairly high (near the maximum codeword
- * length) to take into account the fact that uses of these symbols are expected
- * to be rare. */
-static void
-lzx_set_costs(struct lzx_compressor *c, const struct lzx_lens * lens,
- unsigned nostat)
-{
- unsigned i;
-
- /* Main code */
- for (i = 0; i < c->num_main_syms; i++)
- c->costs.main[i] = lens->main[i] ? lens->main[i] : nostat;
-
- /* Length code */
- for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
- c->costs.len[i] = lens->len[i] ? lens->len[i] : nostat;
-
- /* Aligned offset code */
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- c->costs.aligned[i] = lens->aligned[i] ? lens->aligned[i] : nostat / 2;
-}
-
-/* Don't allow matches to span the end of an LZX block. */
-static inline u32
-maybe_truncate_matches(struct lz_match matches[], u32 num_matches,
- struct lzx_compressor *c)
-{
- if (c->match_window_end < c->cur_window_size && num_matches != 0) {
- u32 limit = c->match_window_end - c->match_window_pos;
-
- if (limit >= LZX_MIN_MATCH_LEN) {
-
- u32 i = num_matches - 1;
- do {
- if (matches[i].len >= limit) {
- matches[i].len = limit;
-
- /* Truncation might produce multiple
- * matches with length 'limit'. Keep at
- * most 1. */
- num_matches = i + 1;
- }
- } while (i--);
- } else {
- num_matches = 0;
- }
- }
- return num_matches;
-}
-
-static unsigned
-lzx_get_matches_fillcache_singleblock(struct lzx_compressor *c,
- const struct lz_match **matches_ret)
-{
- struct lz_match *cache_ptr;
- struct lz_match *matches;
- unsigned num_matches;
-
- cache_ptr = c->cache_ptr;
- matches = cache_ptr + 1;
- if (likely(cache_ptr <= c->cache_limit)) {
- num_matches = lz_mf_get_matches(c->mf, matches);
- cache_ptr->len = num_matches;
- c->cache_ptr = matches + num_matches;
- } else {
- num_matches = 0;
- }
- c->match_window_pos++;
- *matches_ret = matches;
- return num_matches;
-}
+ c->match_window_pos++;
+ *matches_ret = matches;
+ return num_matches;
+}
static unsigned
lzx_get_matches_fillcache_multiblock(struct lzx_compressor *c,
/*
* Find matches at the next position in the window.
*
+ * This uses a wrapper function around the underlying match-finder.
+ *
* Returns the number of matches found and sets *matches_ret to point to the
* matches array. The matches will be sorted by strictly increasing length and
* offset.
*/
static inline unsigned
-lzx_get_matches(struct lzx_compressor *c,
- const struct lz_match **matches_ret)
+lzx_get_matches(struct lzx_compressor *c, const struct lz_match **matches_ret)
{
return (*c->get_matches_func)(c, matches_ret);
}
/*
* Skip the specified number of positions in the window (don't search for
* matches at them).
+ *
+ * This uses a wrapper function around the underlying match-finder.
*/
static inline void
 lzx_skip_bytes(struct lzx_compressor *c, unsigned n)
 {
 return (*c->skip_bytes_func)(c, n);
}
-/*
- * Reverse the linked list of near-optimal matches so that they can be returned
- * in forwards order.
- *
- * Returns the first match in the list.
- */
-static struct lz_match
-lzx_match_chooser_reverse_list(struct lzx_compressor *c, unsigned cur_pos)
+/* Tally, and optionally record, the specified literal byte. */
+static inline void
+lzx_declare_literal(struct lzx_compressor *c, unsigned literal,
+ struct lzx_item **next_chosen_item)
{
- unsigned prev_link, saved_prev_link;
- unsigned prev_match_offset, saved_prev_match_offset;
+ unsigned main_symbol = literal;
- c->optimum_end_idx = cur_pos;
+ c->freqs.main[main_symbol]++;
- saved_prev_link = c->optimum[cur_pos].prev.link;
- saved_prev_match_offset = c->optimum[cur_pos].prev.match_offset;
+ if (next_chosen_item) {
+ *(*next_chosen_item)++ = (struct lzx_item) {
+ .data = main_symbol,
+ };
+ }
+}
- do {
- prev_link = saved_prev_link;
- prev_match_offset = saved_prev_match_offset;
+/* Tally, and optionally record, the specified repeat offset match. */
+static inline void
+lzx_declare_repeat_offset_match(struct lzx_compressor *c,
+ unsigned len, unsigned rep_index,
+ struct lzx_item **next_chosen_item)
+{
+ unsigned len_header;
+ unsigned main_symbol;
+ unsigned len_symbol;
+
+ if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
+ len_header = len - LZX_MIN_MATCH_LEN;
+ len_symbol = LZX_LENCODE_NUM_SYMBOLS;
+ } else {
+ len_header = LZX_NUM_PRIMARY_LENS;
+ len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
+ c->freqs.len[len_symbol]++;
+ }
+
+ main_symbol = LZX_NUM_CHARS + ((rep_index << 3) | len_header);
+
+ c->freqs.main[main_symbol]++;
+
+ if (next_chosen_item) {
+ *(*next_chosen_item)++ = (struct lzx_item) {
+ .data = (u64)main_symbol | ((u64)len_symbol << 10),
+ };
+ }
+}
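+
+/*
+ * For illustration (using the usual LZX constants LZX_MIN_MATCH_LEN = 2,
+ * LZX_NUM_PRIMARY_LENS = 7, LZX_NUM_CHARS = 256): a repeat offset match with
+ * len = 5 and rep_index = 1 needs no length symbol and tallies main symbol
+ * 256 + ((1 << 3) | 3) = 267, whereas len = 100 tallies length symbol
+ * 100 - 2 - 7 = 91 and main symbol 256 + ((1 << 3) | 7) = 271.
+ */
+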
- saved_prev_link = c->optimum[prev_link].prev.link;
- saved_prev_match_offset = c->optimum[prev_link].prev.match_offset;
+/* Tally, and optionally record, the specified explicit offset match. */
+static inline void
+lzx_declare_explicit_offset_match(struct lzx_compressor *c, unsigned len, u32 offset,
+ struct lzx_item **next_chosen_item)
+{
+ unsigned len_header;
+ unsigned main_symbol;
+ unsigned len_symbol;
+ unsigned offset_slot;
+ unsigned num_extra_bits;
+ u32 extra_bits;
+
+ if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
+ len_header = len - LZX_MIN_MATCH_LEN;
+ len_symbol = LZX_LENCODE_NUM_SYMBOLS;
+ } else {
+ len_header = LZX_NUM_PRIMARY_LENS;
+ len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
+ c->freqs.len[len_symbol]++;
+ }
- c->optimum[prev_link].next.link = cur_pos;
- c->optimum[prev_link].next.match_offset = prev_match_offset;
+ offset_slot = lzx_get_offset_slot_raw(offset + LZX_OFFSET_OFFSET);
- cur_pos = prev_link;
- } while (cur_pos != 0);
+ main_symbol = LZX_NUM_CHARS + ((offset_slot << 3) | len_header);
- c->optimum_cur_idx = c->optimum[0].next.link;
+ c->freqs.main[main_symbol]++;
- return (struct lz_match)
- { .len = c->optimum_cur_idx,
- .offset = c->optimum[0].next.match_offset,
+ if (offset_slot >= 8)
+ c->freqs.aligned[(offset + LZX_OFFSET_OFFSET) & 7]++;
+
+ if (next_chosen_item) {
+
+ num_extra_bits = lzx_extra_offset_bits[offset_slot];
+
+ extra_bits = (offset + LZX_OFFSET_OFFSET) -
+ lzx_offset_slot_base[offset_slot];
+
+ *(*next_chosen_item)++ = (struct lzx_item) {
+ .data = (u64)main_symbol |
+ ((u64)len_symbol << 10) |
+ ((u64)num_extra_bits << 18) |
+ ((u64)extra_bits << 23),
};
+ }
}
-/*
- * lzx_choose_near_optimal_match() -
- *
- * Choose an approximately optimal match or literal to use at the next position
- * in the string, or "window", being LZ-encoded.
- *
- * This is based on algorithms used in 7-Zip, including the DEFLATE encoder
- * and the LZMA encoder, written by Igor Pavlov.
- *
- * Unlike a greedy parser that always takes the longest match, or even a "lazy"
- * parser with one match/literal look-ahead like zlib, the algorithm used here
- * may look ahead many matches/literals to determine the approximately optimal
- * match/literal to code next. The motivation is that the compression ratio is
- * improved if the compressor can do things like use a shorter-than-possible
- * match in order to allow a longer match later, and also take into account the
- * estimated real cost of coding each match/literal based on the underlying
- * entropy encoding.
- *
- * Still, this is not a true optimal parser for several reasons:
- *
- * - Real compression formats use entropy encoding of the literal/match
- * sequence, so the real cost of coding each match or literal is unknown until
- * the parse is fully determined. It can be approximated based on iterative
- * parses, but the end result is not guaranteed to be globally optimal.
- *
- * - Very long matches are chosen immediately. This is because locations with
- * long matches are likely to have many possible alternatives that would cause
- * slow optimal parsing, but also such locations are already highly
- * compressible so it is not too harmful to just grab the longest match.
- *
- * - Not all possible matches at each location are considered because the
- * underlying match-finder limits the number and type of matches produced at
- * each position. For example, for a given match length it's usually not
- * worth it to only consider matches other than the lowest-offset match,
- * except in the case of a repeat offset.
- *
- * - Although we take into account the adaptive state (in LZX, the recent offset
- * queue), coding decisions made with respect to the adaptive state will be
- * locally optimal but will not necessarily be globally optimal. This is
- * because the algorithm only keeps the least-costly path to get to a given
- * location and does not take into account that a slightly more costly path
- * could result in a different adaptive state that ultimately results in a
- * lower global cost.
- *
- * - The array space used by this function is bounded, so in degenerate cases it
- * is forced to start returning matches/literals before the algorithm has
- * really finished.
- *
- * Each call to this function does one of two things:
- *
- * 1. Build a sequence of near-optimal matches/literals, up to some point, that
- * will be returned by subsequent calls to this function, then return the
- * first one.
- *
- * OR
- *
- * 2. Return the next match/literal previously computed by a call to this
- * function.
+/* Tally, and optionally record, the specified match or literal. */
+static inline void
+lzx_declare_item(struct lzx_compressor *c, u32 mc_item_data,
+ struct lzx_item **next_chosen_item)
+{
+ u32 len = mc_item_data & MC_LEN_MASK;
+ u32 offset_data = mc_item_data >> MC_OFFSET_SHIFT;
+
+ if (len == 1)
+ lzx_declare_literal(c, offset_data, next_chosen_item);
+ else if (offset_data < LZX_NUM_RECENT_OFFSETS)
+ lzx_declare_repeat_offset_match(c, len, offset_data,
+ next_chosen_item);
+ else
+ lzx_declare_explicit_offset_match(c, len,
+ offset_data - LZX_OFFSET_OFFSET,
+ next_chosen_item);
+}
+
+static inline void
+lzx_record_item_list(struct lzx_compressor *c,
+ struct lzx_mc_pos_data *cur_optimum_ptr,
+ struct lzx_item **next_chosen_item)
+{
+ struct lzx_mc_pos_data *end_optimum_ptr;
+ u32 saved_item;
+ u32 item;
+
+ /* The list is currently in reverse order (last item to first item).
+ * Reverse it. */
+ end_optimum_ptr = cur_optimum_ptr;
+ saved_item = cur_optimum_ptr->mc_item_data;
+ do {
+ item = saved_item;
+ cur_optimum_ptr -= item & MC_LEN_MASK;
+ saved_item = cur_optimum_ptr->mc_item_data;
+ cur_optimum_ptr->mc_item_data = item;
+ } while (cur_optimum_ptr != c->optimum);
+
+ /* Walk the list of items from beginning to end, tallying and recording
+ * each item. */
+ do {
+ lzx_declare_item(c, cur_optimum_ptr->mc_item_data, next_chosen_item);
+ cur_optimum_ptr += (cur_optimum_ptr->mc_item_data) & MC_LEN_MASK;
+ } while (cur_optimum_ptr != end_optimum_ptr);
+}
+
+static inline void
+lzx_tally_item_list(struct lzx_compressor *c, struct lzx_mc_pos_data *cur_optimum_ptr)
+{
+ /* Since we're just tallying the items, we don't need to reverse the
+ * list. Processing the items in reverse order is fine. */
+ do {
+ lzx_declare_item(c, cur_optimum_ptr->mc_item_data, NULL);
+ cur_optimum_ptr -= (cur_optimum_ptr->mc_item_data & MC_LEN_MASK);
+ } while (cur_optimum_ptr != c->optimum);
+}
+
+/* Tally, and optionally (if next_chosen_item != NULL) record, in order, all
+ * items in the current list of items found by the match-chooser. */
+static void
+lzx_declare_item_list(struct lzx_compressor *c, struct lzx_mc_pos_data *cur_optimum_ptr,
+ struct lzx_item **next_chosen_item)
+{
+ if (next_chosen_item)
+ lzx_record_item_list(c, cur_optimum_ptr, next_chosen_item);
+ else
+ lzx_tally_item_list(c, cur_optimum_ptr);
+}
+
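+/*
+ * Note on the packed item representation used by the match-chooser (the values
+ * below are examples only): the low bits (MC_LEN_MASK) of mc_item_data hold
+ * the item length and the bits above MC_OFFSET_SHIFT hold the "offset data".
+ * A literal byte 0x41 is stored as (0x41 << MC_OFFSET_SHIFT) | 1; a length 10
+ * match with repeat offset index 2 as (2 << MC_OFFSET_SHIFT) | 10; and a
+ * length 10 match at explicit offset 500 as
+ * ((500 + LZX_OFFSET_OFFSET) << MC_OFFSET_SHIFT) | 10.
+ */
+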
+/* Set the cost model @c->costs from the Huffman codeword lengths specified in
+ * @lens.
*
- * The return value is a (length, offset) pair specifying the match or literal
- * chosen. For literals, the length is 0 or 1 and the offset is meaningless.
- */
-static struct lz_match
-lzx_choose_near_optimal_item(struct lzx_compressor *c)
+ * The cost model and codeword lengths are almost the same thing, but the
+ * Huffman codewords with length 0 correspond to symbols with zero frequency
+ * that still need to be assigned actual costs. The specific values assigned
+ * are arbitrary, but they should be fairly high (near the maximum codeword
+ * length) to take into account the fact that uses of these symbols are expected
+ * to be rare. */
+static void
+lzx_set_costs(struct lzx_compressor *c, const struct lzx_lens * lens)
{
- unsigned num_matches;
- const struct lz_match *matches;
- struct lz_match match;
- unsigned longest_len;
- unsigned longest_rep_len;
- u32 longest_rep_offset;
- unsigned cur_pos;
- unsigned end_pos;
-
- if (c->optimum_cur_idx != c->optimum_end_idx) {
- /* Case 2: Return the next match/literal already found. */
- match.len = c->optimum[c->optimum_cur_idx].next.link -
- c->optimum_cur_idx;
- match.offset = c->optimum[c->optimum_cur_idx].next.match_offset;
-
- c->optimum_cur_idx = c->optimum[c->optimum_cur_idx].next.link;
- return match;
+ unsigned i;
+
+ /* Main code */
+ for (i = 0; i < c->num_main_syms; i++)
+ c->costs.main[i] = lens->main[i] ? lens->main[i] : 15;
+
+ /* Length code */
+ for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
+ c->costs.len[i] = lens->len[i] ? lens->len[i] : 15;
+
+ /* Aligned offset code */
+ for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
+ c->costs.aligned[i] = lens->aligned[i] ? lens->aligned[i] : 7;
+}
+
+/* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
+ * algorithm. */
+static void
+lzx_set_default_costs(struct lzx_costs * costs, unsigned num_main_syms)
+{
+ unsigned i;
+
+ /* Main code (part 1): Literal symbols */
+ for (i = 0; i < LZX_NUM_CHARS; i++)
+ costs->main[i] = 8;
+
+ /* Main code (part 2): Match header symbols */
+ for (; i < num_main_syms; i++)
+ costs->main[i] = 10;
+
+ /* Length code */
+ for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
+ costs->len[i] = 8;
+
+ /* Aligned offset code */
+ for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
+ costs->aligned[i] = 3;
+}
+
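+/*
+ * A rough consequence of these defaults (illustrative numbers only): a literal
+ * is estimated at 8 bits and a match header at 10 bits, so a length 3 match
+ * with 10 extra offset bits is estimated at about 20 bits, versus 24 bits for
+ * coding the same three bytes as literals.
+ */
+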
+/* Return the cost, in bits, to output a literal byte using the specified cost
+ * model. */
+static inline u32
+lzx_literal_cost(unsigned literal, const struct lzx_costs * costs)
+{
+ return costs->main[literal];
+}
+
+/* Return the cost, in bits, to output a match of the specified length and
+ * offset slot using the specified cost model. Does not take into account
+ * extra offset bits. */
+static inline u32
+lzx_match_cost_raw(unsigned len, unsigned offset_slot,
+ const struct lzx_costs *costs)
+{
+ u32 cost;
+ unsigned len_header;
+ unsigned main_symbol;
+
+ if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
+ len_header = len - LZX_MIN_MATCH_LEN;
+ cost = 0;
+ } else {
+ len_header = LZX_NUM_PRIMARY_LENS;
+
+ /* Account for length symbol. */
+ cost = costs->len[len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
}
- /* Case 1: Compute a new list of matches/literals to return. */
-
- c->optimum_cur_idx = 0;
- c->optimum_end_idx = 0;
-
- /* Search for matches at recent offsets. Only keep the one with the
- * longest match length. */
- longest_rep_len = LZX_MIN_MATCH_LEN - 1;
- if (c->match_window_pos >= 1) {
- unsigned limit = min(LZX_MAX_MATCH_LEN,
- c->match_window_end - c->match_window_pos);
- for (int i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
- u32 offset = c->queue.R[i];
- const u8 *strptr = &c->cur_window[c->match_window_pos];
- const u8 *matchptr = strptr - offset;
- unsigned len = 0;
- while (len < limit && strptr[len] == matchptr[len])
- len++;
- if (len > longest_rep_len) {
- longest_rep_len = len;
- longest_rep_offset = offset;
+ /* Account for main symbol. */
+ main_symbol = LZX_NUM_CHARS + ((offset_slot << 3) | len_header);
+ cost += costs->main[main_symbol];
+
+ return cost;
+}
+
+/* Equivalent to lzx_match_cost_raw(), but assumes the length is small enough
+ * that it doesn't require a length symbol. */
+static inline u32
+lzx_match_cost_raw_smalllen(unsigned len, unsigned offset_slot,
+ const struct lzx_costs *costs)
+{
+ LZX_ASSERT(len < LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS);
+ return costs->main[LZX_NUM_CHARS +
+ ((offset_slot << 3) | (len - LZX_MIN_MATCH_LEN))];
+}
+
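+/*
+ * Worked example of the raw match cost (illustrative only): for len = 20 in
+ * offset slot 5, len_header becomes LZX_NUM_PRIMARY_LENS, so the cost is
+ * costs->len[20 - 2 - 7] plus costs->main[256 + ((5 << 3) | 7)].  For len = 4
+ * in the same slot, no length symbol is needed and the cost is just
+ * costs->main[256 + ((5 << 3) | 2)].
+ */
+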
+/*
+ * Consider coding the match at repeat offset index @rep_idx. Consider each
+ * length from the minimum (2) to the full match length (@rep_len).
+ */
+static inline void
+lzx_consider_repeat_offset_match(struct lzx_compressor *c,
+ struct lzx_mc_pos_data *cur_optimum_ptr,
+ unsigned rep_len, unsigned rep_idx)
+{
+ u32 base_cost = cur_optimum_ptr->cost;
+ u32 cost;
+ unsigned len;
+
+#if 1 /* Optimized version */
+
+ if (rep_len < LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS) {
+ /* All lengths being considered are small. */
+ len = 2;
+ do {
+ cost = base_cost +
+ lzx_match_cost_raw_smalllen(len, rep_idx, &c->costs);
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->mc_item_data =
+ (rep_idx << MC_OFFSET_SHIFT) | len;
+ (cur_optimum_ptr + len)->cost = cost;
}
- }
- }
+ } while (++len <= rep_len);
+ } else {
+ /* Some lengths being considered are small, and some are big.
+ * Start with the optimized loop for small lengths, then switch
+ * to the optimized loop for big lengths. */
+ len = 2;
+ do {
+ cost = base_cost +
+ lzx_match_cost_raw_smalllen(len, rep_idx, &c->costs);
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->mc_item_data =
+ (rep_idx << MC_OFFSET_SHIFT) | len;
+ (cur_optimum_ptr + len)->cost = cost;
+ }
+ } while (++len < LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS);
- /* If there's a long match with a recent offset, take it. */
- if (longest_rep_len >= c->params.nice_match_length) {
- lzx_skip_bytes(c, longest_rep_len);
- return (struct lz_match) {
- .len = longest_rep_len,
- .offset = longest_rep_offset,
- };
+ /* The main symbol is now fixed. */
+ base_cost += c->costs.main[LZX_NUM_CHARS +
+ ((rep_idx << 3) | LZX_NUM_PRIMARY_LENS)];
+ do {
+ cost = base_cost +
+ c->costs.len[len - LZX_MIN_MATCH_LEN -
+ LZX_NUM_PRIMARY_LENS];
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->mc_item_data =
+ (rep_idx << MC_OFFSET_SHIFT) | len;
+ (cur_optimum_ptr + len)->cost = cost;
+ }
+ } while (++len <= rep_len);
}
- /* Search other matches. */
- num_matches = lzx_get_matches(c, &matches);
+#else /* Unoptimized version */
+
+ len = 2;
+ do {
+ cost = base_cost +
+ lzx_match_cost_raw(len, rep_idx, &c->costs);
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->mc_item_data =
+ (rep_idx << MC_OFFSET_SHIFT) | len;
+ (cur_optimum_ptr + len)->cost = cost;
+ }
+ } while (++len <= rep_len);
+#endif
+}
+
+/*
+ * Consider coding each match in @matches as an explicit offset match.
+ *
+ * @matches must be sorted by strictly increasing length and strictly
+ * increasing offset. This is guaranteed by the match-finder.
+ *
+ * We consider each length from the minimum (2) to the longest
+ * (matches[num_matches - 1].len). For each length, we consider only
+ * the smallest offset for which that length is available. Although
+ * this is not guaranteed to be optimal due to the possibility of a
+ * larger offset costing less than a smaller offset to code, this is a
+ * very useful heuristic.
+ */
+static inline void
+lzx_consider_explicit_offset_matches(struct lzx_compressor *c,
+ struct lzx_mc_pos_data *cur_optimum_ptr,
+ const struct lz_match matches[],
+ unsigned num_matches)
+{
+ LZX_ASSERT(num_matches > 0);
+
+ unsigned i;
+ unsigned len;
+ unsigned offset_slot;
+ u32 position_cost;
+ u32 cost;
+ u32 offset_data;
+
+#if 1 /* Optimized version */
+
+ if (matches[num_matches - 1].offset < LZX_NUM_FAST_OFFSETS) {
+
+ /*
+ * Offset is small; the offset slot can be looked up directly in
+ * c->offset_slot_fast.
+ *
+ * Additional optimizations:
+ *
+ * - Since the offset is small, it falls in the exponential part
+ * of the offset slot bases and the number of extra offset
+ * bits can be calculated directly as (offset_slot >> 1) - 1.
+ *
+ * - Just consider the number of extra offset bits; don't
+ * account for the aligned offset code. Usually this has
+ * almost no effect on the compression ratio.
+ *
+ * - Start out in a loop optimized for small lengths. When the
+ * length becomes high enough that a length symbol will be
+ * needed, jump into a loop optimized for big lengths.
+ */
+
+ LZX_ASSERT(offset_slot <= 37); /* for extra bits formula */
+
+ len = 2;
+ i = 0;
+ do {
+ offset_slot = c->offset_slot_fast[matches[i].offset];
+ position_cost = cur_optimum_ptr->cost +
+ ((offset_slot >> 1) - 1);
+ offset_data = matches[i].offset + LZX_OFFSET_OFFSET;
+ do {
+ if (len >= LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS)
+ goto biglen;
+ cost = position_cost +
+ lzx_match_cost_raw_smalllen(len, offset_slot,
+ &c->costs);
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->cost = cost;
+ (cur_optimum_ptr + len)->mc_item_data =
+ (offset_data << MC_OFFSET_SHIFT) | len;
+ }
+ } while (++len <= matches[i].len);
+ } while (++i != num_matches);
+
+ return;
- /* If there's a long match, take it. */
- if (num_matches) {
- longest_len = matches[num_matches - 1].len;
- if (longest_len >= c->params.nice_match_length) {
- lzx_skip_bytes(c, longest_len - 1);
- return matches[num_matches - 1];
- }
+ do {
+ offset_slot = c->offset_slot_fast[matches[i].offset];
+ biglen:
+ position_cost = cur_optimum_ptr->cost +
+ ((offset_slot >> 1) - 1) +
+ c->costs.main[LZX_NUM_CHARS +
+ ((offset_slot << 3) |
+ LZX_NUM_PRIMARY_LENS)];
+ offset_data = matches[i].offset + LZX_OFFSET_OFFSET;
+ do {
+ cost = position_cost +
+ c->costs.len[len - LZX_MIN_MATCH_LEN -
+ LZX_NUM_PRIMARY_LENS];
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->cost = cost;
+ (cur_optimum_ptr + len)->mc_item_data =
+ (offset_data << MC_OFFSET_SHIFT) | len;
+ }
+ } while (++len <= matches[i].len);
+ } while (++i != num_matches);
} else {
- longest_len = 1;
+ len = 2;
+ i = 0;
+ do {
+ offset_data = matches[i].offset + LZX_OFFSET_OFFSET;
+ offset_slot = lzx_get_offset_slot_raw(offset_data);
+ position_cost = cur_optimum_ptr->cost +
+ lzx_extra_offset_bits[offset_slot];
+ do {
+ cost = position_cost +
+ lzx_match_cost_raw(len, offset_slot, &c->costs);
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->cost = cost;
+ (cur_optimum_ptr + len)->mc_item_data =
+ (offset_data << MC_OFFSET_SHIFT) | len;
+ }
+ } while (++len <= matches[i].len);
+ } while (++i != num_matches);
}
- /* Calculate the cost to reach the next position by coding a literal.
- */
- c->optimum[1].queue = c->queue;
- c->optimum[1].cost = lzx_literal_cost(c->cur_window[c->match_window_pos - 1],
- &c->costs);
- c->optimum[1].prev.link = 0;
+#else /* Unoptimized version */
- /* Calculate the cost to reach any position up to and including that
- * reached by the longest match.
- *
- * Note: We consider only the lowest-offset match that reaches each
- * position.
- *
- * Note: Some of the cost calculation stays the same for each offset,
- * regardless of how many lengths it gets used for. Therefore, to
- * improve performance, we hand-code the cost calculation instead of
- * calling lzx_match_cost() to do a from-scratch cost evaluation at each
- * length. */
- for (unsigned i = 0, len = 2; i < num_matches; i++) {
- u32 offset;
- struct lzx_lru_queue queue;
- u32 position_cost;
- unsigned position_slot;
- unsigned num_extra_bits;
-
- offset = matches[i].offset;
- queue = c->queue;
- position_cost = 0;
-
- position_slot = lzx_get_position_slot(offset, &queue);
- num_extra_bits = lzx_get_num_extra_bits(position_slot);
+ unsigned num_extra_bits;
+
+ len = 2;
+ i = 0;
+ do {
+ offset_data = matches[i].offset + LZX_OFFSET_OFFSET;
+ position_cost = cur_optimum_ptr->cost;
+ offset_slot = lzx_get_offset_slot_raw(offset_data);
+ num_extra_bits = lzx_extra_offset_bits[offset_slot];
if (num_extra_bits >= 3) {
position_cost += num_extra_bits - 3;
- position_cost += c->costs.aligned[(offset + LZX_OFFSET_OFFSET) & 7];
+ position_cost += c->costs.aligned[offset_data & 7];
} else {
position_cost += num_extra_bits;
}
-
do {
- unsigned len_header;
- unsigned main_symbol;
- u32 cost;
-
- cost = position_cost;
-
- len_header = min(len - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
- main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
- cost += c->costs.main[main_symbol];
- if (len_header == LZX_NUM_PRIMARY_LENS)
- cost += c->costs.len[len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
-
- c->optimum[len].queue = queue;
- c->optimum[len].prev.link = 0;
- c->optimum[len].prev.match_offset = offset;
- c->optimum[len].cost = cost;
+ cost = position_cost +
+ lzx_match_cost_raw(len, offset_slot, &c->costs);
+ if (cost < (cur_optimum_ptr + len)->cost) {
+ (cur_optimum_ptr + len)->cost = cost;
+ (cur_optimum_ptr + len)->mc_item_data =
+ (offset_data << MC_OFFSET_SHIFT) | len;
+ }
} while (++len <= matches[i].len);
- }
- end_pos = longest_len;
-
- if (longest_rep_len >= LZX_MIN_MATCH_LEN) {
- struct lzx_lru_queue queue;
- u32 cost;
-
- while (end_pos < longest_rep_len)
- c->optimum[++end_pos].cost = MC_INFINITE_COST;
-
- queue = c->queue;
- cost = lzx_match_cost(longest_rep_len, longest_rep_offset,
- &c->costs, &queue);
- if (cost <= c->optimum[longest_rep_len].cost) {
- c->optimum[longest_rep_len].queue = queue;
- c->optimum[longest_rep_len].prev.link = 0;
- c->optimum[longest_rep_len].prev.match_offset = longest_rep_offset;
- c->optimum[longest_rep_len].cost = cost;
- }
- }
+ } while (++i != num_matches);
+#endif
+}
- /* Step forward, calculating the estimated minimum cost to reach each
- * position. The algorithm may find multiple paths to reach each
- * position; only the lowest-cost path is saved.
- *
- * The progress of the parse is tracked in the @c->optimum array, which
- * for each position contains the minimum cost to reach that position,
- * the index of the start of the match/literal taken to reach that
- * position through the minimum-cost path, the offset of the match taken
- * (not relevant for literals), and the adaptive state that will exist
- * at that position after the minimum-cost path is taken. The @cur_pos
- * variable stores the position at which the algorithm is currently
- * considering coding choices, and the @end_pos variable stores the
- * greatest position at which the costs of coding choices have been
- * saved. (Actually, the algorithm guarantees that all positions up to
- * and including @end_pos are reachable by at least one path.)
- *
- * The loop terminates when any one of the following conditions occurs:
- *
- * 1. A match with length greater than or equal to @nice_match_length is
- * found. When this occurs, the algorithm chooses this match
- * unconditionally, and consequently the near-optimal match/literal
- * sequence up to and including that match is fully determined and it
- * can begin returning the match/literal list.
- *
- * 2. @cur_pos reaches a position not overlapped by a preceding match.
- * In such cases, the near-optimal match/literal sequence up to
- * @cur_pos is fully determined and it can begin returning the
- * match/literal list.
- *
- * 3. Failing either of the above in a degenerate case, the loop
- * terminates when space in the @c->optimum array is exhausted.
- * This terminates the algorithm and forces it to start returning
- * matches/literals even though they may not be globally optimal.
+/*
+ * Search for repeat offset matches with the current position.
+ */
+static inline unsigned
+lzx_repsearch(const u8 * const strptr, const u32 bytes_remaining,
+ const struct lzx_lru_queue *queue, unsigned *rep_max_idx_ret)
+{
+ BUILD_BUG_ON(LZX_NUM_RECENT_OFFSETS != 3);
+ return lz_repsearch3(strptr, min(bytes_remaining, LZX_MAX_MATCH_LEN),
+ queue->R, rep_max_idx_ret);
+}
+
+/*
+ * The main near-optimal parsing routine.
+ *
+ * Briefly, the algorithm does an approximate minimum-cost path search to find a
+ * "near-optimal" sequence of matches and literals to output, based on the
+ * current cost model. The algorithm steps forward, position by position (byte
+ * by byte), and updates the minimum cost path to reach each later position that
+ * can be reached using a match or literal from the current position. This is
+ * essentially Dijkstra's algorithm in disguise: the graph nodes are positions,
+ * the graph edges are possible matches/literals to code, and the cost of each
+ * edge is the estimated number of bits that will be required to output the
+ * corresponding match or literal. But one difference is that we actually
+ * compute the lowest-cost path in pieces, where each piece is terminated when
+ * there are no choices to be made.
+ *
+ * This function will run this algorithm on the portion of the window from
+ * &c->cur_window[c->match_window_pos] to &c->cur_window[c->match_window_end].
+ *
+ * On entry, c->queue must be the current state of the match offset LRU queue,
+ * and c->costs must be the current cost model to use for Huffman symbols.
+ *
+ * On exit, c->queue will be the state that the LRU queue would be in if the
+ * chosen items were to be coded.
+ *
+ * If next_chosen_item != NULL, then all items chosen will be recorded (saved in
+ * the chosen_items array). Otherwise, all items chosen will only be tallied
+ * (symbol frequencies tallied in c->freqs).
+ */
+static void
+lzx_optim_pass(struct lzx_compressor *c, struct lzx_item **next_chosen_item)
+{
+ const u8 *block_end;
+ struct lzx_lru_queue *begin_queue;
+ const u8 *window_ptr;
+ struct lzx_mc_pos_data *cur_optimum_ptr;
+ struct lzx_mc_pos_data *end_optimum_ptr;
+ const struct lz_match *matches;
+ unsigned num_matches;
+ unsigned longest_len;
+ unsigned rep_max_len;
+ unsigned rep_max_idx;
+ unsigned literal;
+ unsigned len;
+ u32 cost;
+ u32 offset_data;
+
+ block_end = &c->cur_window[c->match_window_end];
+ begin_queue = &c->queue;
+begin:
+ /* Start building a new list of items, which will correspond to the next
+ * piece of the overall minimum-cost path.
*
- * Upon loop termination, a nonempty list of matches/literals will have
- * been produced and stored in the @optimum array. These
- * matches/literals are linked in reverse order, so the last thing this
- * function does is reverse this list and return the first
- * match/literal, leaving the rest to be returned immediately by
- * subsequent calls to this function.
- */
- cur_pos = 0;
- for (;;) {
- u32 cost;
-
- /* Advance to next position. */
- cur_pos++;
-
- /* Check termination conditions (2) and (3) noted above. */
- if (cur_pos == end_pos || cur_pos == LZX_OPTIM_ARRAY_LENGTH)
- return lzx_match_chooser_reverse_list(c, cur_pos);
-
- /* Search for matches at recent offsets. */
- longest_rep_len = LZX_MIN_MATCH_LEN - 1;
- unsigned limit = min(LZX_MAX_MATCH_LEN,
- c->match_window_end - c->match_window_pos);
- for (int i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
- u32 offset = c->optimum[cur_pos].queue.R[i];
- const u8 *strptr = &c->cur_window[c->match_window_pos];
- const u8 *matchptr = strptr - offset;
- unsigned len = 0;
- while (len < limit && strptr[len] == matchptr[len])
- len++;
- if (len > longest_rep_len) {
- longest_rep_len = len;
- longest_rep_offset = offset;
- }
- }
+ * *begin_queue is the current state of the match offset LRU queue. */
+
+ window_ptr = &c->cur_window[c->match_window_pos];
- /* If we found a long match at a recent offset, choose it
- * immediately. */
- if (longest_rep_len >= c->params.nice_match_length) {
- /* Build the list of matches to return and get
- * the first one. */
- match = lzx_match_chooser_reverse_list(c, cur_pos);
+ if (window_ptr == block_end) {
+ c->queue = *begin_queue;
+ return;
+ }
- /* Append the long match to the end of the list. */
- c->optimum[cur_pos].next.match_offset = longest_rep_offset;
- c->optimum[cur_pos].next.link = cur_pos + longest_rep_len;
- c->optimum_end_idx = cur_pos + longest_rep_len;
+ cur_optimum_ptr = c->optimum;
+ cur_optimum_ptr->cost = 0;
+ cur_optimum_ptr->queue = *begin_queue;
- /* Skip over the remaining bytes of the long match. */
- lzx_skip_bytes(c, longest_rep_len);
+ end_optimum_ptr = cur_optimum_ptr;
- /* Return first match in the list. */
- return match;
- }
+ /* The following loop runs once per byte in the window, except
+ * in a couple of shortcut cases. */
+ for (;;) {
- /* Search other matches. */
+ /* Find explicit offset matches with the current position. */
num_matches = lzx_get_matches(c, &matches);
- /* If there's a long match, take it. */
if (num_matches) {
+ /*
+ * Find the longest repeat offset match with the current
+ * position.
+ *
+ * Heuristics:
+ *
+ * - Only search for repeat offset matches if the
+ * match-finder already found at least one match.
+ *
+ * - Only consider the longest repeat offset match. It
+ * seems to be rare for the optimal parse to include a
+ * repeat offset match that doesn't have the longest
+ * length (allowing for the possibility that not all
+ * of that length is actually used).
+ */
+ rep_max_len = lzx_repsearch(window_ptr,
+ block_end - window_ptr,
+ &cur_optimum_ptr->queue,
+ &rep_max_idx);
+
+ if (rep_max_len) {
+ /* If there's a very long repeat offset match,
+ * choose it immediately. */
+ if (rep_max_len >= c->params.nice_match_length) {
+
+ swap(cur_optimum_ptr->queue.R[0],
+ cur_optimum_ptr->queue.R[rep_max_idx]);
+ begin_queue = &cur_optimum_ptr->queue;
+
+ cur_optimum_ptr += rep_max_len;
+ cur_optimum_ptr->mc_item_data =
+ (rep_max_idx << MC_OFFSET_SHIFT) |
+ rep_max_len;
+
+ lzx_skip_bytes(c, rep_max_len - 1);
+ break;
+ }
+
+ /* If reaching any positions for the first time,
+ * initialize their costs to "infinity". */
+ while (end_optimum_ptr < cur_optimum_ptr + rep_max_len)
+ (++end_optimum_ptr)->cost = MC_INFINITE_COST;
+
+ /* Consider coding a repeat offset match. */
+ lzx_consider_repeat_offset_match(c,
+ cur_optimum_ptr,
+ rep_max_len,
+ rep_max_idx);
+ }
+
longest_len = matches[num_matches - 1].len;
+
+ /* If there's a very long explicit offset match, choose
+ * it immediately. */
if (longest_len >= c->params.nice_match_length) {
- /* Build the list of matches to return and get
- * the first one. */
- match = lzx_match_chooser_reverse_list(c, cur_pos);
- /* Append the long match to the end of the list. */
- c->optimum[cur_pos].next.match_offset =
+ cur_optimum_ptr->queue.R[2] =
+ cur_optimum_ptr->queue.R[1];
+ cur_optimum_ptr->queue.R[1] =
+ cur_optimum_ptr->queue.R[0];
+ cur_optimum_ptr->queue.R[0] =
matches[num_matches - 1].offset;
- c->optimum[cur_pos].next.link = cur_pos + longest_len;
- c->optimum_end_idx = cur_pos + longest_len;
+ begin_queue = &cur_optimum_ptr->queue;
- /* Skip over the remaining bytes of the long match. */
- lzx_skip_bytes(c, longest_len - 1);
+ offset_data = matches[num_matches - 1].offset +
+ LZX_OFFSET_OFFSET;
+ cur_optimum_ptr += longest_len;
+ cur_optimum_ptr->mc_item_data =
+ (offset_data << MC_OFFSET_SHIFT) |
+ longest_len;
- /* Return first match in the list. */
- return match;
+ lzx_skip_bytes(c, longest_len - 1);
+ break;
}
+
+ /* If reaching any positions for the first time,
+ * initialize their costs to "infinity". */
+ while (end_optimum_ptr < cur_optimum_ptr + longest_len)
+ (++end_optimum_ptr)->cost = MC_INFINITE_COST;
+
+ /* Consider coding an explicit offset match. */
+ lzx_consider_explicit_offset_matches(c, cur_optimum_ptr,
+ matches, num_matches);
} else {
- longest_len = 1;
+ /* No matches found. The only choice at this position
+ * is to code a literal. */
+
+ if (end_optimum_ptr == cur_optimum_ptr) {
+ #if 1
+ /* Optimization for single literals. */
+ if (likely(cur_optimum_ptr == c->optimum)) {
+ lzx_declare_literal(c, *window_ptr++,
+ next_chosen_item);
+ if (window_ptr == block_end) {
+ c->queue = cur_optimum_ptr->queue;
+ return;
+ }
+ continue;
+ }
+ #endif
+ (++end_optimum_ptr)->cost = MC_INFINITE_COST;
+ }
}
- while (end_pos < cur_pos + longest_len)
- c->optimum[++end_pos].cost = MC_INFINITE_COST;
-
- /* Consider coding a literal. */
- cost = c->optimum[cur_pos].cost +
- lzx_literal_cost(c->cur_window[c->match_window_pos - 1],
- &c->costs);
- if (cost < c->optimum[cur_pos + 1].cost) {
- c->optimum[cur_pos + 1].queue = c->optimum[cur_pos].queue;
- c->optimum[cur_pos + 1].cost = cost;
- c->optimum[cur_pos + 1].prev.link = cur_pos;
- }
+ /* Consider coding a literal.
- /* Consider coding a match.
- *
- * The hard-coded cost calculation is done for the same reason
- * stated in the comment for the similar loop earlier.
- * Actually, it is *this* one that has the biggest effect on
- * performance; overall LZX compression is > 10% faster with
- * this code compared to calling lzx_match_cost() with each
- * length. */
- for (unsigned i = 0, len = 2; i < num_matches; i++) {
- u32 offset;
- struct lzx_lru_queue queue;
- u32 position_cost;
- unsigned position_slot;
- unsigned num_extra_bits;
-
- offset = matches[i].offset;
- queue = c->optimum[cur_pos].queue;
- position_cost = c->optimum[cur_pos].cost;
-
- position_slot = lzx_get_position_slot(offset, &queue);
- num_extra_bits = lzx_get_num_extra_bits(position_slot);
- if (num_extra_bits >= 3) {
- position_cost += num_extra_bits - 3;
- position_cost += c->costs.aligned[
- (offset + LZX_OFFSET_OFFSET) & 7];
+ * To avoid an extra unpredictable branch, the check for whether
+ * coding a literal is preferable is integrated into the queue
+ * update code below. */
+ literal = *window_ptr++;
+ cost = cur_optimum_ptr->cost + lzx_literal_cost(literal, &c->costs);
+
+ /* Advance to the next position. */
+ cur_optimum_ptr++;
+
+ /* The lowest-cost path to the current position is now known.
+ * Finalize the recent offsets queue that results from taking
+ * this lowest-cost path. */
+
+ if (cost < cur_optimum_ptr->cost) {
+ /* Literal: queue remains unchanged. */
+ cur_optimum_ptr->cost = cost;
+ cur_optimum_ptr->mc_item_data = (literal << MC_OFFSET_SHIFT) | 1;
+ cur_optimum_ptr->queue = (cur_optimum_ptr - 1)->queue;
+ } else {
+ /* Match: queue update is needed. */
+ len = cur_optimum_ptr->mc_item_data & MC_LEN_MASK;
+ offset_data = cur_optimum_ptr->mc_item_data >> MC_OFFSET_SHIFT;
+ if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
+ /* Explicit offset match: offset is inserted at front */
+ cur_optimum_ptr->queue.R[0] = offset_data - LZX_OFFSET_OFFSET;
+ cur_optimum_ptr->queue.R[1] = (cur_optimum_ptr - len)->queue.R[0];
+ cur_optimum_ptr->queue.R[2] = (cur_optimum_ptr - len)->queue.R[1];
} else {
- position_cost += num_extra_bits;
+ /* Repeat offset match: offset is swapped to front */
+ cur_optimum_ptr->queue = (cur_optimum_ptr - len)->queue;
+ swap(cur_optimum_ptr->queue.R[0],
+ cur_optimum_ptr->queue.R[offset_data]);
}
-
- do {
- unsigned len_header;
- unsigned main_symbol;
- u32 cost;
-
- cost = position_cost;
-
- len_header = min(len - LZX_MIN_MATCH_LEN,
- LZX_NUM_PRIMARY_LENS);
- main_symbol = ((position_slot << 3) | len_header) +
- LZX_NUM_CHARS;
- cost += c->costs.main[main_symbol];
- if (len_header == LZX_NUM_PRIMARY_LENS) {
- cost += c->costs.len[len -
- LZX_MIN_MATCH_LEN -
- LZX_NUM_PRIMARY_LENS];
- }
- if (cost < c->optimum[cur_pos + len].cost) {
- c->optimum[cur_pos + len].queue = queue;
- c->optimum[cur_pos + len].prev.link = cur_pos;
- c->optimum[cur_pos + len].prev.match_offset = offset;
- c->optimum[cur_pos + len].cost = cost;
- }
- } while (++len <= matches[i].len);
}
- if (longest_rep_len >= LZX_MIN_MATCH_LEN) {
- struct lzx_lru_queue queue;
-
- while (end_pos < cur_pos + longest_rep_len)
- c->optimum[++end_pos].cost = MC_INFINITE_COST;
-
- queue = c->optimum[cur_pos].queue;
-
- cost = c->optimum[cur_pos].cost +
- lzx_match_cost(longest_rep_len, longest_rep_offset,
- &c->costs, &queue);
- if (cost <= c->optimum[cur_pos + longest_rep_len].cost) {
- c->optimum[cur_pos + longest_rep_len].queue =
- queue;
- c->optimum[cur_pos + longest_rep_len].prev.link =
- cur_pos;
- c->optimum[cur_pos + longest_rep_len].prev.match_offset =
- longest_rep_offset;
- c->optimum[cur_pos + longest_rep_len].cost =
- cost;
- }
+ /*
+ * This loop will terminate when either of the following
+ * conditions is true:
+ *
+ * (1) cur_optimum_ptr == end_optimum_ptr
+ *
+ * There are no paths that extend beyond the current
+ * position. In this case, any path to a later position
+ * must pass through the current position, so we can go
+ * ahead and choose the list of items that led to this
+ * position.
+ *
+ * (2) cur_optimum_ptr == &c->optimum[LZX_OPTIM_ARRAY_LENGTH]
+ *
+ * This bounds the number of times the algorithm can step
+ * forward before it is guaranteed to start choosing items.
+ * This limits the memory usage. But
+ * LZX_OPTIM_ARRAY_LENGTH is high enough that on most
+ * inputs this limit is never reached.
+ *
+ * Note: no check for end-of-block is needed because
+ * end-of-block will trigger condition (1).
+ */
+ if (cur_optimum_ptr == end_optimum_ptr ||
+ cur_optimum_ptr == &c->optimum[LZX_OPTIM_ARRAY_LENGTH])
+ {
+ begin_queue = &cur_optimum_ptr->queue;
+ break;
}
}
+
+ /* Choose the current list of items that constitute the minimum-cost
+ * path to the current position. */
+ lzx_declare_item_list(c, cur_optimum_ptr, next_chosen_item);
+ goto begin;
}
-static struct lz_match
-lzx_choose_lazy_item(struct lzx_compressor *c)
+/* Fast heuristic scoring for lazy parsing: how "good" is this match? */
+static inline unsigned
+lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
{
- const struct lz_match *matches;
- struct lz_match cur_match;
- struct lz_match next_match;
- u32 num_matches;
+ unsigned score = len;
+
+ if (adjusted_offset < 2048)
+ score++;
+
+ if (adjusted_offset < 1024)
+ score++;
+
+ return score;
+}
+
+static inline unsigned
+lzx_repeat_offset_match_score(unsigned len, unsigned slot)
+{
+ return len + 3;
+}
+
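+/*
+ * For example (scores only, not real data): a length 4 repeat offset match
+ * scores 4 + 3 = 7 and would be preferred over a length 5 explicit offset
+ * match at an adjusted offset of 900, which also scores 5 + 2 = 7, since ties
+ * go to the repeat offset match.
+ */
+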
+/* Lazy parsing */
+static u32
+lzx_choose_lazy_items_for_block(struct lzx_compressor *c,
+ u32 block_start_pos, u32 block_size)
+{
+ const u8 *window_ptr;
+ const u8 *block_end;
+ struct lz_mf *mf;
+ struct lz_match *matches;
+ unsigned num_matches;
+ unsigned cur_len;
+ u32 cur_offset_data;
+ unsigned cur_score;
+ unsigned rep_max_len;
+ unsigned rep_max_idx;
+ unsigned rep_score;
+ unsigned prev_len;
+ unsigned prev_score;
+ u32 prev_offset_data;
+ unsigned skip_len;
+ struct lzx_item *next_chosen_item;
+
+ window_ptr = &c->cur_window[block_start_pos];
+ block_end = window_ptr + block_size;
+ matches = c->cached_matches;
+ mf = c->mf;
+ next_chosen_item = c->chosen_items;
+
+ prev_len = 0;
+ prev_offset_data = 0;
+ prev_score = 0;
+
+ while (window_ptr != block_end) {
+
+ /* Find explicit offset matches with the current position. */
+ num_matches = lz_mf_get_matches(mf, matches);
+ window_ptr++;
- if (c->prev_match.len) {
- cur_match = c->prev_match;
- c->prev_match.len = 0;
- } else {
- num_matches = lzx_get_matches(c, &matches);
if (num_matches == 0 ||
- (matches[num_matches - 1].len <= 3 &&
- (matches[num_matches - 1].len <= 2 ||
- matches[num_matches - 1].offset > 4096)))
+ (matches[num_matches - 1].len == 3 &&
+ matches[num_matches - 1].offset >= 8192 - LZX_OFFSET_OFFSET &&
+ matches[num_matches - 1].offset != c->queue.R[0] &&
+ matches[num_matches - 1].offset != c->queue.R[1] &&
+ matches[num_matches - 1].offset != c->queue.R[2]))
{
- return (struct lz_match) { };
+ /* No match found, or the only match found was a distant
+ * length 3 match. Output the previous match if there
+ * is one; otherwise output a literal. */
+
+ no_match_found:
+
+ if (prev_len) {
+ skip_len = prev_len - 2;
+ goto output_prev_match;
+ } else {
+ lzx_declare_literal(c, *(window_ptr - 1),
+ &next_chosen_item);
+ continue;
+ }
}
- cur_match = matches[num_matches - 1];
- }
+ /* Find the longest repeat offset match with the current
+ * position. */
+ if (likely(block_end - (window_ptr - 1) >= 2)) {
+ rep_max_len = lzx_repsearch((window_ptr - 1),
+ block_end - (window_ptr - 1),
+ &c->queue, &rep_max_idx);
+ } else {
+ rep_max_len = 0;
+ }
- if (cur_match.len >= c->params.nice_match_length) {
- lzx_skip_bytes(c, cur_match.len - 1);
- return cur_match;
- }
+ cur_len = matches[num_matches - 1].len;
+ cur_offset_data = matches[num_matches - 1].offset + LZX_OFFSET_OFFSET;
+ cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
- num_matches = lzx_get_matches(c, &matches);
- if (num_matches == 0 ||
- (matches[num_matches - 1].len <= 3 &&
- (matches[num_matches - 1].len <= 2 ||
- matches[num_matches - 1].offset > 4096)))
- {
- lzx_skip_bytes(c, cur_match.len - 2);
- return cur_match;
- }
+ /* Select the better of the explicit and repeat offset matches. */
+ if (rep_max_len >= 3 &&
+ (rep_score = lzx_repeat_offset_match_score(rep_max_len,
+ rep_max_idx)) >= cur_score)
+ {
+ cur_len = rep_max_len;
+ cur_offset_data = rep_max_idx;
+ cur_score = rep_score;
+ }
- next_match = matches[num_matches - 1];
+ if (unlikely(cur_len > block_end - (window_ptr - 1))) {
+ /* Nearing end of block. */
+ cur_len = block_end - (window_ptr - 1);
+ if (cur_len < 3)
+ goto no_match_found;
+ }
- if (next_match.len <= cur_match.len) {
- lzx_skip_bytes(c, cur_match.len - 2);
- return cur_match;
- } else {
- c->prev_match = next_match;
- return (struct lz_match) { };
- }
-}
+ if (prev_len == 0 || cur_score > prev_score) {
+ /* No previous match, or the current match is better
+ * than the previous match.
+ *
+ * If there's a previous match, then output a literal in
+ * its place.
+ *
+ * In both cases, if the current match is very long,
+ * then output it immediately. Otherwise, attempt a
+ * lazy match by waiting to see if there's a better
+ * match at the next position. */
-/*
- * Return the next match or literal to use, delegating to the currently selected
- * match-choosing algorithm.
- *
- * If the length of the returned 'struct lz_match' is less than
- * LZX_MIN_MATCH_LEN, then it is really a literal.
- */
-static inline struct lz_match
-lzx_choose_item(struct lzx_compressor *c)
-{
- return (*c->params.choose_item_func)(c);
-}
+ if (prev_len)
+ lzx_declare_literal(c, *(window_ptr - 2), &next_chosen_item);
-/* Set default symbol costs for the LZX Huffman codes. */
-static void
-lzx_set_default_costs(struct lzx_costs * costs, unsigned num_main_syms)
-{
- unsigned i;
+ prev_len = cur_len;
+ prev_offset_data = cur_offset_data;
+ prev_score = cur_score;
- /* Main code (part 1): Literal symbols */
- for (i = 0; i < LZX_NUM_CHARS; i++)
- costs->main[i] = 8;
+ if (prev_len >= c->params.nice_match_length) {
+ skip_len = prev_len - 1;
+ goto output_prev_match;
+ }
+ continue;
+ }
- /* Main code (part 2): Match header symbols */
- for (; i < num_main_syms; i++)
- costs->main[i] = 10;
+ /* Current match is not better than the previous match, so
+ * output the previous match. */
- /* Length code */
- for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
- costs->len[i] = 8;
+ skip_len = prev_len - 2;
- /* Aligned offset code */
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- costs->aligned[i] = 3;
+ output_prev_match:
+ if (prev_offset_data < LZX_NUM_RECENT_OFFSETS) {
+ lzx_declare_repeat_offset_match(c, prev_len,
+ prev_offset_data,
+ &next_chosen_item);
+ swap(c->queue.R[0], c->queue.R[prev_offset_data]);
+ } else {
+ lzx_declare_explicit_offset_match(c, prev_len,
+ prev_offset_data - LZX_OFFSET_OFFSET,
+ &next_chosen_item);
+ c->queue.R[2] = c->queue.R[1];
+ c->queue.R[1] = c->queue.R[0];
+ c->queue.R[0] = prev_offset_data - LZX_OFFSET_OFFSET;
+ }
+ lz_mf_skip_positions(mf, skip_len);
+ window_ptr += skip_len;
+ prev_len = 0;
+ }
+
+ return next_chosen_item - c->chosen_items;
}
/* Given the frequencies of symbols in an LZX-compressed block and the
lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
const struct lzx_codes * codes)
{
- unsigned aligned_cost = 0;
- unsigned verbatim_cost = 0;
+ u32 aligned_cost = 0;
+ u32 verbatim_cost = 0;
- /* Verbatim blocks have a constant 3 bits per position footer. Aligned
- * offset blocks have an aligned offset symbol per position footer, plus
- * an extra 24 bits per block to output the lengths necessary to
- * reconstruct the aligned offset code itself. */
+ /* A verbatim block requires 3 bits in each place that an aligned symbol
+ * would be used in an aligned offset block. */
for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
verbatim_cost += 3 * freqs->aligned[i];
aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
}
+
+ /* Account for output of the aligned offset code. */
aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
+
if (aligned_cost < verbatim_cost)
return LZX_BLOCKTYPE_ALIGNED;
else
return LZX_BLOCKTYPE_VERBATIM;
}
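+
+/*
+ * For example, with hypothetical frequencies: if the aligned symbols occur
+ * 1000 times in total and the aligned offset code averages 2.5 bits per
+ * symbol, an aligned block costs about 2500 + 24 = 2524 bits for those
+ * symbols, versus 3000 bits in a verbatim block, so LZX_BLOCKTYPE_ALIGNED
+ * would be chosen.
+ */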
-/* Find a sequence of matches/literals with which to output the specified LZX
- * block, then set the block's type to that which has the minimum cost to output
- * (either verbatim or aligned). */
-static void
-lzx_choose_items_for_block(struct lzx_compressor *c, struct lzx_block_spec *spec)
+/* Near-optimal parsing */
+static u32
+lzx_choose_near_optimal_items_for_block(struct lzx_compressor *c,
+ u32 block_start_pos, u32 block_size)
{
- const struct lzx_lru_queue orig_queue = c->queue;
u32 num_passes_remaining = c->params.num_optim_passes;
- struct lzx_freqs freqs;
- const u8 *window_ptr;
- const u8 *window_end;
+ struct lzx_lru_queue orig_queue;
struct lzx_item *next_chosen_item;
- struct lz_match lz_match;
- struct lzx_item lzx_item;
-
- LZX_ASSERT(num_passes >= 1);
- LZX_ASSERT(lz_mf_get_position(c->mf) == spec->window_pos);
+ struct lzx_item **next_chosen_item_ptr;
- c->match_window_end = spec->window_pos + spec->block_size;
-
- if (c->params.num_optim_passes > 1) {
- if (spec->block_size == c->cur_window_size)
+ /* Choose appropriate match-finder wrapper functions. */
+ if (num_passes_remaining > 1) {
+ if (block_size == c->cur_window_size)
c->get_matches_func = lzx_get_matches_fillcache_singleblock;
else
c->get_matches_func = lzx_get_matches_fillcache_multiblock;
c->skip_bytes_func = lzx_skip_bytes_fillcache;
} else {
- if (spec->block_size == c->cur_window_size)
+ if (block_size == c->cur_window_size)
c->get_matches_func = lzx_get_matches_nocache_singleblock;
else
c->get_matches_func = lzx_get_matches_nocache_multiblock;
c->skip_bytes_func = lzx_skip_bytes_nocache;
}
- /* The first optimal parsing pass is done using the cost model already
- * set in c->costs. Each later pass is done using a cost model
- * computed from the previous pass.
+ /* No matches will extend beyond the end of the block. */
+ c->match_window_end = block_start_pos + block_size;
+
+ /* The first optimization pass will use a default cost model. Each
+ * additional optimization pass will use a cost model computed from the
+ * previous pass.
*
* To improve performance we only generate the array containing the
- * matches and literals in intermediate form on the final pass. */
+ * matches and literals in intermediate form on the final pass. For
+ * earlier passes, tallying symbol frequencies is sufficient. */
+ lzx_set_default_costs(&c->costs, c->num_main_syms);
- while (--num_passes_remaining) {
- c->match_window_pos = spec->window_pos;
+ next_chosen_item_ptr = NULL;
+ orig_queue = c->queue;
+ do {
+ /* Reset the match-finder wrapper. */
+ c->match_window_pos = block_start_pos;
c->cache_ptr = c->cached_matches;
- memset(&freqs, 0, sizeof(freqs));
- window_ptr = &c->cur_window[spec->window_pos];
- window_end = window_ptr + spec->block_size;
- while (window_ptr != window_end) {
+ if (num_passes_remaining == 1) {
+ /* Last pass: actually generate the items. */
+ next_chosen_item = c->chosen_items;
+ next_chosen_item_ptr = &next_chosen_item;
+ }
- lz_match = lzx_choose_item(c);
+ /* Choose the items. */
+ lzx_optim_pass(c, next_chosen_item_ptr);
- LZX_ASSERT(!(lz_match.len == LZX_MIN_MATCH_LEN &&
- lz_match.offset == c->max_window_size -
- LZX_MIN_MATCH_LEN));
- if (lz_match.len >= LZX_MIN_MATCH_LEN) {
- lzx_tally_match(lz_match.len, lz_match.offset,
- &freqs, &c->queue);
- window_ptr += lz_match.len;
- } else {
- lzx_tally_literal(*window_ptr, &freqs);
- window_ptr += 1;
- }
- }
- lzx_make_huffman_codes(&freqs, &spec->codes, c->num_main_syms);
- lzx_set_costs(c, &spec->codes.lens, 15);
- c->queue = orig_queue;
- if (c->cache_ptr <= c->cache_limit) {
- c->get_matches_func = lzx_get_matches_usecache_nocheck;
- c->skip_bytes_func = lzx_skip_bytes_usecache_nocheck;
- } else {
- c->get_matches_func = lzx_get_matches_usecache;
- c->skip_bytes_func = lzx_skip_bytes_usecache;
- }
- }
+ if (num_passes_remaining > 1) {
+ /* This isn't the last pass. */
- c->match_window_pos = spec->window_pos;
- c->cache_ptr = c->cached_matches;
- memset(&freqs, 0, sizeof(freqs));
- window_ptr = &c->cur_window[spec->window_pos];
- window_end = window_ptr + spec->block_size;
+ /* Make the Huffman codes from the symbol frequencies. */
+ lzx_make_huffman_codes(&c->freqs, &c->codes[c->codes_index],
+ c->num_main_syms);
- spec->chosen_items = &c->chosen_items[spec->window_pos];
- next_chosen_item = spec->chosen_items;
+ /* Update symbol costs. */
+ lzx_set_costs(c, &c->codes[c->codes_index].lens);
- unsigned unseen_cost = 9;
- while (window_ptr != window_end) {
+ /* Reset symbol frequencies. */
+ memset(&c->freqs, 0, sizeof(c->freqs));
- lz_match = lzx_choose_item(c);
+ /* Reset the match offset LRU queue to what it was at
+ * the beginning of the block. */
+ c->queue = orig_queue;
- LZX_ASSERT(!(lz_match.len == LZX_MIN_MATCH_LEN &&
- lz_match.offset == c->max_window_size -
- LZX_MIN_MATCH_LEN));
- if (lz_match.len >= LZX_MIN_MATCH_LEN) {
- lzx_item.data = lzx_tally_match(lz_match.len,
- lz_match.offset,
- &freqs, &c->queue);
- window_ptr += lz_match.len;
- } else {
- lzx_item.data = lzx_tally_literal(*window_ptr, &freqs);
- window_ptr += 1;
+ /* Choose appropriate match-finder wrapper functions. */
+ if (c->cache_ptr <= c->cache_limit) {
+ c->get_matches_func = lzx_get_matches_usecache_nocheck;
+ c->skip_bytes_func = lzx_skip_bytes_usecache_nocheck;
+ } else {
+ c->get_matches_func = lzx_get_matches_usecache;
+ c->skip_bytes_func = lzx_skip_bytes_usecache;
+ }
}
- *next_chosen_item++ = lzx_item;
+ } while (--num_passes_remaining);
- /* When doing one-pass "near-optimal" parsing, update the cost
- * model occassionally. */
- if (unlikely((next_chosen_item - spec->chosen_items) % 2048 == 0) &&
- c->params.choose_item_func == lzx_choose_near_optimal_item &&
- c->params.num_optim_passes == 1)
- {
- lzx_make_huffman_codes(&freqs, &spec->codes, c->num_main_syms);
- lzx_set_costs(c, &spec->codes.lens, unseen_cost);
- if (unseen_cost < 15)
- unseen_cost++;
- }
- }
- spec->num_chosen_items = next_chosen_item - spec->chosen_items;
- lzx_make_huffman_codes(&freqs, &spec->codes, c->num_main_syms);
- spec->block_type = lzx_choose_verbatim_or_aligned(&freqs, &spec->codes);
+ /* Return the number of items chosen. */
+ return next_chosen_item - c->chosen_items;
+}
+
+/*
+ * Choose the matches/literals with which to output the block of data beginning
+ * at '&c->cur_window[block_start_pos]' and extending for 'block_size' bytes.
+ *
+ * The frequencies of the Huffman symbols in the block will be tallied in
+ * 'c->freqs'.
+ *
+ * 'c->queue' must specify the state of the queue at the beginning of this block.
+ * This function will update it to the state of the queue at the end of this
+ * block.
+ *
+ * Returns the number of matches/literals that were chosen and written to
+ * 'c->chosen_items' in the 'struct lzx_item' intermediate representation.
+ */
+static u32
+lzx_choose_items_for_block(struct lzx_compressor *c,
+ u32 block_start_pos, u32 block_size)
+{
+ return (*c->params.choose_items_for_block)(c, block_start_pos, block_size);
}
-/* Prepare the input window into one or more LZX blocks ready to be output. */
+/* Initialize c->offset_slot_fast. */
static void
-lzx_prepare_blocks(struct lzx_compressor *c)
+lzx_init_offset_slot_fast(struct lzx_compressor *c)
{
- /* Set up a default cost model. */
- if (c->params.choose_item_func == lzx_choose_near_optimal_item)
- lzx_set_default_costs(&c->costs, c->num_main_syms);
+ u8 slot = 0;
- /* Set up the block specifications.
- * TODO: The compression ratio could be slightly improved by performing
- * data-dependent block splitting instead of using fixed-size blocks.
- * Doing so well is a computationally hard problem, however. */
- c->num_blocks = DIV_ROUND_UP(c->cur_window_size, LZX_DIV_BLOCK_SIZE);
- for (unsigned i = 0; i < c->num_blocks; i++) {
- u32 pos = LZX_DIV_BLOCK_SIZE * i;
- c->block_specs[i].window_pos = pos;
- c->block_specs[i].block_size = min(c->cur_window_size - pos,
- LZX_DIV_BLOCK_SIZE);
- }
+ for (u32 offset = 0; offset < LZX_NUM_FAST_OFFSETS; offset++) {
- /* Load the window into the match-finder. */
- lz_mf_load_window(c->mf, c->cur_window, c->cur_window_size);
+ while (offset + LZX_OFFSET_OFFSET >= lzx_offset_slot_base[slot + 1])
+ slot++;
- /* Determine sequence of matches/literals to output for each block. */
- lzx_lru_queue_init(&c->queue);
- c->optimum_cur_idx = 0;
- c->optimum_end_idx = 0;
- c->prev_match.len = 0;
- for (unsigned i = 0; i < c->num_blocks; i++)
- lzx_choose_items_for_block(c, &c->block_specs[i]);
+ c->offset_slot_fast[offset] = slot;
+ }
}
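+
+/*
+ * With this table, finding the offset slot for a small offset is a single
+ * array lookup; a sketch of the intended usage ('offset' and 'offset_slot'
+ * are illustrative variables):
+ *
+ *     if (offset < LZX_NUM_FAST_OFFSETS)
+ *         offset_slot = c->offset_slot_fast[offset];
+ *     else
+ *         offset_slot = lzx_get_offset_slot_raw(offset + LZX_OFFSET_OFFSET);
+ */
+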
+/* Set internal compression parameters for the specified compression level and
+ * maximum window size. */
static void
-lzx_build_params(unsigned int compression_level,
- u32 max_window_size,
+lzx_build_params(unsigned int compression_level, u32 max_window_size,
struct lzx_compressor_params *lzx_params)
{
if (compression_level < 25) {
- lzx_params->choose_item_func = lzx_choose_lazy_item;
- lzx_params->num_optim_passes = 1;
+
+ /* Fast compression: Use lazy parsing. */
+
+ lzx_params->choose_items_for_block = lzx_choose_lazy_items_for_block;
+ lzx_params->num_optim_passes = 1;
+
+ /* When lazy parsing, the hash chain match-finding algorithm is
+ * fastest unless the window is too large.
+ *
+ * TODO: something like hash arrays would actually be better
+ * than binary trees on large windows. */
if (max_window_size <= 262144)
lzx_params->mf_algo = LZ_MF_HASH_CHAINS;
else
lzx_params->mf_algo = LZ_MF_BINARY_TREES;
- lzx_params->min_match_length = 3;
+
+ /* When lazy parsing, don't bother with length 2 matches. */
+ lzx_params->min_match_length = 3;
+
+ /* Scale nice_match_length and max_search_depth with the
+ * compression level. */
lzx_params->nice_match_length = 25 + compression_level * 2;
- lzx_params->max_search_depth = 25 + compression_level;
+ lzx_params->max_search_depth = 25 + compression_level;
} else {
- lzx_params->choose_item_func = lzx_choose_near_optimal_item;
- lzx_params->num_optim_passes = compression_level / 20;
+
+ /* Normal / high compression: Use near-optimal parsing. */
+
+ lzx_params->choose_items_for_block = lzx_choose_near_optimal_items_for_block;
+
+ /* Set a number of optimization passes appropriate for the
+ * compression level. */
+
+ lzx_params->num_optim_passes = 1;
+
+ if (compression_level >= 40)
+ lzx_params->num_optim_passes++;
+
+ /* Use more optimization passes for higher compression levels.
+ * But the more passes there are, the less they help --- so
+ * don't add them linearly. */
+ if (compression_level >= 70) {
+ lzx_params->num_optim_passes++;
+ if (compression_level >= 100)
+ lzx_params->num_optim_passes++;
+ if (compression_level >= 150)
+ lzx_params->num_optim_passes++;
+ if (compression_level >= 200)
+ lzx_params->num_optim_passes++;
+ if (compression_level >= 300)
+ lzx_params->num_optim_passes++;
+ }
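+
+ /* (With the rules above, e.g. compression level 50 results in 2
+ * optimization passes, level 100 in 4 passes, and level 300 or higher
+ * in the maximum of 7 passes.) */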
+
+ /* When doing near-optimal parsing, the hash chain match-finding
+ * algorithm is good if the window size is small and we're only
+ * doing one optimization pass. Otherwise, the binary tree
+ * algorithm is the way to go. */
if (max_window_size <= 32768 && lzx_params->num_optim_passes == 1)
lzx_params->mf_algo = LZ_MF_HASH_CHAINS;
else
lzx_params->mf_algo = LZ_MF_BINARY_TREES;
- lzx_params->min_match_length = (compression_level >= 45) ? 2 : 3;
+
+ /* When doing near-optimal parsing, allow length 2 matches if
+ * the compression level is sufficiently high. */
+ if (compression_level >= 45)
+ lzx_params->min_match_length = 2;
+ else
+ lzx_params->min_match_length = 3;
+
+ /* Scale nice_match_length and max_search_depth with the
+ * compression level. */
lzx_params->nice_match_length = min(((u64)compression_level * 32) / 50,
LZX_MAX_MATCH_LEN);
- lzx_params->max_search_depth = min(((u64)compression_level * 50) / 50,
- LZX_MAX_MATCH_LEN);
+ lzx_params->max_search_depth = min(((u64)compression_level * 50) / 50,
+ LZX_MAX_MATCH_LEN);
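+
+ /* (E.g. level 50 yields nice_match_length = 32 and
+ * max_search_depth = 50, while level 100 yields 64 and 100; both
+ * values are capped at LZX_MAX_MATCH_LEN.) */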
}
}
+/* Given the internal compression parameters and maximum window size, build the
+ * Lempel-Ziv match-finder parameters. */
static void
lzx_build_mf_params(const struct lzx_compressor_params *lzx_params,
u32 max_window_size, struct lz_mf_params *mf_params)
lzx_free_compressor(void *_c);
static u64
-lzx_get_needed_memory(size_t max_window_size, unsigned int compression_level)
+lzx_get_needed_memory(size_t max_block_size, unsigned int compression_level)
{
struct lzx_compressor_params params;
u64 size = 0;
+ unsigned window_order;
+ u32 max_window_size;
- if (!lzx_window_size_valid(max_window_size))
+ window_order = lzx_get_window_order(max_block_size);
+ if (window_order == 0)
return 0;
+ max_window_size = max_block_size;
lzx_build_params(compression_level, max_window_size, &params);
size += sizeof(struct lzx_compressor);
+ /* cur_window */
size += max_window_size;
- size += DIV_ROUND_UP(max_window_size, LZX_DIV_BLOCK_SIZE) *
- sizeof(struct lzx_block_spec);
-
- size += max_window_size * sizeof(struct lzx_item);
-
+ /* mf */
size += lz_mf_get_needed_memory(params.mf_algo, max_window_size);
- if (params.choose_item_func == lzx_choose_near_optimal_item) {
- size += (LZX_OPTIM_ARRAY_LENGTH + params.nice_match_length) *
- sizeof(struct lzx_mc_pos_data);
- }
+
+ /* cached_matches */
if (params.num_optim_passes > 1)
size += LZX_CACHE_LEN * sizeof(struct lz_match);
else
}
static int
-lzx_create_compressor(size_t max_window_size, unsigned int compression_level,
+lzx_create_compressor(size_t max_block_size, unsigned int compression_level,
void **c_ret)
{
struct lzx_compressor *c;
struct lzx_compressor_params params;
struct lz_mf_params mf_params;
+ unsigned window_order;
+ u32 max_window_size;
- if (!lzx_window_size_valid(max_window_size))
+ window_order = lzx_get_window_order(max_block_size);
+ if (window_order == 0)
return WIMLIB_ERR_INVALID_PARAM;
+ max_window_size = max_block_size;
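+ /* The window holds the entire input, so the window buffer only needs
+ * to be max_block_size bytes. */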
lzx_build_params(compression_level, max_window_size, &params);
lzx_build_mf_params(&params, max_window_size, &mf_params);
goto oom;
c->params = params;
- c->num_main_syms = lzx_get_num_main_syms(max_window_size);
- c->max_window_size = max_window_size;
+ c->num_main_syms = lzx_get_num_main_syms(window_order);
+ c->window_order = window_order;
+ /* The window is allocated with 16-byte alignment to speed up memcpy() and
+ * to enable the lzx_e8_filter() optimization on x86_64. */
c->cur_window = ALIGNED_MALLOC(max_window_size, 16);
if (!c->cur_window)
goto oom;
- c->block_specs = MALLOC(DIV_ROUND_UP(max_window_size,
- LZX_DIV_BLOCK_SIZE) *
- sizeof(struct lzx_block_spec));
- if (!c->block_specs)
- goto oom;
-
- c->chosen_items = MALLOC(max_window_size * sizeof(struct lzx_item));
- if (!c->chosen_items)
- goto oom;
-
c->mf = lz_mf_alloc(&mf_params);
if (!c->mf)
goto oom;
- if (params.choose_item_func == lzx_choose_near_optimal_item) {
- c->optimum = MALLOC((LZX_OPTIM_ARRAY_LENGTH +
- params.nice_match_length) *
- sizeof(struct lzx_mc_pos_data));
- if (!c->optimum)
- goto oom;
- }
-
if (params.num_optim_passes > 1) {
c->cached_matches = MALLOC(LZX_CACHE_LEN *
sizeof(struct lz_match));
goto oom;
}
+ lzx_init_offset_slot_fast(c);
+
*c_ret = c;
return 0;
void *compressed_data, size_t compressed_size_avail, void *_c)
{
struct lzx_compressor *c = _c;
- struct output_bitstream ostream;
- size_t compressed_size;
+ struct lzx_output_bitstream os;
+ u32 num_chosen_items;
+ const struct lzx_lens *prev_lens;
+ u32 block_start_pos;
+ u32 block_size;
+ int block_type;
- if (uncompressed_size < 100) {
- LZX_DEBUG("Too small to bother compressing.");
+ /* Don't bother compressing very small inputs. */
+ if (uncompressed_size < 100)
return 0;
- }
-
- LZX_DEBUG("Attempting to compress %zu bytes...",
- uncompressed_size);
/* The input data must be preprocessed. To avoid changing the original
- * input, copy it to a temporary buffer. */
+ * input data, copy it to a temporary buffer. */
memcpy(c->cur_window, uncompressed_data, uncompressed_size);
c->cur_window_size = uncompressed_size;
- /* Before doing any actual compression, do the call instruction (0xe8
- * byte) translation on the uncompressed data. */
+ /* Preprocess the data. */
lzx_do_e8_preprocessing(c->cur_window, c->cur_window_size);
- /* Prepare the compressed data. */
- lzx_prepare_blocks(c);
+ /* Load the window into the match-finder. */
+ lz_mf_load_window(c->mf, c->cur_window, c->cur_window_size);
+
+ /* Initialize the match offset LRU queue. */
+ lzx_lru_queue_init(&c->queue);
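+ /* (The LZX format maintains a queue of the 3 most recently used match
+ * offsets, which starts out as {1, 2, 3}.) */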
- /* Generate the compressed data. */
- init_output_bitstream(&ostream, compressed_data, compressed_size_avail);
- lzx_write_all_blocks(c, &ostream);
+ /* Initialize the output bitstream. */
+ lzx_init_output(&os, compressed_data, compressed_size_avail);
- compressed_size = flush_output_bitstream(&ostream);
- if (compressed_size == (u32)~0UL) {
- LZX_DEBUG("Data did not compress to %zu bytes or less!",
- compressed_size_avail);
- return 0;
- }
+ /* Compress the data block by block.
+ *
+ * TODO: The compression ratio could be slightly improved by performing
+ * data-dependent block splitting instead of using fixed-size blocks.
+ * Doing so well is a computationally hard problem, however. */
+ block_start_pos = 0;
+ c->codes_index = 0;
+ prev_lens = &c->zero_lens;
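+ /* (Each block's Huffman codeword lengths are written as deltas from
+ * the lengths used in the previous block; the first block is
+ * delta-encoded against all-zero lengths.) */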
+ do {
+ /* Compute the block size. */
+ block_size = min(LZX_DIV_BLOCK_SIZE,
+ uncompressed_size - block_start_pos);
+
+ /* Reset symbol frequencies. */
+ memset(&c->freqs, 0, sizeof(c->freqs));
+
+ /* Prepare the matches/literals for the block. */
+ num_chosen_items = lzx_choose_items_for_block(c,
+ block_start_pos,
+ block_size);
+
+ /* Make the Huffman codes from the symbol frequencies. */
+ lzx_make_huffman_codes(&c->freqs, &c->codes[c->codes_index],
+ c->num_main_syms);
+
+ /* Choose the best block type.
+ *
+ * Note: we currently don't consider uncompressed blocks. */
+ block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
+ &c->codes[c->codes_index]);
+
+ /* Write the compressed block to the output buffer. */
+ lzx_write_compressed_block(block_type,
+ block_size,
+ c->window_order,
+ c->num_main_syms,
+ c->chosen_items,
+ num_chosen_items,
+ &c->codes[c->codes_index],
+ prev_lens,
+ &os);
+
+ /* The current codeword lengths become the previous lengths. */
+ prev_lens = &c->codes[c->codes_index].lens;
+ c->codes_index ^= 1;
+
+ block_start_pos += block_size;
- LZX_DEBUG("Done: compressed %zu => %zu bytes.",
- uncompressed_size, compressed_size);
+ } while (block_start_pos != uncompressed_size);
- return compressed_size;
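+ /* Flush the output bitstream and return the final compressed size, or
+ * 0 if the compressed data did not fit in the available space. */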
+ return lzx_flush_output(&os);
}
static void
if (c) {
ALIGNED_FREE(c->cur_window);
- FREE(c->block_specs);
- FREE(c->chosen_items);
lz_mf_free(c->mf);
- FREE(c->optimum);
FREE(c->cached_matches);
FREE(c);
}