/*
* lzx_compress.c
*
- * A compressor for the LZX compression format, as used in WIM files.
+ * A compressor for the LZX compression format, as used in WIM archives.
*/
/*
- * Copyright (C) 2012, 2013, 2014 Eric Biggers
+ * Copyright (C) 2012-2017 Eric Biggers
*
* This file is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the Free
* This file contains a compressor for the LZX ("Lempel-Ziv eXtended")
* compression format, as used in the WIM (Windows IMaging) file format.
*
- * Two different parsing algorithms are implemented: "near-optimal" and "lazy".
- * "Near-optimal" is significantly slower than "lazy", but results in a better
- * compression ratio. The "near-optimal" algorithm is used at the default
- * compression level.
+ * Two different LZX-compatible algorithms are implemented: "near-optimal" and
+ * "lazy". "Near-optimal" is significantly slower than "lazy", but results in a
+ * better compression ratio. The "near-optimal" algorithm is used at the
+ * default compression level.
*
* This file may need some slight modifications to be used outside of the WIM
* format. In particular, in other situations the LZX block header might be
- * slightly different, and a sliding window rather than a fixed-size window
- * might be required.
+ * slightly different, and sliding window support might be required.
*
- * Note: LZX is a compression format derived from DEFLATE, the format used by
- * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding.
- * Certain details are quite similar, such as the method for storing Huffman
- * codes. However, the main differences are:
+ * LZX is a compression format closely related to DEFLATE, the format used by
+ * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding.
+ * Certain details are quite similar, such as the method for storing Huffman
+ * codes. However, the main differences are:
*
* - LZX preprocesses the data to attempt to make x86 machine code slightly more
* compressible before attempting to compress it further.
* ("verbatim" and "aligned").
*
* - LZX has a minimum match length of 2 rather than 3. Length 2 matches can be
- * useful, but generally only if the parser is smart about choosing them.
+ * useful, but generally only if the compressor is smart about choosing them.
*
* - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue
* of match offsets. This is very useful for certain types of files, such as
* binary files that have repeating records.
*/
+/******************************************************************************/
+/* General parameters */
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The compressor uses the faster (lazy) algorithm at levels <= MAX_FAST_LEVEL,
+ * and the slower (near-optimal) algorithm at higher levels.
+ */
+#define MAX_FAST_LEVEL 34
+
+/*
+ * The compressor-side limits on the codeword lengths (in bits) for each Huffman
+ * code. To make outputting bits slightly faster, some of these limits are
+ * lower than the limits defined by the LZX format. This does not significantly
+ * affect the compression ratio.
+ */
+#define MAIN_CODEWORD_LIMIT 16
+#define LENGTH_CODEWORD_LIMIT 12
+#define ALIGNED_CODEWORD_LIMIT 7
+#define PRE_CODEWORD_LIMIT 7
+
+
+/******************************************************************************/
+/* Block splitting parameters */
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The compressor always outputs blocks of at least this size in bytes, except
+ * for the last block which may need to be smaller.
+ */
+#define MIN_BLOCK_SIZE 6500
+
+/*
+ * The compressor attempts to end a block when it reaches this size in bytes.
+ * The final size might be slightly larger due to matches extending beyond the
+ * end of the block. Specifically:
+ *
+ * - The near-optimal compressor may choose a match of up to LZX_MAX_MATCH_LEN
+ * bytes starting at position 'SOFT_MAX_BLOCK_SIZE - 1'.
+ *
+ * - The lazy compressor may choose a sequence of literals starting at position
+ * 'SOFT_MAX_BLOCK_SIZE - 1' when it sees a sequence of increasingly better
+ * matches. The final match may be up to LZX_MAX_MATCH_LEN bytes. The
+ * length of the literal sequence is approximately limited by the "nice match
+ * length" parameter.
+ */
+#define SOFT_MAX_BLOCK_SIZE 100000
+
+/*
+ * The number of observed items (matches and literals) that represents
+ * sufficient data for the compressor to decide whether the current block should
+ * be ended or not.
+ */
+#define NUM_OBSERVATIONS_PER_BLOCK_CHECK 400
+
+
+/******************************************************************************/
+/* Parameters for slower algorithm */
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The log base 2 of the number of entries in the hash table for finding length
+ * 2 matches. This could be as high as 16, but using a smaller hash table
+ * speeds up compression due to reduced cache pressure.
+ */
+#define BT_MATCHFINDER_HASH2_ORDER 12
+
+/*
+ * The number of lz_match structures in the match cache, excluding the extra
+ * "overflow" entries. This value should be high enough so that nearly the
+ * time, all matches found in a given block can fit in the match cache.
+ * However, fallback behavior (immediately terminating the block) on cache
+ * overflow is still required.
+ */
+#define CACHE_LENGTH (SOFT_MAX_BLOCK_SIZE * 5)
+
+/*
+ * An upper bound on the number of matches that can ever be saved in the match
+ * cache for a single position. Since each match we save for a single position
+ * has a distinct length, we can use the number of possible match lengths in LZX
+ * as this bound. This bound is guaranteed to be valid in all cases, although
+ * if 'nice_match_length < LZX_MAX_MATCH_LEN', then it will never actually be
+ * reached.
+ */
+#define MAX_MATCHES_PER_POS LZX_NUM_LENS
+
+/*
+ * A scaling factor that makes it possible to consider fractional bit costs. A
+ * single bit has a cost of BIT_COST.
+ *
+ * Note: this is only useful as a statistical trick for when the true costs are
+ * unknown. Ultimately, each token in LZX requires a whole number of bits to
+ * output.
+ */
+#define BIT_COST 64
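+
+/*
+ * For example (illustrative): a symbol whose estimated cost is 4.25 bits gets
+ * the scaled cost 4.25 * BIT_COST = 272, so two such symbols (544) compare
+ * meaningfully against one 8-bit symbol (8 * BIT_COST = 512).
+ */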
+
+/*
+ * Should the compressor take into account the costs of aligned offset symbols
+ * instead of assuming that all are equally likely?
+ */
+#define CONSIDER_ALIGNED_COSTS 1
+
+/*
+ * Should the "minimum" cost path search algorithm consider "gap" matches, where
+ * a normal match is followed by a literal, then by a match with the same
+ * offset? This is one specific, somewhat common situation in which the true
+ * minimum cost path is often different from the path found by looking only one
+ * edge ahead.
+ */
+#define CONSIDER_GAP_MATCHES 1
+
+/******************************************************************************/
+/* Includes */
+/*----------------------------------------------------------------------------*/
+
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "wimlib/compress_common.h"
#include "wimlib/compressor_ops.h"
-#include "wimlib/endianness.h"
#include "wimlib/error.h"
-#include "wimlib/lz_mf.h"
-#include "wimlib/lz_repsearch.h"
+#include "wimlib/lz_extend.h"
#include "wimlib/lzx_common.h"
+#include "wimlib/unaligned.h"
#include "wimlib/util.h"
-#include <string.h>
-#include <limits.h>
-
-#define LZX_OPTIM_ARRAY_LENGTH 4096
-
-#define LZX_DIV_BLOCK_SIZE 32768
+/* Note: BT_MATCHFINDER_HASH2_ORDER must be defined before including
+ * bt_matchfinder.h. */
-#define LZX_CACHE_PER_POS 8
+/* Matchfinders with 16-bit positions */
+#define mf_pos_t u16
+#define MF_SUFFIX _16
+#include "wimlib/bt_matchfinder.h"
+#include "wimlib/hc_matchfinder.h"
-#define LZX_MAX_MATCHES_PER_POS (LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1)
+/* Matchfinders with 32-bit positions */
+#undef mf_pos_t
+#undef MF_SUFFIX
+#define mf_pos_t u32
+#define MF_SUFFIX _32
+#include "wimlib/bt_matchfinder.h"
+#include "wimlib/hc_matchfinder.h"
-#define LZX_CACHE_LEN (LZX_DIV_BLOCK_SIZE * (LZX_CACHE_PER_POS + 1))
+/******************************************************************************/
+/* Compressor structure */
+/*----------------------------------------------------------------------------*/
-struct lzx_compressor;
-
-/* Codewords for the LZX Huffman codes. */
+/* Codewords for the Huffman codes */
struct lzx_codewords {
u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
u32 len[LZX_LENCODE_NUM_SYMBOLS];
u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
-/* Codeword lengths (in bits) for the LZX Huffman codes.
- * A zero length means the corresponding codeword has zero frequency. */
+/*
+ * Codeword lengths, in bits, for the Huffman codes.
+ *
+ * A codeword length of 0 means the corresponding codeword has zero frequency.
+ *
+ * The main and length codes each have one extra entry for use as a sentinel.
+ * See lzx_write_compressed_code().
+ */
struct lzx_lens {
- u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
- u8 len[LZX_LENCODE_NUM_SYMBOLS];
- u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
-};
-
-/* Estimated cost, in bits, to output each symbol in the LZX Huffman codes. */
-struct lzx_costs {
- u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
- u8 len[LZX_LENCODE_NUM_SYMBOLS];
+ u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
+ u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
-/* Codewords and lengths for the LZX Huffman codes. */
+/* Codewords and lengths for the Huffman codes */
struct lzx_codes {
struct lzx_codewords codewords;
struct lzx_lens lens;
};
-/* Symbol frequency counters for the LZX Huffman codes. */
+/* Symbol frequency counters for the Huffman-encoded alphabets */
struct lzx_freqs {
u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
u32 len[LZX_LENCODE_NUM_SYMBOLS];
u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
-/* Intermediate LZX match/literal format */
-struct lzx_item {
-
- /* Bits 0 - 9: Main symbol
- * Bits 10 - 17: Length symbol
- * Bits 18 - 22: Number of extra offset bits
- * Bits 23+ : Extra offset bits */
- u64 data;
+/* Block split statistics. See the "Block splitting algorithm" section later in
+ * this file for details. */
+#define NUM_LITERAL_OBSERVATION_TYPES 8
+#define NUM_MATCH_OBSERVATION_TYPES 2
+#define NUM_OBSERVATION_TYPES (NUM_LITERAL_OBSERVATION_TYPES + \
+ NUM_MATCH_OBSERVATION_TYPES)
+struct lzx_block_split_stats {
+ u32 new_observations[NUM_OBSERVATION_TYPES];
+ u32 observations[NUM_OBSERVATION_TYPES];
+ u32 num_new_observations;
+ u32 num_observations;
};
-/* Internal compression parameters */
-struct lzx_compressor_params {
- u32 (*choose_items_for_block)(struct lzx_compressor *, u32, u32);
- u32 num_optim_passes;
- enum lz_mf_algo mf_algo;
- u32 min_match_length;
- u32 nice_match_length;
- u32 max_search_depth;
-};
+/*
+ * Represents a run of literals followed by a match or end-of-block. This
+ * structure is needed to temporarily store items chosen by the compressor,
+ * since items cannot be written until all items for the block have been chosen
+ * and the block's Huffman codes have been computed.
+ */
+struct lzx_sequence {
+
+ /*
+ * Bits 9..31: the number of literals in this run. This may be 0 and
+	 * can be at most about SOFT_MAX_BLOCK_SIZE. The literals are not
+ * stored explicitly in this structure; instead, they are read directly
+ * from the uncompressed data.
+ *
+ * Bits 0..8: the length of the match which follows the literals, or 0
+ * if this literal run was the last in the block, so there is no match
+ * which follows it. This can be at most LZX_MAX_MATCH_LEN.
+ */
+ u32 litrunlen_and_matchlen;
+#define SEQ_MATCHLEN_BITS 9
+#define SEQ_MATCHLEN_MASK (((u32)1 << SEQ_MATCHLEN_BITS) - 1)
+
+ /*
+ * If 'matchlen' doesn't indicate end-of-block, then this contains:
+ *
+ * Bits 10..31: either the offset plus LZX_OFFSET_ADJUSTMENT or a recent
+ * offset code, depending on the offset slot encoded in the main symbol.
+ *
+ * Bits 0..9: the main symbol.
+ */
+ u32 adjusted_offset_and_mainsym;
+#define SEQ_MAINSYM_BITS 10
+#define SEQ_MAINSYM_MASK (((u32)1 << SEQ_MAINSYM_BITS) - 1)
+} _aligned_attribute(8);
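+
+/*
+ * Illustrative sketch (hypothetical helpers; nothing else in this file uses
+ * them): how a literal run length and match length are packed into and
+ * unpacked from 'litrunlen_and_matchlen'.
+ */
+static forceinline u32
+lzx_seq_pack_example(u32 litrunlen, u32 matchlen)
+{
+	/* Literal run length in the high 23 bits, match length in the low 9 */
+	return (litrunlen << SEQ_MATCHLEN_BITS) | matchlen;
+}
+
+static forceinline u32
+lzx_seq_litrunlen_example(u32 v)
+{
+	return v >> SEQ_MATCHLEN_BITS;
+}
+
+static forceinline unsigned
+lzx_seq_matchlen_example(u32 v)
+{
+	return v & SEQ_MATCHLEN_MASK;
+}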
/*
- * Match chooser position data:
+ * This structure represents a byte position in the input buffer and a node in
+ * the graph of possible match/literal choices.
*
- * An array of these structures is used during the near-optimal match-choosing
- * algorithm. They correspond to consecutive positions in the window and are
- * used to keep track of the cost to reach each position, and the match/literal
- * choices that need to be chosen to reach that position.
+ * Logically, each incoming edge to this node is labeled with a literal or a
+ * match that can be taken to reach this position from an earlier position; and
+ * each outgoing edge from this node is labeled with a literal or a match that
+ * can be taken to advance from this position to a later position.
*/
-struct lzx_mc_pos_data {
+struct lzx_optimum_node {
/* The cost, in bits, of the lowest-cost path that has been found to
* reach this position. This can change as progressively lower cost
* paths are found to reach this position. */
u32 cost;
-#define MC_INFINITE_COST UINT32_MAX
- /* The match or literal that was taken to reach this position. This can
- * change as progressively lower cost paths are found to reach this
- * position.
+ /*
+ * The best arrival to this node, i.e. the match or literal that was
+ * used to arrive to this position at the given 'cost'. This can change
+ * as progressively lower cost paths are found to reach this position.
*
- * This variable is divided into two bitfields.
+ * For non-gap matches, this variable is divided into two bitfields
+ * whose meanings depend on the item type:
*
* Literals:
- * Low bits are 1, high bits are the literal.
+ * Low bits are 0, high bits are the literal.
*
* Explicit offset matches:
- * Low bits are the match length, high bits are the offset plus 2.
+ * Low bits are the match length, high bits are the offset plus
+ * LZX_OFFSET_ADJUSTMENT.
*
* Repeat offset matches:
* Low bits are the match length, high bits are the queue index.
+ *
+ * For gap matches, identified by OPTIMUM_GAP_MATCH set, special
+ * behavior applies --- see the code.
*/
- u32 mc_item_data;
-#define MC_OFFSET_SHIFT 9
-#define MC_LEN_MASK ((1 << MC_OFFSET_SHIFT) - 1)
+ u32 item;
+#define OPTIMUM_OFFSET_SHIFT SEQ_MATCHLEN_BITS
+#define OPTIMUM_LEN_MASK SEQ_MATCHLEN_MASK
+#if CONSIDER_GAP_MATCHES
+# define OPTIMUM_GAP_MATCH 0x80000000
+#endif
- /* The state of the LZX recent match offsets queue at this position.
- * This is filled in lazily, only after the minimum-cost path to this
- * position is found.
- *
- * Note: the way we handle this adaptive state in the "minimum-cost"
- * parse is actually only an approximation. It's possible for the
- * globally optimal, minimum cost path to contain a prefix, ending at a
- * position, where that path prefix is *not* the minimum cost path to
- * that position. This can happen if such a path prefix results in a
- * different adaptive state which results in lower costs later. We do
- * not solve this problem; we only consider the lowest cost to reach
- * each position, which seems to be an acceptable approximation. */
- struct lzx_lru_queue queue _aligned_attribute(16);
-
-} _aligned_attribute(16);
-
-/* State of the LZX compressor */
+} _aligned_attribute(8);
+
+/* The cost model for near-optimal parsing */
+struct lzx_costs {
+
+ /*
+ * 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost of a
+ * length 'len' match which has an offset belonging to 'offset_slot'.
+ * The cost includes the main symbol, the length symbol if required, and
+ * the extra offset bits if any, excluding any entropy-coded bits
+ * (aligned offset bits). It does *not* include the cost of the aligned
+ * offset symbol which may be required.
+ */
+ u16 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS];
+
+ /* Cost of each symbol in the main code */
+ u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
+
+ /* Cost of each symbol in the length code */
+ u32 len[LZX_LENCODE_NUM_SYMBOLS];
+
+#if CONSIDER_ALIGNED_COSTS
+ /* Cost of each symbol in the aligned offset code */
+ u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
+#endif
+};
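+
+/*
+ * Note (assumption about code elsewhere in this file): these tables are
+ * refilled from the current Huffman codeword lengths before each optimization
+ * pass. Roughly, 'match_cost[slot][len - LZX_MIN_MATCH_LEN]' sums the main
+ * symbol cost, the length symbol cost when len >= LZX_MIN_SECONDARY_LEN, and
+ * BIT_COST times the number of verbatim extra offset bits.
+ */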
+
+struct lzx_output_bitstream;
+
+/* The main LZX compressor structure */
struct lzx_compressor {
- /* Internal compression parameters */
- struct lzx_compressor_params params;
+ /* The buffer for preprocessed input data, if not using destructive
+ * compression */
+ void *in_buffer;
- /* The preprocessed buffer of data being compressed */
- u8 *cur_window;
+ /* If true, then the compressor need not preserve the input buffer if it
+ * compresses the data successfully */
+ bool destructive;
- /* Number of bytes of data to be compressed, which is the number of
- * bytes of data in @cur_window that are actually valid. */
- u32 cur_window_size;
+ /* Pointer to the compress() implementation chosen at allocation time */
+ void (*impl)(struct lzx_compressor *, const u8 *, size_t,
+ struct lzx_output_bitstream *);
- /* log2 order of the LZX window size for LZ match offset encoding
- * purposes. Will be >= LZX_MIN_WINDOW_ORDER and <=
- * LZX_MAX_WINDOW_ORDER.
- *
- * Note: 1 << @window_order is normally equal to @max_window_size,
- * a.k.a. the allocated size of @cur_window, but it will be greater than
- * @max_window_size in the event that the compressor was created with a
- * non-power-of-2 block size. (See lzx_get_window_order().) */
+ /* The log base 2 of the window size for match offset encoding purposes.
+ * This will be >= LZX_MIN_WINDOW_ORDER and <= LZX_MAX_WINDOW_ORDER. */
unsigned window_order;
- /* Number of symbols in the main alphabet. This depends on
- * @window_order, since @window_order determines the maximum possible
- * offset. It does not, however, depend on the *actual* size of the
- * current data buffer being processed, which might be less than 1 <<
- * @window_order. */
+ /* The number of symbols in the main alphabet. This depends on the
+ * window order, since the window order determines the maximum possible
+ * match offset. */
unsigned num_main_syms;
- /* Lempel-Ziv match-finder */
- struct lz_mf *mf;
+ /* The "nice" match length: if a match of this length is found, then it
+ * is chosen immediately without further consideration. */
+ unsigned nice_match_length;
- /* Match-finder wrapper functions and data for near-optimal parsing.
- *
- * When doing more than one match-choosing pass over the data, matches
- * found by the match-finder are cached to achieve a slight speedup when
- * the same matches are needed on subsequent passes. This is suboptimal
- * because different matches may be preferred with different cost
- * models, but it is a very worthwhile speedup. */
- unsigned (*get_matches_func)(struct lzx_compressor *, const struct lz_match **);
- void (*skip_bytes_func)(struct lzx_compressor *, unsigned n);
- u32 match_window_pos;
- u32 match_window_end;
- struct lz_match *cached_matches;
- struct lz_match *cache_ptr;
- struct lz_match *cache_limit;
-
- /* Position data for near-optimal parsing. */
- struct lzx_mc_pos_data optimum[LZX_OPTIM_ARRAY_LENGTH + LZX_MAX_MATCH_LEN];
-
- /* The cost model currently being used for near-optimal parsing. */
- struct lzx_costs costs;
-
- /* The current match offset LRU queue. */
- struct lzx_lru_queue queue;
-
- /* Frequency counters for the current block. */
+ /* The maximum search depth: at most this many potential matches are
+ * considered at each position. */
+ unsigned max_search_depth;
+
+ /* The number of optimization passes per block */
+ unsigned num_optim_passes;
+
+ /* The symbol frequency counters for the current block */
struct lzx_freqs freqs;
- /* The Huffman codes for the current and previous blocks. */
- struct lzx_codes codes[2];
+ /* Block split statistics for the current block */
+ struct lzx_block_split_stats split_stats;
- /* Which 'struct lzx_codes' is being used for the current block. The
- * other was used for the previous block (if this isn't the first
- * block). */
- unsigned int codes_index;
+ /* The Huffman codes for the current and previous blocks. The one with
+ * index 'codes_index' is for the current block, and the other one is
+ * for the previous block. */
+ struct lzx_codes codes[2];
+ unsigned codes_index;
+
+ /* The matches and literals that the compressor has chosen for the
+ * current block. The required length of this array is limited by the
+ * maximum number of matches that can ever be chosen for a single block,
+ * plus one for the special entry at the end. */
+ struct lzx_sequence chosen_sequences[
+ DIV_ROUND_UP(SOFT_MAX_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];
+
+ /* Tables for mapping adjusted offsets to offset slots */
+ u8 offset_slot_tab_1[32768]; /* offset slots [0, 29] */
+ u8 offset_slot_tab_2[128]; /* offset slots [30, 49] */
+
+ union {
+ /* Data for lzx_compress_lazy() */
+ struct {
+ /* Hash chains matchfinder (MUST BE LAST!!!) */
+ union {
+ struct hc_matchfinder_16 hc_mf_16;
+ struct hc_matchfinder_32 hc_mf_32;
+ };
+ };
- /* Dummy lengths that are always 0. */
- struct lzx_lens zero_lens;
+ /* Data for lzx_compress_near_optimal() */
+ struct {
+ /*
+ * Array of nodes, one per position, for running the
+ * minimum-cost path algorithm.
+ *
+ * This array must be large enough to accommodate the
+ * worst-case number of nodes, which occurs if the
+ * compressor finds a match of length LZX_MAX_MATCH_LEN
+ * at position 'SOFT_MAX_BLOCK_SIZE - 1', producing a
+ * block of size 'SOFT_MAX_BLOCK_SIZE - 1 +
+ * LZX_MAX_MATCH_LEN'. Add one for the end-of-block
+ * node.
+ */
+ struct lzx_optimum_node optimum_nodes[
+ SOFT_MAX_BLOCK_SIZE - 1 +
+ LZX_MAX_MATCH_LEN + 1];
- /* Matches/literals that were chosen for the current block. */
- struct lzx_item chosen_items[LZX_DIV_BLOCK_SIZE];
+ /* The cost model for the current optimization pass */
+ struct lzx_costs costs;
- /* Table mapping match offset => offset slot for small offsets */
-#define LZX_NUM_FAST_OFFSETS 32768
- u8 offset_slot_fast[LZX_NUM_FAST_OFFSETS];
+ /*
+ * Cached matches for the current block. This array
+ * contains the matches that were found at each position
+ * in the block. Specifically, for each position, there
+ * is a special 'struct lz_match' whose 'length' field
+ * contains the number of matches that were found at
+ * that position; this is followed by the matches
+ * themselves, if any, sorted by strictly increasing
+ * length.
+ *
+ * Note: in rare cases, there will be a very high number
+ * of matches in the block and this array will overflow.
+ * If this happens, we force the end of the current
+ * block. CACHE_LENGTH is the length at which we
+ * actually check for overflow. The extra slots beyond
+ * this are enough to absorb the worst case overflow,
+ * which occurs if starting at &match_cache[CACHE_LENGTH
+ * - 1], we write the match count header, then write
+ * MAX_MATCHES_PER_POS matches, then skip searching for
+ * matches at 'LZX_MAX_MATCH_LEN - 1' positions and
+ * write the match count header for each.
+ */
+ struct lz_match match_cache[CACHE_LENGTH +
+ MAX_MATCHES_PER_POS +
+ LZX_MAX_MATCH_LEN - 1];
+
+ /* Binary trees matchfinder (MUST BE LAST!!!) */
+ union {
+ struct bt_matchfinder_16 bt_mf_16;
+ struct bt_matchfinder_32 bt_mf_32;
+ };
+ };
+ };
};
+/******************************************************************************/
+/* Matchfinder utilities */
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Will a matchfinder using 16-bit positions be sufficient for compressing
+ * buffers of up to the specified size? The limit could be 65536 bytes, but we
+ * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
+ * This requires that the limit be no more than the length of offset_slot_tab_1
+ * (currently 32768).
+ */
+static forceinline bool
+lzx_is_16_bit(size_t max_bufsize)
+{
+ STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
+ return max_bufsize <= 32768;
+}
+
+/*
+ * Return the offset slot for the specified adjusted match offset.
+ */
+static forceinline unsigned
+lzx_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
+ bool is_16_bit)
+{
+ if (__builtin_constant_p(adjusted_offset) &&
+ adjusted_offset < LZX_NUM_RECENT_OFFSETS)
+ return adjusted_offset;
+ if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
+ return c->offset_slot_tab_1[adjusted_offset];
+ return c->offset_slot_tab_2[adjusted_offset >> 14];
+}
+
+/*
+ * For a match that has the specified length and adjusted offset, tally its main
+ * symbol, and if needed its length symbol; then return its main symbol.
+ */
+static forceinline unsigned
+lzx_tally_main_and_lensyms(struct lzx_compressor *c, unsigned length,
+ u32 adjusted_offset, bool is_16_bit)
+{
+ unsigned mainsym;
+
+ if (length >= LZX_MIN_SECONDARY_LEN) {
+ /* Length symbol needed */
+ c->freqs.len[length - LZX_MIN_SECONDARY_LEN]++;
+ mainsym = LZX_NUM_CHARS + LZX_NUM_PRIMARY_LENS;
+ } else {
+ /* No length symbol needed */
+ mainsym = LZX_NUM_CHARS + length - LZX_MIN_MATCH_LEN;
+ }
+
+ mainsym += LZX_NUM_LEN_HEADERS *
+ lzx_get_offset_slot(c, adjusted_offset, is_16_bit);
+ c->freqs.main[mainsym]++;
+ return mainsym;
+}
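+
+/*
+ * Worked example (illustrative, assuming the standard LZX constants
+ * LZX_NUM_CHARS = 256, LZX_NUM_LEN_HEADERS = 8, LZX_NUM_PRIMARY_LENS = 7): a
+ * length 3 match in offset slot 5 uses length header 3 - LZX_MIN_MATCH_LEN = 1
+ * and tallies main symbol 256 + 1 + 8 * 5 = 297 with no length symbol, while a
+ * length 20 match in offset slot 5 tallies length symbol
+ * 20 - LZX_MIN_SECONDARY_LEN = 11 and main symbol 256 + 7 + 8 * 5 = 303.
+ */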
+
+/*
+ * The following macros call either the 16-bit or the 32-bit version of a
+ * matchfinder function based on the value of 'is_16_bit', which will be known
+ * at compilation time.
+ */
+
+#define CALL_HC_MF(is_16_bit, c, funcname, ...)			       \
+	((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
+		       CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__))
+
+#define CALL_BT_MF(is_16_bit, c, funcname, ...)			       \
+	((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \
+		       CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__))
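+
+/*
+ * Usage sketch (assumed call pattern; the real call sites appear in the
+ * compression routines later in this file). Because 'is_16_bit' is a
+ * compile-time constant at each call site, the untaken branch is optimized
+ * away entirely:
+ *
+ *	CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
+ */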
+
+/******************************************************************************/
+/* Output bitstream */
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The LZX bitstream is encoded as a sequence of little endian 16-bit coding
+ * units. Bits are ordered from most significant to least significant within
+ * each coding unit.
+ */
+
/*
* Structure to keep track of the current state of sending bits to the
* compressed output buffer.
- *
- * The LZX bitstream is encoded as a sequence of 16-bit coding units.
*/
struct lzx_output_bitstream {
- /* Bits that haven't yet been written to the output buffer. */
- u32 bitbuf;
+ /* Bits that haven't yet been written to the output buffer */
+ machine_word_t bitbuf;
- /* Number of bits currently held in @bitbuf. */
- u32 bitcount;
+ /* Number of bits currently held in @bitbuf */
+ machine_word_t bitcount;
- /* Pointer to the start of the output buffer. */
- le16 *start;
+ /* Pointer to the start of the output buffer */
+ u8 *start;
/* Pointer to the position in the output buffer at which the next coding
- * unit should be written. */
- le16 *next;
+ * unit should be written */
+ u8 *next;
- /* Pointer past the end of the output buffer. */
- le16 *end;
+ /* Pointer to just past the end of the output buffer, rounded down by
+ * one byte if needed to make 'end - start' a multiple of 2 */
+ u8 *end;
};
-/*
- * Initialize the output bitstream.
- *
- * @os
- * The output bitstream structure to initialize.
- * @buffer
- * The buffer being written to.
- * @size
- * Size of @buffer, in bytes.
- */
+/* Can the specified number of bits always be added to 'bitbuf' after all
+ * pending 16-bit coding units have been flushed? */
+#define CAN_BUFFER(n) ((n) <= WORDBITS - 15)
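+
+/*
+ * For example (illustrative): with 64-bit words, WORDBITS - 15 = 49, so
+ * CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT) = CAN_BUFFER(48) holds and three main
+ * codewords can be buffered per flush; with 32-bit words, at most 17 bits fit,
+ * so codewords must be flushed one at a time.
+ */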
+
+/* Initialize the output bitstream to write to the specified buffer. */
static void
-lzx_init_output(struct lzx_output_bitstream *os, void *buffer, u32 size)
+lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size)
{
os->bitbuf = 0;
os->bitcount = 0;
os->start = buffer;
- os->next = os->start;
- os->end = os->start + size / sizeof(le16);
+ os->next = buffer;
+ os->end = (u8 *)buffer + (size & ~1);
}
/*
- * Write some bits to the output bitstream.
- *
- * The bits are given by the low-order @num_bits bits of @bits. Higher-order
- * bits in @bits cannot be set. At most 17 bits can be written at once.
- *
- * @max_num_bits is a compile-time constant that specifies the maximum number of
- * bits that can ever be written at the call site. Currently, it is used to
- * optimize away the conditional code for writing a second 16-bit coding unit
- * when writing fewer than 17 bits.
- *
- * If the output buffer space is exhausted, then the bits will be ignored, and
- * lzx_flush_output() will return 0 when it gets called.
+ * Add some bits to the bitbuffer variable of the output bitstream. The caller
+ * must make sure there is enough room.
*/
-static inline void
-lzx_write_varbits(struct lzx_output_bitstream *os,
- const u32 bits, const unsigned int num_bits,
- const unsigned int max_num_bits)
+static forceinline void
+lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
{
- /* This code is optimized for LZX, which never needs to write more than
- * 17 bits at once. */
- LZX_ASSERT(num_bits <= 17);
- LZX_ASSERT(num_bits <= max_num_bits);
- LZX_ASSERT(os->bitcount <= 15);
-
- /* Add the bits to the bit buffer variable. @bitcount will be at most
- * 15, so there will be just enough space for the maximum possible
- * @num_bits of 17. */
- os->bitcount += num_bits;
os->bitbuf = (os->bitbuf << num_bits) | bits;
+ os->bitcount += num_bits;
+}
- /* Check whether any coding units need to be written. */
- if (os->bitcount >= 16) {
-
- os->bitcount -= 16;
-
- /* Write a coding unit, unless it would overflow the buffer. */
- if (os->next != os->end)
- put_unaligned_u16_le(os->bitbuf >> os->bitcount, os->next++);
+/*
+ * Flush bits from the bitbuffer variable to the output buffer. 'max_num_bits'
+ * specifies the maximum number of bits that may have been added since the last
+ * flush.
+ */
+static forceinline void
+lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
+{
+ /* Masking the number of bits to shift is only needed to avoid undefined
+ * behavior; we don't actually care about the results of bad shifts. On
+ * x86, the explicit masking generates no extra code. */
+ const u32 shift_mask = WORDBITS - 1;
- /* If writing 17 bits, a second coding unit might need to be
- * written. But because 'max_num_bits' is a compile-time
- * constant, the compiler will optimize away this code at most
- * call sites. */
- if (max_num_bits == 17 && os->bitcount == 16) {
- if (os->next != os->end)
- put_unaligned_u16_le(os->bitbuf, os->next++);
- os->bitcount = 0;
- }
- }
+ if (os->end - os->next < 6)
+ return;
+ put_unaligned_le16(os->bitbuf >> ((os->bitcount - 16) &
+ shift_mask), os->next + 0);
+ if (max_num_bits > 16)
+ put_unaligned_le16(os->bitbuf >> ((os->bitcount - 32) &
+ shift_mask), os->next + 2);
+ if (max_num_bits > 32)
+ put_unaligned_le16(os->bitbuf >> ((os->bitcount - 48) &
+ shift_mask), os->next + 4);
+ os->next += (os->bitcount >> 4) << 1;
+ os->bitcount &= 15;
}
-/* Use when @num_bits is a compile-time constant. Otherwise use
- * lzx_write_varbits(). */
-static inline void
-lzx_write_bits(struct lzx_output_bitstream *os,
- const u32 bits, const unsigned int num_bits)
+/* Add at most 16 bits to the bitbuffer and flush it. */
+static forceinline void
+lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
{
- lzx_write_varbits(os, bits, num_bits, num_bits);
+ lzx_add_bits(os, bits, num_bits);
+ lzx_flush_bits(os, 16);
}
/*
* Flush the last coding unit to the output buffer if needed. Return the total
* number of bytes written to the output buffer, or 0 if an overflow occurred.
*/
-static u32
+static size_t
lzx_flush_output(struct lzx_output_bitstream *os)
{
- if (os->next == os->end)
+ if (os->end - os->next < 6)
return 0;
- if (os->bitcount != 0)
- put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next++);
+ if (os->bitcount != 0) {
+ put_unaligned_le16(os->bitbuf << (16 - os->bitcount), os->next);
+ os->next += 2;
+ }
- return (const u8 *)os->next - (const u8 *)os->start;
+ return os->next - os->start;
}
-/* Build the main, length, and aligned offset Huffman codes used in LZX.
- *
- * This takes as input the frequency tables for each code and produces as output
- * a set of tables that map symbols to codewords and codeword lengths. */
+/******************************************************************************/
+/* Preparing Huffman codes */
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Build the Huffman codes. This takes as input the frequency tables for each
+ * code and produces as output a set of tables that map symbols to codewords and
+ * codeword lengths.
+ */
static void
-lzx_make_huffman_codes(const struct lzx_freqs *freqs, struct lzx_codes *codes,
- unsigned num_main_syms)
+lzx_build_huffman_codes(struct lzx_compressor *c)
{
- make_canonical_huffman_code(num_main_syms,
- LZX_MAX_MAIN_CODEWORD_LEN,
+ const struct lzx_freqs *freqs = &c->freqs;
+ struct lzx_codes *codes = &c->codes[c->codes_index];
+
+ STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
+ MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
+ make_canonical_huffman_code(c->num_main_syms,
+ MAIN_CODEWORD_LIMIT,
freqs->main,
codes->lens.main,
codes->codewords.main);
+ STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
+ LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
- LZX_MAX_LEN_CODEWORD_LEN,
+ LENGTH_CODEWORD_LIMIT,
freqs->len,
codes->lens.len,
codes->codewords.len);
+ STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
+ ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
- LZX_MAX_ALIGNED_CODEWORD_LEN,
+ ALIGNED_CODEWORD_LIMIT,
freqs->aligned,
codes->lens.aligned,
codes->codewords.aligned);
}
+/* Reset the symbol frequencies for the current block. */
+static void
+lzx_reset_symbol_frequencies(struct lzx_compressor *c)
+{
+ memset(&c->freqs, 0, sizeof(c->freqs));
+}
+
static unsigned
lzx_compute_precode_items(const u8 lens[restrict],
const u8 prev_lens[restrict],
- const unsigned num_lens,
u32 precode_freqs[restrict],
unsigned precode_items[restrict])
{
itemptr = precode_items;
run_start = 0;
- do {
- /* Find the next run of codeword lengths. */
+
+ while (!((len = lens[run_start]) & 0x80)) {
/* len = the length being repeated */
- len = lens[run_start];
+
+ /* Find the next run of codeword lengths. */
run_end = run_start + 1;
/* Fast case for a single length. */
- if (likely(run_end == num_lens || len != lens[run_end])) {
+ if (likely(len != lens[run_end])) {
delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
/* Extend the run. */
do {
run_end++;
- } while (run_end != num_lens && len == lens[run_end]);
+ } while (len == lens[run_end]);
if (len == 0) {
/* Run of zeroes. */
/* Symbol 18: RLE 20 to 51 zeroes at a time. */
while ((run_end - run_start) >= 20) {
- extra_bits = min((run_end - run_start) - 20, 0x1f);
+ extra_bits = min((run_end - run_start) - 20, 0x1F);
precode_freqs[18]++;
*itemptr++ = 18 | (extra_bits << 5);
run_start += 20 + extra_bits;
/* Symbol 17: RLE 4 to 19 zeroes at a time. */
if ((run_end - run_start) >= 4) {
- extra_bits = min((run_end - run_start) - 4, 0xf);
+ extra_bits = min((run_end - run_start) - 4, 0xF);
precode_freqs[17]++;
*itemptr++ = 17 | (extra_bits << 5);
run_start += 4 + extra_bits;
*itemptr++ = delta;
run_start++;
}
- } while (run_start != num_lens);
+ }
return itemptr - precode_items;
}
+/******************************************************************************/
+/* Outputting compressed data */
+/*----------------------------------------------------------------------------*/
+
/*
* Output a Huffman code in the compressed form used in LZX.
*
unsigned precode_item;
unsigned precode_sym;
unsigned i;
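+	/* Temporarily install a sentinel (0x80) in the extra entry at
+	 * lens[num_lens], so that lzx_compute_precode_items() can detect the
+	 * end of the array without an explicit length check; real codeword
+	 * lengths are always less than 0x80. The original byte is restored
+	 * before returning. */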
+ u8 saved = lens[num_lens];
+ *(u8 *)(lens + num_lens) = 0x80;
for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
precode_freqs[i] = 0;
* the codeword lengths in the larger code will be output. */
num_precode_items = lzx_compute_precode_items(lens,
prev_lens,
- num_lens,
precode_freqs,
precode_items);
/* Build the precode. */
- make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
- LZX_MAX_PRE_CODEWORD_LEN,
+ STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 &&
+ PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN);
+ make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS, PRE_CODEWORD_LIMIT,
precode_freqs, precode_lens,
precode_codewords);
for (i = 0; i < num_precode_items; i++) {
precode_item = precode_items[i];
precode_sym = precode_item & 0x1F;
- lzx_write_varbits(os, precode_codewords[precode_sym],
- precode_lens[precode_sym],
- LZX_MAX_PRE_CODEWORD_LEN);
+ lzx_add_bits(os, precode_codewords[precode_sym],
+ precode_lens[precode_sym]);
if (precode_sym >= 17) {
if (precode_sym == 17) {
- lzx_write_bits(os, precode_item >> 5, 4);
+ lzx_add_bits(os, precode_item >> 5, 4);
} else if (precode_sym == 18) {
- lzx_write_bits(os, precode_item >> 5, 5);
+ lzx_add_bits(os, precode_item >> 5, 5);
} else {
- lzx_write_bits(os, (precode_item >> 5) & 1, 1);
+ lzx_add_bits(os, (precode_item >> 5) & 1, 1);
precode_sym = precode_item >> 6;
- lzx_write_varbits(os, precode_codewords[precode_sym],
- precode_lens[precode_sym],
- LZX_MAX_PRE_CODEWORD_LEN);
+ lzx_add_bits(os, precode_codewords[precode_sym],
+ precode_lens[precode_sym]);
}
}
+ STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1));
+ lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1);
}
-}
-
-/* Output a match or literal. */
-static inline void
-lzx_write_item(struct lzx_output_bitstream *os, struct lzx_item item,
- unsigned ones_if_aligned, const struct lzx_codes *codes)
-{
- u64 data = item.data;
- unsigned main_symbol;
- unsigned len_symbol;
- unsigned num_extra_bits;
- u32 extra_bits;
-
- main_symbol = data & 0x3FF;
-
- lzx_write_varbits(os, codes->codewords.main[main_symbol],
- codes->lens.main[main_symbol],
- LZX_MAX_MAIN_CODEWORD_LEN);
-
- if (main_symbol < LZX_NUM_CHARS) /* Literal? */
- return;
-
- len_symbol = (data >> 10) & 0xFF;
-
- if (len_symbol != LZX_LENCODE_NUM_SYMBOLS) {
- lzx_write_varbits(os, codes->codewords.len[len_symbol],
- codes->lens.len[len_symbol],
- LZX_MAX_LEN_CODEWORD_LEN);
- }
-
- num_extra_bits = (data >> 18) & 0x1F;
- if (num_extra_bits == 0) /* Small offset or repeat offset match? */
- return;
- extra_bits = data >> 23;
-
- /*if (block_type == LZX_BLOCKTYPE_ALIGNED && num_extra_bits >= 3) {*/
- if ((num_extra_bits & ones_if_aligned) >= 3) {
-
- /* Aligned offset blocks: The low 3 bits of the extra offset
- * bits are Huffman-encoded using the aligned offset code. The
- * remaining bits are output literally. */
-
- lzx_write_varbits(os, extra_bits >> 3, num_extra_bits - 3, 14);
-
- lzx_write_varbits(os, codes->codewords.aligned[extra_bits & 7],
- codes->lens.aligned[extra_bits & 7],
- LZX_MAX_ALIGNED_CODEWORD_LEN);
- } else {
- /* Verbatim blocks, or fewer than 3 extra bits: All extra
- * offset bits are output literally. */
- lzx_write_varbits(os, extra_bits, num_extra_bits, 17);
- }
+ *(u8 *)(lens + num_lens) = saved;
}
/*
* @block_type
* The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
* LZX_BLOCKTYPE_VERBATIM).
- * @items
- * The array of matches/literals to output.
- * @num_items
- * Number of matches/literals to output (length of @items).
+ * @block_data
+ * The uncompressed data of the block.
+ * @sequences
+ * The matches and literals to output, given as a series of sequences.
* @codes
- * The main, length, and aligned offset Huffman codes for the current
- * LZX compressed block.
+ * The main, length, and aligned offset Huffman codes for the block.
*/
static void
-lzx_write_items(struct lzx_output_bitstream *os, int block_type,
- const struct lzx_item items[], u32 num_items,
- const struct lzx_codes *codes)
+lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
+ const u8 *block_data, const struct lzx_sequence sequences[],
+ const struct lzx_codes *codes)
{
- unsigned ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
+ const struct lzx_sequence *seq = sequences;
+ unsigned min_aligned_offset_slot;
+
+ if (block_type == LZX_BLOCKTYPE_ALIGNED)
+ min_aligned_offset_slot = LZX_MIN_ALIGNED_OFFSET_SLOT;
+ else
+ min_aligned_offset_slot = LZX_MAX_OFFSET_SLOTS;
+
+ for (;;) {
+ /* Output the next sequence. */
+
+ u32 litrunlen = seq->litrunlen_and_matchlen >> SEQ_MATCHLEN_BITS;
+ unsigned matchlen = seq->litrunlen_and_matchlen & SEQ_MATCHLEN_MASK;
+ STATIC_ASSERT((u32)~SEQ_MATCHLEN_MASK >> SEQ_MATCHLEN_BITS >=
+ SOFT_MAX_BLOCK_SIZE);
+ u32 adjusted_offset;
+ unsigned main_symbol;
+ unsigned offset_slot;
+ unsigned num_extra_bits;
+ u32 extra_bits;
+
+ /* Output the literal run of the sequence. */
+
+ if (litrunlen) { /* Is the literal run nonempty? */
+
+ /* Verify optimization is enabled on 64-bit */
+ STATIC_ASSERT(WORDBITS < 64 ||
+ CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT));
+
+ if (CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT)) {
+
+ /* 64-bit: write 3 literals at a time. */
+ while (litrunlen >= 3) {
+ unsigned lit0 = block_data[0];
+ unsigned lit1 = block_data[1];
+ unsigned lit2 = block_data[2];
+ lzx_add_bits(os, codes->codewords.main[lit0],
+ codes->lens.main[lit0]);
+ lzx_add_bits(os, codes->codewords.main[lit1],
+ codes->lens.main[lit1]);
+ lzx_add_bits(os, codes->codewords.main[lit2],
+ codes->lens.main[lit2]);
+ lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
+ block_data += 3;
+ litrunlen -= 3;
+ }
+ if (litrunlen--) {
+ unsigned lit = *block_data++;
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
+ if (litrunlen--) {
+ unsigned lit = *block_data++;
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
+ lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
+ } else {
+ lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT);
+ }
+ }
+ } else {
+ /* 32-bit: write 1 literal at a time. */
+ do {
+ unsigned lit = *block_data++;
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
+ lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
+ } while (--litrunlen);
+ }
+ }
- for (u32 i = 0; i < num_items; i++)
- lzx_write_item(os, items[i], ones_if_aligned, codes);
+ /* Was this the last literal run? */
+ if (matchlen == 0)
+ return;
+
+ /* Nope; output the match. */
+
+ block_data += matchlen;
+
+ adjusted_offset = seq->adjusted_offset_and_mainsym >> SEQ_MAINSYM_BITS;
+ main_symbol = seq->adjusted_offset_and_mainsym & SEQ_MAINSYM_MASK;
+
+ offset_slot = (main_symbol - LZX_NUM_CHARS) / LZX_NUM_LEN_HEADERS;
+ num_extra_bits = lzx_extra_offset_bits[offset_slot];
+ extra_bits = adjusted_offset - (lzx_offset_slot_base[offset_slot] +
+ LZX_OFFSET_ADJUSTMENT);
+
+ #define MAX_MATCH_BITS (MAIN_CODEWORD_LIMIT + \
+ LENGTH_CODEWORD_LIMIT + \
+ LZX_MAX_NUM_EXTRA_BITS - \
+ LZX_NUM_ALIGNED_OFFSET_BITS + \
+ ALIGNED_CODEWORD_LIMIT)
+
+ /* Verify optimization is enabled on 64-bit */
+ STATIC_ASSERT(WORDBITS < 64 || CAN_BUFFER(MAX_MATCH_BITS));
+
+ /* Output the main symbol for the match. */
+
+ lzx_add_bits(os, codes->codewords.main[main_symbol],
+ codes->lens.main[main_symbol]);
+ if (!CAN_BUFFER(MAX_MATCH_BITS))
+ lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
+
+ /* If needed, output the length symbol for the match. */
+
+ if (matchlen >= LZX_MIN_SECONDARY_LEN) {
+ lzx_add_bits(os, codes->codewords.len[matchlen -
+ LZX_MIN_SECONDARY_LEN],
+ codes->lens.len[matchlen -
+ LZX_MIN_SECONDARY_LEN]);
+ if (!CAN_BUFFER(MAX_MATCH_BITS))
+ lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
+ }
+
+ /* Output the extra offset bits for the match. In aligned
+ * offset blocks, the lowest 3 bits of the adjusted offset are
+ * Huffman-encoded using the aligned offset code, provided that
+		 * at least 3 extra offset bits are required. All other
+ * extra offset bits are output verbatim. */
+
+ if (offset_slot >= min_aligned_offset_slot) {
+
+ lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
+ num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
+ if (!CAN_BUFFER(MAX_MATCH_BITS))
+ lzx_flush_bits(os, LZX_MAX_NUM_EXTRA_BITS -
+ LZX_NUM_ALIGNED_OFFSET_BITS);
+
+ lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
+ LZX_ALIGNED_OFFSET_BITMASK],
+ codes->lens.aligned[adjusted_offset &
+ LZX_ALIGNED_OFFSET_BITMASK]);
+ if (!CAN_BUFFER(MAX_MATCH_BITS))
+ lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
+ } else {
+ STATIC_ASSERT(CAN_BUFFER(LZX_MAX_NUM_EXTRA_BITS));
+
+ lzx_add_bits(os, extra_bits, num_extra_bits);
+ if (!CAN_BUFFER(MAX_MATCH_BITS))
+ lzx_flush_bits(os, LZX_MAX_NUM_EXTRA_BITS);
+ }
+
+ if (CAN_BUFFER(MAX_MATCH_BITS))
+ lzx_flush_bits(os, MAX_MATCH_BITS);
+
+ /* Advance to the next sequence. */
+ seq++;
+ }
}
-/* Write an LZX aligned offset or verbatim block to the output bitstream. */
static void
-lzx_write_compressed_block(int block_type,
+lzx_write_compressed_block(const u8 *block_begin,
+ int block_type,
u32 block_size,
unsigned window_order,
unsigned num_main_syms,
- struct lzx_item * chosen_items,
- u32 num_chosen_items,
+ const struct lzx_sequence sequences[],
const struct lzx_codes * codes,
const struct lzx_lens * prev_lens,
struct lzx_output_bitstream * os)
{
- LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
- block_type == LZX_BLOCKTYPE_VERBATIM);
-
/* The first three bits indicate the type of block and are one of the
* LZX_BLOCKTYPE_* constants. */
lzx_write_bits(os, block_type, 3);
- /* Output the block size.
+ /*
+ * Output the block size.
*
- * The original LZX format seemed to always encode the block size in 3
- * bytes. However, the implementation in WIMGAPI, as used in WIM files,
- * uses the first bit to indicate whether the block is the default size
- * (32768) or a different size given explicitly by the next 16 bits.
+ * The original LZX format encoded the block size in 24 bits. However,
+ * the LZX format used in WIM archives uses 1 bit to specify whether the
+ * block has the default size of 32768 bytes, then optionally 16 bits to
+ * specify a non-default size. This works fine for Microsoft's WIM
+ * software (WIMGAPI), which never compresses more than 32768 bytes at a
+ * time with LZX. However, as an extension, our LZX compressor supports
+ * compressing up to 2097152 bytes, with a corresponding increase in
+ * window size. It is possible for blocks in these larger buffers to
+ * exceed 65535 bytes; such blocks cannot have their size represented in
+ * 16 bits.
*
- * By default, this compressor uses a window size of 32768 and therefore
- * follows the WIMGAPI behavior. However, this compressor also supports
- * window sizes greater than 32768 bytes, which do not appear to be
- * supported by WIMGAPI. In such cases, we retain the default size bit
- * to mean a size of 32768 bytes but output non-default block size in 24
- * bits rather than 16. The compatibility of this behavior is unknown
- * because WIMs created with chunk size greater than 32768 can seemingly
- * only be opened by wimlib anyway. */
+ * The chosen solution was to use 24 bits for the block size when
+ * possibly required --- specifically, when the compressor has been
+ * allocated to be capable of compressing more than 32768 bytes at once
+ * (which also causes the number of main symbols to be increased).
+ */
if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
lzx_write_bits(os, 1, 1);
} else {
LZX_LENCODE_NUM_SYMBOLS);
/* Output the compressed matches and literals. */
- lzx_write_items(os, block_type, chosen_items, num_chosen_items, codes);
+ lzx_write_sequences(os, block_type, block_begin, sequences, codes);
}
-/* Don't allow matches to span the end of an LZX block. */
-static inline unsigned
-maybe_truncate_matches(struct lz_match matches[], unsigned num_matches,
- struct lzx_compressor *c)
+/*
+ * Given the frequencies of symbols in an LZX-compressed block and the
+ * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
+ * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
+ * will take fewer bits to output.
+ */
+static int
+lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
+ const struct lzx_codes * codes)
{
- if (c->match_window_end < c->cur_window_size && num_matches != 0) {
- u32 limit = c->match_window_end - c->match_window_pos;
+ u32 verbatim_cost = 0;
+ u32 aligned_cost = 0;
- if (limit >= LZX_MIN_MATCH_LEN) {
+ /* A verbatim block requires 3 bits in each place that an aligned offset
+ * symbol would be used in an aligned offset block. */
+ for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+ verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i];
+ aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
+ }
- unsigned i = num_matches - 1;
- do {
- if (matches[i].len >= limit) {
- matches[i].len = limit;
+ /* Account for the cost of sending the codeword lengths of the aligned
+ * offset code. */
+ aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE *
+ LZX_ALIGNEDCODE_NUM_SYMBOLS;
- /* Truncation might produce multiple
- * matches with length 'limit'. Keep at
- * most 1. */
- num_matches = i + 1;
- }
- } while (i--);
- } else {
- num_matches = 0;
- }
- }
- return num_matches;
+ if (aligned_cost < verbatim_cost)
+ return LZX_BLOCKTYPE_ALIGNED;
+ else
+ return LZX_BLOCKTYPE_VERBATIM;
}
-static unsigned
-lzx_get_matches_fillcache_singleblock(struct lzx_compressor *c,
- const struct lz_match **matches_ret)
+/*
+ * Flush an LZX block:
+ *
+ * 1. Build the Huffman codes.
+ * 2. Decide whether to output the block as VERBATIM or ALIGNED.
+ * 3. Write the block.
+ * 4. Swap the indices of the current and previous Huffman codes.
+ *
+ * Note: we never output UNCOMPRESSED blocks. This probably should be
+ * implemented sometime, but it doesn't make much difference.
+ */
+static void
+lzx_flush_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
+ const u8 *block_begin, u32 block_size, u32 seq_idx)
{
- struct lz_match *cache_ptr;
- struct lz_match *matches;
- unsigned num_matches;
-
- cache_ptr = c->cache_ptr;
- matches = cache_ptr + 1;
- if (likely(cache_ptr <= c->cache_limit)) {
- num_matches = lz_mf_get_matches(c->mf, matches);
- cache_ptr->len = num_matches;
- c->cache_ptr = matches + num_matches;
- } else {
- num_matches = 0;
- }
- c->match_window_pos++;
- *matches_ret = matches;
- return num_matches;
-}
+ int block_type;
-static unsigned
-lzx_get_matches_fillcache_multiblock(struct lzx_compressor *c,
- const struct lz_match **matches_ret)
-{
- struct lz_match *cache_ptr;
- struct lz_match *matches;
- unsigned num_matches;
-
- cache_ptr = c->cache_ptr;
- matches = cache_ptr + 1;
- if (likely(cache_ptr <= c->cache_limit)) {
- num_matches = lz_mf_get_matches(c->mf, matches);
- num_matches = maybe_truncate_matches(matches, num_matches, c);
- cache_ptr->len = num_matches;
- c->cache_ptr = matches + num_matches;
- } else {
- num_matches = 0;
- }
- c->match_window_pos++;
- *matches_ret = matches;
- return num_matches;
+ lzx_build_huffman_codes(c);
+
+ block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
+ &c->codes[c->codes_index]);
+ lzx_write_compressed_block(block_begin,
+ block_type,
+ block_size,
+ c->window_order,
+ c->num_main_syms,
+ &c->chosen_sequences[seq_idx],
+ &c->codes[c->codes_index],
+ &c->codes[c->codes_index ^ 1].lens,
+ os);
+ c->codes_index ^= 1;
}
-static unsigned
-lzx_get_matches_usecache(struct lzx_compressor *c,
- const struct lz_match **matches_ret)
+/******************************************************************************/
+/* Block splitting algorithm */
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The problem of block splitting is to decide when it is worthwhile to start a
+ * new block with new entropy codes. There is a theoretically optimal solution:
+ * recursively consider every possible block split, considering the exact cost
+ * of each block, and choose the minimum cost approach. But this is far too
+ * slow. Instead, as an approximation, we can count symbols and after every N
+ * symbols, compare the expected distribution of symbols based on the previous
+ * data with the actual distribution. If they differ "by enough", then start a
+ * new block.
+ *
+ * As an optimization and heuristic, we don't distinguish between every symbol
+ * but rather we combine many symbols into a single "observation type". For
+ * literals we only look at the high bits and low bits, and for matches we only
+ * look at whether the match is long or not. The assumption is that for typical
+ * "real" data, places that are good block boundaries will tend to be noticable
+ * based only on changes in these aggregate frequencies, without looking for
+ * subtle differences in individual symbols. For example, a change from ASCII
+ * bytes to non-ASCII bytes, or from few matches (generally less compressible)
+ * to many matches (generally more compressible), would be easily noticed based
+ * on the aggregates.
+ *
+ * For determining whether the frequency distributions are "different enough" to
+ * start a new block, the simple heuristic of splitting when the sum of absolute
+ * differences exceeds a constant seems to be good enough.
+ *
+ * Finally, for an approximation, it is not strictly necessary that the exact
+ * symbols being used are considered. With "near-optimal parsing", for example,
+ * the actual symbols that will be used are unknown until after the block
+ * boundary is chosen and the block has been optimized. Since the final choices
+ * are not yet available, we can use preliminary "greedy" choices instead.
+ */
+
+/* Initialize the block split statistics when starting a new block. */
+static void
+lzx_init_block_split_stats(struct lzx_block_split_stats *stats)
{
- struct lz_match *cache_ptr;
- struct lz_match *matches;
- unsigned num_matches;
-
- cache_ptr = c->cache_ptr;
- matches = cache_ptr + 1;
- if (cache_ptr <= c->cache_limit) {
- num_matches = cache_ptr->len;
- c->cache_ptr = matches + num_matches;
- } else {
- num_matches = 0;
- }
- c->match_window_pos++;
- *matches_ret = matches;
- return num_matches;
+ memset(stats, 0, sizeof(*stats));
}
-static unsigned
-lzx_get_matches_usecache_nocheck(struct lzx_compressor *c,
- const struct lz_match **matches_ret)
+/* Literal observation. Heuristic: use the top 2 bits and the low bit of the
+ * literal, for 8 possible literal observation types. */
+static forceinline void
+lzx_observe_literal(struct lzx_block_split_stats *stats, u8 lit)
{
- struct lz_match *cache_ptr;
- struct lz_match *matches;
- unsigned num_matches;
-
- cache_ptr = c->cache_ptr;
- matches = cache_ptr + 1;
- num_matches = cache_ptr->len;
- c->cache_ptr = matches + num_matches;
- c->match_window_pos++;
- *matches_ret = matches;
- return num_matches;
+ stats->new_observations[((lit >> 5) & 0x6) | (lit & 1)]++;
+ stats->num_new_observations++;
}
-static unsigned
-lzx_get_matches_nocache_singleblock(struct lzx_compressor *c,
- const struct lz_match **matches_ret)
+/* Match observation. Heuristic: use one observation type for "short match" and
+ * one observation type for "long match". */
+static forceinline void
+lzx_observe_match(struct lzx_block_split_stats *stats, unsigned length)
{
- struct lz_match *matches;
- unsigned num_matches;
-
- matches = c->cache_ptr;
- num_matches = lz_mf_get_matches(c->mf, matches);
- c->match_window_pos++;
- *matches_ret = matches;
- return num_matches;
+ stats->new_observations[NUM_LITERAL_OBSERVATION_TYPES + (length >= 5)]++;
+ stats->num_new_observations++;
}
-static unsigned
-lzx_get_matches_nocache_multiblock(struct lzx_compressor *c,
- const struct lz_match **matches_ret)
+static bool
+lzx_should_end_block(struct lzx_block_split_stats *stats)
{
- struct lz_match *matches;
- unsigned num_matches;
-
- matches = c->cache_ptr;
- num_matches = lz_mf_get_matches(c->mf, matches);
- num_matches = maybe_truncate_matches(matches, num_matches, c);
- c->match_window_pos++;
- *matches_ret = matches;
- return num_matches;
+ if (stats->num_observations > 0) {
+
+ /* Note: to avoid slow divisions, we do not divide by
+ * 'num_observations', but rather do all math with the numbers
+ * multiplied by 'num_observations'. */
+ u32 total_delta = 0;
+ for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
+ u32 expected = stats->observations[i] *
+ stats->num_new_observations;
+ u32 actual = stats->new_observations[i] *
+ stats->num_observations;
+ u32 delta = (actual > expected) ? actual - expected :
+ expected - actual;
+ total_delta += delta;
+ }
+
+ /* Ready to end the block? */
+ if (total_delta >=
+ stats->num_new_observations * 7 / 8 * stats->num_observations)
+ return true;
+ }
+
+ for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
+ stats->num_observations += stats->new_observations[i];
+ stats->observations[i] += stats->new_observations[i];
+ stats->new_observations[i] = 0;
+ }
+ stats->num_new_observations = 0;
+ return false;
}
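+
+/*
+ * Note on the threshold above (illustrative): dividing both sides of the
+ * comparison by 'num_observations * num_new_observations' shows that a block
+ * is ended roughly when the L1 distance between the old and new observation
+ * distributions, which ranges from 0 to 2, reaches 7/8.
+ */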
+/******************************************************************************/
+/* Slower ("near-optimal") compression algorithm */
+/*----------------------------------------------------------------------------*/
+
/*
- * Find matches at the next position in the window.
- *
- * This uses a wrapper function around the underlying match-finder.
+ * Least-recently-used queue for match offsets.
*
- * Returns the number of matches found and sets *matches_ret to point to the
- * matches array. The matches will be sorted by strictly increasing length and
- * offset.
+ * This is represented as a 64-bit integer for efficiency. There are three
+ * offsets of 21 bits each. Bit 64 is garbage.
*/
-static inline unsigned
-lzx_get_matches(struct lzx_compressor *c, const struct lz_match **matches_ret)
+struct lzx_lru_queue {
+ u64 R;
+} _aligned_attribute(8);
+
+#define LZX_QUEUE_OFFSET_SHIFT 21
+#define LZX_QUEUE_OFFSET_MASK (((u64)1 << LZX_QUEUE_OFFSET_SHIFT) - 1)
+
+#define LZX_QUEUE_R0_SHIFT (0 * LZX_QUEUE_OFFSET_SHIFT)
+#define LZX_QUEUE_R1_SHIFT (1 * LZX_QUEUE_OFFSET_SHIFT)
+#define LZX_QUEUE_R2_SHIFT (2 * LZX_QUEUE_OFFSET_SHIFT)
+
+#define LZX_QUEUE_R0_MASK (LZX_QUEUE_OFFSET_MASK << LZX_QUEUE_R0_SHIFT)
+#define LZX_QUEUE_R1_MASK (LZX_QUEUE_OFFSET_MASK << LZX_QUEUE_R1_SHIFT)
+#define LZX_QUEUE_R2_MASK (LZX_QUEUE_OFFSET_MASK << LZX_QUEUE_R2_SHIFT)
+
+#define LZX_QUEUE_INITIALIZER { \
+ ((u64)1 << LZX_QUEUE_R0_SHIFT) | \
+ ((u64)1 << LZX_QUEUE_R1_SHIFT) | \
+ ((u64)1 << LZX_QUEUE_R2_SHIFT) }
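+
+/*
+ * Layout sketch (illustrative only): with R0 = 1, R1 = 2, R2 = 3, the packed
+ * value is ((u64)3 << 42) | ((u64)2 << 21) | 1 == 0xC0000400001. Bits 0-20
+ * hold R0, bits 21-41 hold R1, bits 42-62 hold R2, and the top bit is the
+ * garbage bit.
+ */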
+
+static forceinline u64
+lzx_lru_queue_R0(struct lzx_lru_queue queue)
{
- return (*c->get_matches_func)(c, matches_ret);
+ return (queue.R >> LZX_QUEUE_R0_SHIFT) & LZX_QUEUE_OFFSET_MASK;
}
-static void
-lzx_skip_bytes_fillcache(struct lzx_compressor *c, unsigned n)
+static forceinline u64
+lzx_lru_queue_R1(struct lzx_lru_queue queue)
{
- struct lz_match *cache_ptr;
-
- cache_ptr = c->cache_ptr;
- c->match_window_pos += n;
- lz_mf_skip_positions(c->mf, n);
- if (cache_ptr <= c->cache_limit) {
- do {
- cache_ptr->len = 0;
- cache_ptr += 1;
- } while (--n && cache_ptr <= c->cache_limit);
- }
- c->cache_ptr = cache_ptr;
+ return (queue.R >> LZX_QUEUE_R1_SHIFT) & LZX_QUEUE_OFFSET_MASK;
}
-static void
-lzx_skip_bytes_usecache(struct lzx_compressor *c, unsigned n)
+static forceinline u64
+lzx_lru_queue_R2(struct lzx_lru_queue queue)
{
- struct lz_match *cache_ptr;
-
- cache_ptr = c->cache_ptr;
- c->match_window_pos += n;
- if (cache_ptr <= c->cache_limit) {
- do {
- cache_ptr += 1 + cache_ptr->len;
- } while (--n && cache_ptr <= c->cache_limit);
- }
- c->cache_ptr = cache_ptr;
+ return (queue.R >> LZX_QUEUE_R2_SHIFT) & LZX_QUEUE_OFFSET_MASK;
}
-static void
-lzx_skip_bytes_usecache_nocheck(struct lzx_compressor *c, unsigned n)
+/* Push a match offset onto the front (most recently used) end of the queue. */
+static forceinline struct lzx_lru_queue
+lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset)
{
- struct lz_match *cache_ptr;
+ return (struct lzx_lru_queue) {
+ .R = (queue.R << LZX_QUEUE_OFFSET_SHIFT) | offset,
+ };
+}
- cache_ptr = c->cache_ptr;
- c->match_window_pos += n;
- do {
- cache_ptr += 1 + cache_ptr->len;
- } while (--n);
- c->cache_ptr = cache_ptr;
+/* Swap a match offset to the front of the queue. */
+static forceinline struct lzx_lru_queue
+lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
+{
+ unsigned shift = idx * LZX_QUEUE_OFFSET_SHIFT;
+ const u64 mask = LZX_QUEUE_R0_MASK;
+ const u64 mask_high = mask << shift;
+
+ return (struct lzx_lru_queue) {
+ (queue.R & ~(mask | mask_high)) |
+ ((queue.R & mask_high) >> shift) |
+ ((queue.R & mask) << shift)
+ };
}
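+
+/*
+ * For illustration: if the queue holds (R0=a, R1=b, R2=c), then
+ * lzx_lru_queue_push(queue, d) yields (d, a, b), with c shifted out the top
+ * (only its low bit survives, in the garbage bit), while
+ * lzx_lru_queue_swap(queue, 2) yields (c, b, a). With idx == 0, mask_high
+ * equals mask and the swap is a no-op, as required for an R0 match.
+ */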
-static void
-lzx_skip_bytes_nocache(struct lzx_compressor *c, unsigned n)
+static forceinline u32
+lzx_walk_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit,
+ bool record)
{
- c->match_window_pos += n;
- lz_mf_skip_positions(c->mf, n);
+ struct lzx_sequence *seq =
+ &c->chosen_sequences[ARRAY_LEN(c->chosen_sequences) - 1];
+ u32 node_idx = block_size;
+ u32 litrun_end; /* if record=true: end of the current literal run */
+
+ if (record) {
+ /* The last sequence has matchlen 0 */
+ seq->litrunlen_and_matchlen = 0;
+ litrun_end = node_idx;
+ }
+
+ for (;;) {
+ u32 item;
+ unsigned matchlen;
+ u32 adjusted_offset;
+ unsigned mainsym;
+
+ /* Tally literals until either a match or the beginning of the
+ * block is reached. Note: the item in the node at the
+ * beginning of the block (c->optimum_nodes[0]) has all bits
+ * set, causing this loop to end when it is reached. */
+ for (;;) {
+ item = c->optimum_nodes[node_idx].item;
+ if (item & OPTIMUM_LEN_MASK)
+ break;
+ c->freqs.main[item >> OPTIMUM_OFFSET_SHIFT]++;
+ node_idx--;
+ }
+
+ #if CONSIDER_GAP_MATCHES
+ if (item & OPTIMUM_GAP_MATCH) {
+ if (node_idx == 0)
+ break;
+ /* Tally/record the rep0 match after the gap. */
+ matchlen = item & OPTIMUM_LEN_MASK;
+ mainsym = lzx_tally_main_and_lensyms(c, matchlen, 0,
+ is_16_bit);
+ if (record) {
+ seq->litrunlen_and_matchlen |=
+ (litrun_end - node_idx) <<
+ SEQ_MATCHLEN_BITS;
+ seq--;
+ seq->litrunlen_and_matchlen = matchlen;
+ seq->adjusted_offset_and_mainsym = mainsym;
+ litrun_end = node_idx - matchlen;
+ }
+
+ /* Tally the literal in the gap. */
+ c->freqs.main[(u8)(item >> OPTIMUM_OFFSET_SHIFT)]++;
+
+ /* Fall through and tally the match before the gap.
+ * (It was temporarily saved in the 'cost' field of the
+ * previous node, which was free to reuse.) */
+ item = c->optimum_nodes[--node_idx].cost;
+ node_idx -= matchlen;
+ }
+ #else /* CONSIDER_GAP_MATCHES */
+ if (node_idx == 0)
+ break;
+ #endif /* !CONSIDER_GAP_MATCHES */
+
+ /* Tally/record a match. */
+ matchlen = item & OPTIMUM_LEN_MASK;
+ adjusted_offset = item >> OPTIMUM_OFFSET_SHIFT;
+ mainsym = lzx_tally_main_and_lensyms(c, matchlen,
+ adjusted_offset,
+ is_16_bit);
+ if (adjusted_offset >= LZX_MIN_ALIGNED_OFFSET +
+ LZX_OFFSET_ADJUSTMENT)
+ c->freqs.aligned[adjusted_offset &
+ LZX_ALIGNED_OFFSET_BITMASK]++;
+ if (record) {
+ seq->litrunlen_and_matchlen |=
+ (litrun_end - node_idx) << SEQ_MATCHLEN_BITS;
+ seq--;
+ seq->litrunlen_and_matchlen = matchlen;
+ seq->adjusted_offset_and_mainsym =
+ (adjusted_offset << SEQ_MAINSYM_BITS) | mainsym;
+ litrun_end = node_idx - matchlen;
+ }
+ node_idx -= matchlen;
+ }
+
+ /* Record the literal run length for the first sequence. */
+ if (record) {
+ seq->litrunlen_and_matchlen |=
+ (litrun_end - node_idx) << SEQ_MATCHLEN_BITS;
+ }
+
+ /* Return the index in chosen_sequences at which the sequences begin. */
+ return seq - &c->chosen_sequences[0];
}
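+
+/*
+ * Worked example (illustrative only): a 10-byte block parsed as 5 literals,
+ * then a length-3 match, then 2 trailing literals is recorded as two
+ * lzx_sequences. Walking backwards, the final sequence gets litrunlen 2 and
+ * matchlen 0 (the end-of-block sentinel), and the preceding sequence gets
+ * litrunlen 5 and matchlen 3, along with the match's adjusted offset and
+ * main symbol.
+ */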
/*
- * Skip the specified number of positions in the window (don't search for
- * matches at them).
+ * Given the minimum-cost path computed through the item graph for the current
+ * block, walk the path and count how many of each symbol in each Huffman-coded
+ * alphabet would be required to output the items (matches and literals) along
+ * the path.
*
- * This uses a wrapper function around the underlying match-finder.
+ * Note that the path will be walked backwards (from the end of the block to the
+ * beginning of the block), but this doesn't matter because this function only
+ * computes frequencies.
*/
-static inline void
-lzx_skip_bytes(struct lzx_compressor *c, unsigned n)
+static forceinline void
+lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
{
- return (*c->skip_bytes_func)(c, n);
+ lzx_walk_item_list(c, block_size, is_16_bit, false);
}
-/* Tally, and optionally record, the specified literal byte. */
-static inline void
-lzx_declare_literal(struct lzx_compressor *c, unsigned literal,
- struct lzx_item **next_chosen_item)
+/*
+ * Like lzx_tally_item_list(), but this function also generates the list of
+ * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
+ * ready to be output to the bitstream after the Huffman codes are computed.
+ * The lzx_sequences will be written to decreasing memory addresses as the path
+ * is walked backwards, which means they will end up in the expected
+ * first-to-last order. The return value is the index in c->chosen_sequences at
+ * which the lzx_sequences begin.
+ */
+static forceinline u32
+lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
{
- unsigned main_symbol = literal;
-
- c->freqs.main[main_symbol]++;
-
- if (next_chosen_item) {
- *(*next_chosen_item)++ = (struct lzx_item) {
- .data = main_symbol,
- };
- }
+ return lzx_walk_item_list(c, block_size, is_16_bit, true);
}
-/* Tally, and optionally record, the specified repeat offset match. */
-static inline void
-lzx_declare_repeat_offset_match(struct lzx_compressor *c,
- unsigned len, unsigned rep_index,
- struct lzx_item **next_chosen_item)
+/*
+ * Find an inexpensive path through the graph of possible match/literal choices
+ * for the current block. The nodes of the graph are
+ * c->optimum_nodes[0...block_size]. They correspond directly to the bytes in
+ * the current block, plus one extra node for end-of-block. The edges of the
+ * graph are matches and literals. The goal is to find the minimum cost path
+ * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]', given the cost
+ * model 'c->costs'.
+ *
+ * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
+ * proceeding forwards one node at a time. At each node, a selection of matches
+ * (len >= 2), as well as the literal byte (len = 1), is considered. An item of
+ * length 'len' provides a new path to reach the node 'len' bytes later. If
+ * such a path is the lowest cost found so far to reach that later node, then
+ * that later node is updated with the new cost and the "arrival" which provided
+ * that cost.
+ *
+ * Note that although this algorithm is based on minimum cost path search, due
+ * to various simplifying assumptions the result is not guaranteed to be the
+ * true minimum cost, or "optimal", path over the graph of all valid LZX
+ * representations of this block.
+ *
+ * Also, note that because of the presence of the recent offsets queue (which is
+ * a type of adaptive state), the algorithm cannot work backwards and compute
+ * "cost to end" instead of "cost to beginning". Furthermore, the way the
+ * algorithm handles this adaptive state in the "minimum cost" parse is actually
+ * only an approximation. It's possible for the globally optimal, minimum cost
+ * path to contain a prefix, ending at a position, where that path prefix is
+ * *not* the minimum cost path to that position. This can happen if such a path
+ * prefix results in a different adaptive state which results in lower costs
+ * later. The algorithm does not solve this problem in general; it only looks
+ * one step ahead, with the exception of special consideration for "gap
+ * matches".
+ */
+static forceinline struct lzx_lru_queue
+lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
+ const u8 * const restrict block_begin,
+ const u32 block_size,
+ const struct lzx_lru_queue initial_queue,
+ bool is_16_bit)
{
- unsigned len_header;
- unsigned main_symbol;
- unsigned len_symbol;
+ struct lzx_optimum_node *cur_node = c->optimum_nodes;
+ struct lzx_optimum_node * const end_node = cur_node + block_size;
+ struct lz_match *cache_ptr = c->match_cache;
+ const u8 *in_next = block_begin;
+ const u8 * const block_end = block_begin + block_size;
+
+ /*
+ * Instead of storing the match offset LRU queues in the
+ * 'lzx_optimum_node' structures, we save memory (and cache lines) by
+ * storing them in a smaller array. This works because the algorithm
+ * only requires a limited history of the adaptive state. Once a given
+ * state is more than LZX_MAX_MATCH_LEN bytes behind the current node
+ * (more if gap match consideration is enabled; we just round up to 512
+ * so it's a power of 2), it is no longer needed.
+ *
+ * The QUEUE() macro finds the queue for the given node. This macro has
+ * been optimized by taking advantage of 'struct lzx_lru_queue' and
+ * 'struct lzx_optimum_node' both being 8 bytes in size and alignment.
+ */
+ struct lzx_lru_queue queues[512];
+ STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
+ STATIC_ASSERT(sizeof(c->optimum_nodes[0]) == sizeof(queues[0]));
+#define QUEUE(node) \
+ (*(struct lzx_lru_queue *)((char *)queues + \
+ ((uintptr_t)(node) % (ARRAY_LEN(queues) * sizeof(queues[0])))))
+ /*(queues[(uintptr_t)(node) / sizeof(*(node)) % ARRAY_LEN(queues)])*/
+
+#if CONSIDER_GAP_MATCHES
+ u32 matches_before_gap[ARRAY_LEN(queues)];
+#define MATCH_BEFORE_GAP(node) \
+ (matches_before_gap[(uintptr_t)(node) / sizeof(*(node)) % \
+ ARRAY_LEN(matches_before_gap)])
+#endif
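+
+ /*
+ * Indexing sketch for QUEUE() (and MATCH_BEFORE_GAP()): since 'struct
+ * lzx_optimum_node' and 'struct lzx_lru_queue' are both 8 bytes, taking a
+ * node's address modulo sizeof(queues) maps node i to ring slot
+ * (i + k) % 512 for some fixed k, so any two nodes within 512 positions of
+ * each other always occupy distinct slots, which is all the algorithm
+ * requires.
+ */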
- if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
- len_header = len - LZX_MIN_MATCH_LEN;
- len_symbol = LZX_LENCODE_NUM_SYMBOLS;
- } else {
- len_header = LZX_NUM_PRIMARY_LENS;
- len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
- c->freqs.len[len_symbol]++;
- }
+ /*
+ * Initially, the cost to reach each node is "infinity".
+ *
+ * The first node actually should have cost 0, but "infinity"
+ * (0xFFFFFFFF) works just as well because it immediately overflows.
+ *
+ * The following statement also intentionally sets the 'item' of the
+ * first node, which would otherwise have no meaning, to 0xFFFFFFFF for
+ * use as a sentinel. See lzx_walk_item_list().
+ */
+ memset(c->optimum_nodes, 0xFF,
+ (block_size + 1) * sizeof(c->optimum_nodes[0]));
- main_symbol = LZX_NUM_CHARS + ((rep_index << 3) | len_header);
+ /* Initialize the recent offsets queue for the first node. */
+ QUEUE(cur_node) = initial_queue;
- c->freqs.main[main_symbol]++;
+ do { /* For each node in the block in position order... */
- if (next_chosen_item) {
- *(*next_chosen_item)++ = (struct lzx_item) {
- .data = (u64)main_symbol | ((u64)len_symbol << 10),
- };
- }
-}
+ unsigned num_matches;
+ unsigned literal;
+ u32 cost;
-/* Tally, and optionally record, the specified explicit offset match. */
-static inline void
-lzx_declare_explicit_offset_match(struct lzx_compressor *c, unsigned len, u32 offset,
- struct lzx_item **next_chosen_item)
-{
- unsigned len_header;
- unsigned main_symbol;
- unsigned len_symbol;
- unsigned offset_slot;
- unsigned num_extra_bits;
- u32 extra_bits;
-
- if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
- len_header = len - LZX_MIN_MATCH_LEN;
- len_symbol = LZX_LENCODE_NUM_SYMBOLS;
- } else {
- len_header = LZX_NUM_PRIMARY_LENS;
- len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
- c->freqs.len[len_symbol]++;
- }
+ /*
+ * A selection of matches for the block was already saved in
+ * memory so that we don't have to run the uncompressed data
+ * through the matchfinder on every optimization pass. However,
+ * we still search for repeat offset matches during each
+ * optimization pass because we cannot predict the state of the
+ * recent offsets queue. But as a heuristic, we don't bother
+ * searching for repeat offset matches if the general-purpose
+ * matchfinder failed to find any matches.
+ *
+ * Note that a match of length n at some offset implies there is
+ * also a match of length l for LZX_MIN_MATCH_LEN <= l <= n at
+ * that same offset. In other words, we don't necessarily need
+ * to use the full length of a match. The key heuristic that
+ * saves a significant amount of time is that for each
+ * distinct length, we only consider the smallest offset for
+ * which that length is available. This heuristic also applies
+ * to repeat offsets, which we order specially: R0 < R1 < R2 <
+ * any explicit offset. Of course, this heuristic may
+ * produce suboptimal results because offset slots in LZX are
+ * subject to entropy encoding, but in practice this is a useful
+ * heuristic.
+ */
- offset_slot = lzx_get_offset_slot_raw(offset + LZX_OFFSET_OFFSET);
+ num_matches = cache_ptr->length;
+ cache_ptr++;
- main_symbol = LZX_NUM_CHARS + ((offset_slot << 3) | len_header);
+ if (num_matches) {
+ struct lz_match *end_matches = cache_ptr + num_matches;
+ unsigned next_len = LZX_MIN_MATCH_LEN;
+ unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
+ const u8 *matchptr;
+
+ /* Consider rep0 matches. */
+ matchptr = in_next - lzx_lru_queue_R0(QUEUE(cur_node));
+ if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
+ goto rep0_done;
+ STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
+ do {
+ u32 cost = cur_node->cost +
+ c->costs.match_cost[0][
+ next_len - LZX_MIN_MATCH_LEN];
+ if (cost <= (cur_node + next_len)->cost) {
+ (cur_node + next_len)->cost = cost;
+ (cur_node + next_len)->item =
+ (0 << OPTIMUM_OFFSET_SHIFT) | next_len;
+ }
+ if (unlikely(++next_len > max_len)) {
+ cache_ptr = end_matches;
+ goto done_matches;
+ }
+ } while (in_next[next_len - 1] == matchptr[next_len - 1]);
+
+ rep0_done:
+
+ /* Consider rep1 matches. */
+ matchptr = in_next - lzx_lru_queue_R1(QUEUE(cur_node));
+ if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
+ goto rep1_done;
+ if (matchptr[next_len - 1] != in_next[next_len - 1])
+ goto rep1_done;
+ for (unsigned len = 2; len < next_len - 1; len++)
+ if (matchptr[len] != in_next[len])
+ goto rep1_done;
+ do {
+ u32 cost = cur_node->cost +
+ c->costs.match_cost[1][
+ next_len - LZX_MIN_MATCH_LEN];
+ if (cost <= (cur_node + next_len)->cost) {
+ (cur_node + next_len)->cost = cost;
+ (cur_node + next_len)->item =
+ (1 << OPTIMUM_OFFSET_SHIFT) | next_len;
+ }
+ if (unlikely(++next_len > max_len)) {
+ cache_ptr = end_matches;
+ goto done_matches;
+ }
+ } while (in_next[next_len - 1] == matchptr[next_len - 1]);
+
+ rep1_done:
+
+ /* Consider rep2 matches. */
+ matchptr = in_next - lzx_lru_queue_R2(QUEUE(cur_node));
+ if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
+ goto rep2_done;
+ if (matchptr[next_len - 1] != in_next[next_len - 1])
+ goto rep2_done;
+ for (unsigned len = 2; len < next_len - 1; len++)
+ if (matchptr[len] != in_next[len])
+ goto rep2_done;
+ do {
+ u32 cost = cur_node->cost +
+ c->costs.match_cost[2][
+ next_len - LZX_MIN_MATCH_LEN];
+ if (cost <= (cur_node + next_len)->cost) {
+ (cur_node + next_len)->cost = cost;
+ (cur_node + next_len)->item =
+ (2 << OPTIMUM_OFFSET_SHIFT) | next_len;
+ }
+ if (unlikely(++next_len > max_len)) {
+ cache_ptr = end_matches;
+ goto done_matches;
+ }
+ } while (in_next[next_len - 1] == matchptr[next_len - 1]);
+
+ rep2_done:
+
+ while (next_len > cache_ptr->length)
+ if (++cache_ptr == end_matches)
+ goto done_matches;
+
+ /* Consider explicit offset matches. */
+ for (;;) {
+ u32 offset = cache_ptr->offset;
+ u32 adjusted_offset = offset + LZX_OFFSET_ADJUSTMENT;
+ unsigned offset_slot = lzx_get_offset_slot(c, adjusted_offset, is_16_bit);
+ u32 base_cost = cur_node->cost;
+ u32 cost;
+
+ #if CONSIDER_ALIGNED_COSTS
+ if (offset >= LZX_MIN_ALIGNED_OFFSET)
+ base_cost += c->costs.aligned[adjusted_offset &
+ LZX_ALIGNED_OFFSET_BITMASK];
+ #endif
+ do {
+ cost = base_cost +
+ c->costs.match_cost[offset_slot][
+ next_len - LZX_MIN_MATCH_LEN];
+ if (cost < (cur_node + next_len)->cost) {
+ (cur_node + next_len)->cost = cost;
+ (cur_node + next_len)->item =
+ (adjusted_offset << OPTIMUM_OFFSET_SHIFT) | next_len;
+ }
+ } while (++next_len <= cache_ptr->length);
+
+ if (++cache_ptr == end_matches) {
+ #if CONSIDER_GAP_MATCHES
+ /* Also consider the longest explicit
+ * offset match as a "gap match": match
+ * + lit + rep0. */
+ s32 remaining = (block_end - in_next) - (s32)next_len;
+ if (likely(remaining >= 2)) {
+ const u8 *strptr = in_next + next_len;
+ const u8 *matchptr = strptr - offset;
+ if (load_u16_unaligned(strptr) == load_u16_unaligned(matchptr)) {
+ STATIC_ASSERT(ARRAY_LEN(queues) - LZX_MAX_MATCH_LEN - 2 >= 250);
+ STATIC_ASSERT(ARRAY_LEN(queues) == ARRAY_LEN(matches_before_gap));
+ unsigned limit = min(remaining,
+ min(ARRAY_LEN(queues) - LZX_MAX_MATCH_LEN - 2,
+ LZX_MAX_MATCH_LEN));
+ unsigned rep0_len = lz_extend(strptr, matchptr, 2, limit);
+ u8 lit = strptr[-1];
+ cost += c->costs.main[lit] +
+ c->costs.match_cost[0][rep0_len - LZX_MIN_MATCH_LEN];
+ unsigned total_len = next_len + rep0_len;
+ if (cost < (cur_node + total_len)->cost) {
+ (cur_node + total_len)->cost = cost;
+ (cur_node + total_len)->item =
+ OPTIMUM_GAP_MATCH |
+ ((u32)lit << OPTIMUM_OFFSET_SHIFT) |
+ rep0_len;
+ MATCH_BEFORE_GAP(cur_node + total_len) =
+ (adjusted_offset << OPTIMUM_OFFSET_SHIFT) |
+ (next_len - 1);
+ }
+ }
+ }
+ #endif /* CONSIDER_GAP_MATCHES */
+ break;
+ }
+ }
+ }
- c->freqs.main[main_symbol]++;
+ done_matches:
- if (offset_slot >= 8)
- c->freqs.aligned[(offset + LZX_OFFSET_OFFSET) & 7]++;
+ /* Consider coding a literal.
- if (next_chosen_item) {
+ * To avoid an extra branch, actually checking the preferability
+ * of coding the literal is integrated into the queue update
+ * code below. */
+ literal = *in_next++;
+ cost = cur_node->cost + c->costs.main[literal];
- num_extra_bits = lzx_extra_offset_bits[offset_slot];
+ /* Advance to the next position. */
+ cur_node++;
- extra_bits = (offset + LZX_OFFSET_OFFSET) -
- lzx_offset_slot_base[offset_slot];
+ /* The lowest-cost path to the current position is now known.
+ * Finalize the recent offsets queue that results from taking
+ * this lowest-cost path. */
- *(*next_chosen_item)++ = (struct lzx_item) {
- .data = (u64)main_symbol |
- ((u64)len_symbol << 10) |
- ((u64)num_extra_bits << 18) |
- ((u64)extra_bits << 23),
- };
- }
-}
+ if (cost <= cur_node->cost) {
+ /* Literal: queue remains unchanged. */
+ cur_node->cost = cost;
+ cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
+ QUEUE(cur_node) = QUEUE(cur_node - 1);
+ } else {
+ /* Match: queue update is needed. */
+ unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
+ #if CONSIDER_GAP_MATCHES
+ s32 adjusted_offset = (s32)cur_node->item >> OPTIMUM_OFFSET_SHIFT;
+ STATIC_ASSERT(OPTIMUM_GAP_MATCH == 0x80000000); /* assuming sign extension */
+ #else
+ u32 adjusted_offset = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
+ #endif
+
+ if (adjusted_offset >= LZX_NUM_RECENT_OFFSETS) {
+ /* Explicit offset match: insert offset at front. */
+ QUEUE(cur_node) =
+ lzx_lru_queue_push(QUEUE(cur_node - len),
+ adjusted_offset - LZX_OFFSET_ADJUSTMENT);
+ }
+ #if CONSIDER_GAP_MATCHES
+ else if (adjusted_offset < 0) {
+ /* "Gap match": Explicit offset match, then a
+ * literal, then rep0 match. Save the explicit
+ * offset match information in the cost field of
+ * the previous node, which isn't needed
+ * anymore. Then insert the offset at the front
+ * of the queue. */
+ u32 match_before_gap = MATCH_BEFORE_GAP(cur_node);
+ (cur_node - 1)->cost = match_before_gap;
+ QUEUE(cur_node) =
+ lzx_lru_queue_push(QUEUE(cur_node - len - 1 -
+ (match_before_gap & OPTIMUM_LEN_MASK)),
+ (match_before_gap >> OPTIMUM_OFFSET_SHIFT) -
+ LZX_OFFSET_ADJUSTMENT);
+ }
+ #endif
+ else {
+ /* Repeat offset match: swap offset to front. */
+ QUEUE(cur_node) =
+ lzx_lru_queue_swap(QUEUE(cur_node - len),
+ adjusted_offset);
+ }
+ }
+ } while (cur_node != end_node);
-/* Tally, and optionally record, the specified match or literal. */
-static inline void
-lzx_declare_item(struct lzx_compressor *c, u32 mc_item_data,
- struct lzx_item **next_chosen_item)
-{
- u32 len = mc_item_data & MC_LEN_MASK;
- u32 offset_data = mc_item_data >> MC_OFFSET_SHIFT;
-
- if (len == 1)
- lzx_declare_literal(c, offset_data, next_chosen_item);
- else if (offset_data < LZX_NUM_RECENT_OFFSETS)
- lzx_declare_repeat_offset_match(c, len, offset_data,
- next_chosen_item);
- else
- lzx_declare_explicit_offset_match(c, len,
- offset_data - LZX_OFFSET_OFFSET,
- next_chosen_item);
+ /* Return the recent offsets queue at the end of the path. */
+ return QUEUE(cur_node);
}
-static inline void
-lzx_record_item_list(struct lzx_compressor *c,
- struct lzx_mc_pos_data *cur_optimum_ptr,
- struct lzx_item **next_chosen_item)
+/*
+ * Given the costs for the main and length codewords (c->costs.main and
+ * c->costs.len), initialize the match cost array (c->costs.match_cost) which
+ * directly provides the cost of every possible (length, offset slot) pair.
+ */
+static void
+lzx_compute_match_costs(struct lzx_compressor *c)
{
- struct lzx_mc_pos_data *end_optimum_ptr;
- u32 saved_item;
- u32 item;
+ unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) /
+ LZX_NUM_LEN_HEADERS;
+ struct lzx_costs *costs = &c->costs;
+ unsigned main_symbol = LZX_NUM_CHARS;
+
+ for (unsigned offset_slot = 0; offset_slot < num_offset_slots;
+ offset_slot++)
+ {
+ u32 extra_cost = lzx_extra_offset_bits[offset_slot] * BIT_COST;
+ unsigned i;
+
+ #if CONSIDER_ALIGNED_COSTS
+ if (offset_slot >= LZX_MIN_ALIGNED_OFFSET_SLOT)
+ extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * BIT_COST;
+ #endif
+
+ for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++) {
+ costs->match_cost[offset_slot][i] =
+ costs->main[main_symbol++] + extra_cost;
+ }
- /* The list is currently in reverse order (last item to first item).
- * Reverse it. */
- end_optimum_ptr = cur_optimum_ptr;
- saved_item = cur_optimum_ptr->mc_item_data;
- do {
- item = saved_item;
- cur_optimum_ptr -= item & MC_LEN_MASK;
- saved_item = cur_optimum_ptr->mc_item_data;
- cur_optimum_ptr->mc_item_data = item;
- } while (cur_optimum_ptr != c->optimum);
-
- /* Walk the list of items from beginning to end, tallying and recording
- * each item. */
- do {
- lzx_declare_item(c, cur_optimum_ptr->mc_item_data, next_chosen_item);
- cur_optimum_ptr += (cur_optimum_ptr->mc_item_data) & MC_LEN_MASK;
- } while (cur_optimum_ptr != end_optimum_ptr);
+ extra_cost += costs->main[main_symbol++];
+
+ for (; i < LZX_NUM_LENS; i++) {
+ costs->match_cost[offset_slot][i] =
+ costs->len[i - LZX_NUM_PRIMARY_LENS] +
+ extra_cost;
+ }
+ }
}
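+
+/*
+ * Worked example (assuming the standard LZX slot layout, where offset slot 10
+ * has 4 extra offset bits): ignoring the aligned-offset adjustment, a match
+ * in slot 10 with one of the primary lengths costs
+ * c->costs.main[mainsym] + 4 * BIT_COST, while a longer match additionally
+ * pays c->costs.len[] for its length symbol.
+ */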
-static inline void
-lzx_tally_item_list(struct lzx_compressor *c, struct lzx_mc_pos_data *cur_optimum_ptr)
+/*
+ * Fast approximation for log2f(x). This is not as accurate as the standard C
+ * version. It does not need to be perfectly accurate because it is only used
+ * for estimating symbol costs, which is very approximate anyway.
+ */
+static float
+log2f_fast(float x)
{
- /* Since we're just tallying the items, we don't need to reverse the
- * list. Processing the items in reverse order is fine. */
- do {
- lzx_declare_item(c, cur_optimum_ptr->mc_item_data, NULL);
- cur_optimum_ptr -= (cur_optimum_ptr->mc_item_data & MC_LEN_MASK);
- } while (cur_optimum_ptr != c->optimum);
+ union {
+ float f;
+ s32 i;
+ } u = { .f = x };
+
+ /* Extract the exponent and subtract 127 to remove the bias. This gives
+ * the integer part of the result. */
+ float res = ((u.i >> 23) & 0xFF) - 127;
+
+ /* Set the exponent to 0 (plus bias of 127). This transforms the number
+ * to the range [1, 2) while retaining the same mantissa. */
+ u.i = (u.i & ~(0xFF << 23)) | (127 << 23);
+
+ /*
+ * Approximate the log2 of the transformed number using a degree 2
+ * interpolating polynomial for log2(x) over the interval [1, 2). Then
+ * add this to the extracted exponent to produce the final approximation
+ * of log2(x).
+ *
+ * The coefficients of the interpolating polynomial used here were found
+ * using the script tools/log2_interpolation.r.
+ */
+ return res - 1.653124006f + u.f * (1.9941812f - u.f * 0.3347490189f);
}
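+
+/*
+ * Sanity-check example: for x == 8.0f, the extracted exponent is 3 and the
+ * remapped mantissa is u.f == 1.0f, so the result is
+ * 3 - 1.653124006 + (1.9941812 - 0.3347490189) ~= 3.0063, close to the exact
+ * value log2(8) == 3.
+ */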
-/* Tally, and optionally (if next_chosen_item != NULL) record, in order, all
- * items in the current list of items found by the match-chooser. */
-static void
-lzx_declare_item_list(struct lzx_compressor *c, struct lzx_mc_pos_data *cur_optimum_ptr,
- struct lzx_item **next_chosen_item)
+/*
+ * Return the estimated cost of a symbol which has been estimated to have the
+ * given probability.
+ */
+static u32
+lzx_cost_for_probability(float prob)
{
- if (next_chosen_item)
- lzx_record_item_list(c, cur_optimum_ptr, next_chosen_item);
- else
- lzx_tally_item_list(c, cur_optimum_ptr);
+ /*
+ * The basic formula is:
+ *
+ * entropy = -log2(probability)
+ *
+ * Use this to get the cost in fractional bits. Then multiply by our
+ * scaling factor of BIT_COST and convert to an integer.
+ *
+ * In addition, the minimum cost is BIT_COST (one bit) because the
+ * entropy coding method will be Huffman codes.
+ *
+ * Careful: even though 'prob' should be <= 1.0, 'log2f_fast(prob)' may
+ * be positive due to inaccuracy in our log2 approximation. Therefore,
+ * we cannot, in general, assume the computed cost is non-negative, and
+ * we should make sure negative costs get rounded up correctly.
+ */
+ s32 cost = -log2f_fast(prob) * BIT_COST;
+ return max(cost, BIT_COST);
}
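+
+/*
+ * For illustration: with BIT_COST == 64, a symbol with estimated probability
+ * 1/4 costs about -log2(0.25) * 64 == 128, i.e. two bits in the fixed-point
+ * scale, while any probability above 1/2 is clamped to the one-bit minimum
+ * of 64.
+ */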
-/* Set the cost model @c->costs from the Huffman codeword lengths specified in
- * @lens.
+/*
+ * Mapping: number of used literals => heuristic probability of a literal times
+ * 6870. Generated by running this R command:
*
- * The cost model and codeword lengths are almost the same thing, but the
- * Huffman codewords with length 0 correspond to symbols with zero frequency
- * that still need to be assigned actual costs. The specific values assigned
- * are arbitrary, but they should be fairly high (near the maximum codeword
- * length) to take into account the fact that uses of these symbols are expected
- * to be rare. */
-static void
-lzx_set_costs(struct lzx_compressor *c, const struct lzx_lens * lens)
-{
- unsigned i;
-
- /* Main code */
- for (i = 0; i < c->num_main_syms; i++)
- c->costs.main[i] = lens->main[i] ? lens->main[i] : 15;
-
- /* Length code */
- for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
- c->costs.len[i] = lens->len[i] ? lens->len[i] : 15;
+ * cat(paste(round(6870*2^-((304+(0:256))/64)), collapse=", "))
+ */
+static const u8 literal_scaled_probs[257] = {
+ 255, 253, 250, 247, 244, 242, 239, 237, 234, 232, 229, 227, 224, 222,
+ 219, 217, 215, 212, 210, 208, 206, 203, 201, 199, 197, 195, 193, 191,
+ 189, 186, 184, 182, 181, 179, 177, 175, 173, 171, 169, 167, 166, 164,
+ 162, 160, 159, 157, 155, 153, 152, 150, 149, 147, 145, 144, 142, 141,
+ 139, 138, 136, 135, 133, 132, 130, 129, 128, 126, 125, 124, 122, 121,
+ 120, 118, 117, 116, 115, 113, 112, 111, 110, 109, 107, 106, 105, 104,
+ 103, 102, 101, 100, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86,
+ 86, 85, 84, 83, 82, 81, 80, 79, 78, 78, 77, 76, 75, 74, 73, 73, 72, 71,
+ 70, 70, 69, 68, 67, 67, 66, 65, 65, 64, 63, 62, 62, 61, 60, 60, 59, 59,
+ 58, 57, 57, 56, 55, 55, 54, 54, 53, 53, 52, 51, 51, 50, 50, 49, 49, 48,
+ 48, 47, 47, 46, 46, 45, 45, 44, 44, 43, 43, 42, 42, 41, 41, 40, 40, 40,
+ 39, 39, 38, 38, 38, 37, 37, 36, 36, 36, 35, 35, 34, 34, 34, 33, 33, 33,
+ 32, 32, 32, 31, 31, 31, 30, 30, 30, 29, 29, 29, 28, 28, 28, 27, 27, 27,
+ 27, 26, 26, 26, 25, 25, 25, 25, 24, 24, 24, 24, 23, 23, 23, 23, 22, 22,
+ 22, 22, 21, 21, 21, 21, 20, 20, 20, 20, 20, 19, 19, 19, 19, 19, 18, 18,
+ 18, 18, 18, 17, 17, 17, 17, 17, 16, 16, 16, 16
+};
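+
+/*
+ * Spot check of the table above: for 64 used literals, the formula gives
+ * 6870 * 2^-((304 + 64) / 64) == 6870 * 2^-5.75 ~= 127.7, which rounds to the
+ * table entry literal_scaled_probs[64] == 128.
+ */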
- /* Aligned offset code */
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- c->costs.aligned[i] = lens->aligned[i] ? lens->aligned[i] : 7;
-}
+/*
+ * Mapping: length symbol => default cost of that symbol. This is derived from
+ * sample data but has been slightly edited to add more bias towards the
+ * shortest lengths, which are the most common.
+ */
+static const u16 lzx_default_len_costs[LZX_LENCODE_NUM_SYMBOLS] = {
+ 300, 310, 320, 330, 360, 396, 399, 416, 451, 448, 463, 466, 505, 492,
+ 503, 514, 547, 531, 566, 561, 589, 563, 592, 586, 623, 602, 639, 627,
+ 659, 643, 657, 650, 685, 662, 661, 672, 685, 686, 696, 680, 657, 682,
+ 666, 699, 674, 699, 679, 709, 688, 712, 692, 714, 694, 716, 698, 712,
+ 706, 727, 714, 727, 713, 723, 712, 718, 719, 719, 720, 735, 725, 735,
+ 728, 740, 727, 739, 727, 742, 716, 733, 733, 740, 738, 746, 737, 747,
+ 738, 745, 736, 748, 742, 749, 745, 749, 743, 748, 741, 752, 745, 752,
+ 747, 750, 747, 752, 748, 753, 750, 752, 753, 753, 749, 744, 752, 755,
+ 753, 756, 745, 748, 746, 745, 723, 757, 755, 758, 755, 758, 752, 757,
+ 754, 757, 755, 759, 755, 758, 753, 755, 755, 758, 757, 761, 755, 750,
+ 758, 759, 759, 760, 758, 751, 757, 757, 759, 759, 758, 759, 758, 761,
+ 750, 761, 758, 760, 759, 761, 758, 761, 760, 752, 759, 760, 759, 759,
+ 757, 762, 760, 761, 761, 748, 761, 760, 762, 763, 752, 762, 762, 763,
+ 762, 762, 763, 763, 762, 763, 762, 763, 762, 763, 763, 764, 763, 762,
+ 763, 762, 762, 762, 764, 764, 763, 764, 763, 763, 763, 762, 763, 763,
+ 762, 764, 764, 763, 762, 763, 763, 763, 763, 762, 764, 763, 762, 764,
+ 764, 763, 763, 765, 764, 764, 762, 763, 764, 765, 763, 764, 763, 764,
+ 762, 764, 764, 754, 763, 764, 763, 763, 762, 763, 584,
+};
-/* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
- * algorithm. */
+/* Set default costs to bootstrap the iterative optimization algorithm. */
static void
-lzx_set_default_costs(struct lzx_costs * costs, unsigned num_main_syms)
+lzx_set_default_costs(struct lzx_compressor *c)
{
unsigned i;
+ u32 num_literals = 0;
+ u32 num_used_literals = 0;
+ float inv_num_matches = 1.0f / c->freqs.main[LZX_NUM_CHARS];
+ float inv_num_items;
+ float prob_match = 1.0f;
+ u32 match_cost;
+ float base_literal_prob;
+
+ /* Some numbers here have been hardcoded to assume a bit cost of 64. */
+ STATIC_ASSERT(BIT_COST == 64);
+
+ /* Estimate the number of literals that will be used. 'num_literals' is
+ * the total number, whereas 'num_used_literals' is the number of
+ * distinct symbols. */
+ for (i = 0; i < LZX_NUM_CHARS; i++) {
+ num_literals += c->freqs.main[i];
+ num_used_literals += (c->freqs.main[i] != 0);
+ }
- /* Main code (part 1): Literal symbols */
- for (i = 0; i < LZX_NUM_CHARS; i++)
- costs->main[i] = 8;
-
- /* Main code (part 2): Match header symbols */
- for (; i < num_main_syms; i++)
- costs->main[i] = 10;
+ /* Note: all match headers were tallied as symbol 'LZX_NUM_CHARS'. We
+ * don't attempt to estimate which ones will be used. */
+
+ inv_num_items = 1.0f / (num_literals + c->freqs.main[LZX_NUM_CHARS]);
+ base_literal_prob = literal_scaled_probs[num_used_literals] *
+ (1.0f / 6870.0f);
+
+ /* Literal costs. We use two different methods to compute the
+ * probability of each literal and mix together their results. */
+ for (i = 0; i < LZX_NUM_CHARS; i++) {
+ u32 freq = c->freqs.main[i];
+ if (freq != 0) {
+ float prob = 0.5f * ((freq * inv_num_items) +
+ base_literal_prob);
+ c->costs.main[i] = lzx_cost_for_probability(prob);
+ prob_match -= prob;
+ } else {
+ c->costs.main[i] = 11 * BIT_COST;
+ }
+ }
- /* Length code */
+ /* Match header costs. We just assume that all match headers are
+ * equally probable, but we do take into account the relative cost of a
+ * match header vs. a literal depending on how common matches are
+ * expected to be vs. literals. */
+ prob_match = max(prob_match, 0.15f);
+ match_cost = lzx_cost_for_probability(prob_match / (c->num_main_syms -
+ LZX_NUM_CHARS));
+ for (; i < c->num_main_syms; i++)
+ c->costs.main[i] = match_cost;
+
+ /* Length symbol costs. These are just set to fixed values which
+ * reflect the fact the smallest lengths are typically the most common,
+ * and therefore are typically the cheapest. */
for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
- costs->len[i] = 8;
-
- /* Aligned offset code */
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- costs->aligned[i] = 3;
-}
-
-/* Return the cost, in bits, to output a literal byte using the specified cost
- * model. */
-static inline u32
-lzx_literal_cost(unsigned literal, const struct lzx_costs * costs)
-{
- return costs->main[literal];
+ c->costs.len[i] = lzx_default_len_costs[i];
+
+#if CONSIDER_ALIGNED_COSTS
+ /* Aligned offset symbol costs. These are derived from the estimated
+ * probability of each aligned offset symbol. */
+ for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+ /* We intentionally tallied the frequencies in the wrong slots,
+ * not accounting for LZX_OFFSET_ADJUSTMENT, since doing the
+ * fixup here is faster: a constant 8 subtractions here vs. one
+ * addition for every match. */
+ unsigned j = (i - LZX_OFFSET_ADJUSTMENT) & LZX_ALIGNED_OFFSET_BITMASK;
+ if (c->freqs.aligned[j] != 0) {
+ float prob = c->freqs.aligned[j] * inv_num_matches;
+ c->costs.aligned[i] = lzx_cost_for_probability(prob);
+ } else {
+ c->costs.aligned[i] =
+ (2 * LZX_NUM_ALIGNED_OFFSET_BITS) * BIT_COST;
+ }
+ }
+#endif
}
-/* Return the cost, in bits, to output a match of the specified length and
- * offset slot using the specified cost model. Does not take into account
- * extra offset bits. */
-static inline u32
-lzx_match_cost_raw(unsigned len, unsigned offset_slot,
- const struct lzx_costs *costs)
+/* Update the current cost model to reflect the computed Huffman codes. */
+static void
+lzx_set_costs_from_codes(struct lzx_compressor *c)
{
- u32 cost;
- unsigned len_header;
- unsigned main_symbol;
-
- if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
- len_header = len - LZX_MIN_MATCH_LEN;
- cost = 0;
- } else {
- len_header = LZX_NUM_PRIMARY_LENS;
+ unsigned i;
+ const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
- /* Account for length symbol. */
- cost = costs->len[len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
+ for (i = 0; i < c->num_main_syms; i++) {
+ c->costs.main[i] = (lens->main[i] ? lens->main[i] :
+ MAIN_CODEWORD_LIMIT) * BIT_COST;
}
- /* Account for main symbol. */
- main_symbol = LZX_NUM_CHARS + ((offset_slot << 3) | len_header);
- cost += costs->main[main_symbol];
-
- return cost;
-}
+ for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
+ c->costs.len[i] = (lens->len[i] ? lens->len[i] :
+ LENGTH_CODEWORD_LIMIT) * BIT_COST;
+ }
-/* Equivalent to lzx_match_cost_raw(), but assumes the length is small enough
- * that it doesn't require a length symbol. */
-static inline u32
-lzx_match_cost_raw_smalllen(unsigned len, unsigned offset_slot,
- const struct lzx_costs *costs)
-{
- LZX_ASSERT(len < LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS);
- return costs->main[LZX_NUM_CHARS +
- ((offset_slot << 3) | (len - LZX_MIN_MATCH_LEN))];
+#if CONSIDER_ALIGNED_COSTS
+ for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+ c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] :
+ ALIGNED_CODEWORD_LIMIT) * BIT_COST;
+ }
+#endif
}
/*
- * Consider coding the match at repeat offset index @rep_idx. Consider each
- * length from the minimum (2) to the full match length (@rep_len).
+ * Choose a "near-optimal" literal/match sequence to use for the current block,
+ * then flush the block. Because the cost of each Huffman symbol is unknown
+ * until the Huffman codes have been built and the Huffman codes themselves
+ * depend on the symbol frequencies, this uses an iterative optimization
+ * algorithm to approximate an optimal solution. The first optimization pass
+ * for the block uses default costs; additional passes use costs derived from
+ * the Huffman codes computed in the previous pass.
*/
-static inline void
-lzx_consider_repeat_offset_match(struct lzx_compressor *c,
- struct lzx_mc_pos_data *cur_optimum_ptr,
- unsigned rep_len, unsigned rep_idx)
+static forceinline struct lzx_lru_queue
+lzx_optimize_and_flush_block(struct lzx_compressor * const restrict c,
+ struct lzx_output_bitstream * const restrict os,
+ const u8 * const restrict block_begin,
+ const u32 block_size,
+ const struct lzx_lru_queue initial_queue,
+ bool is_16_bit)
{
- u32 base_cost = cur_optimum_ptr->cost;
- u32 cost;
- unsigned len;
+ unsigned num_passes_remaining = c->num_optim_passes;
+ struct lzx_lru_queue new_queue;
+ u32 seq_idx;
-#if 1 /* Optimized version */
+ lzx_set_default_costs(c);
- if (rep_len < LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS) {
- /* All lengths being considered are small. */
- len = 2;
- do {
- cost = base_cost +
- lzx_match_cost_raw_smalllen(len, rep_idx, &c->costs);
- if (cost < (cur_optimum_ptr + len)->cost) {
- (cur_optimum_ptr + len)->mc_item_data =
- (rep_idx << MC_OFFSET_SHIFT) | len;
- (cur_optimum_ptr + len)->cost = cost;
- }
- } while (++len <= rep_len);
- } else {
- /* Some lengths being considered are small, and some are big.
- * Start with the optimized loop for small lengths, then switch
- * to the optimized loop for big lengths. */
- len = 2;
- do {
- cost = base_cost +
- lzx_match_cost_raw_smalllen(len, rep_idx, &c->costs);
- if (cost < (cur_optimum_ptr + len)->cost) {
- (cur_optimum_ptr + len)->mc_item_data =
- (rep_idx << MC_OFFSET_SHIFT) | len;
- (cur_optimum_ptr + len)->cost = cost;
- }
- } while (++len < LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS);
+ for (;;) {
+ lzx_compute_match_costs(c);
+ new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
+ initial_queue, is_16_bit);
- /* The main symbol is now fixed. */
- base_cost += c->costs.main[LZX_NUM_CHARS +
- ((rep_idx << 3) | LZX_NUM_PRIMARY_LENS)];
- do {
- cost = base_cost +
- c->costs.len[len - LZX_MIN_MATCH_LEN -
- LZX_NUM_PRIMARY_LENS];
- if (cost < (cur_optimum_ptr + len)->cost) {
- (cur_optimum_ptr + len)->mc_item_data =
- (rep_idx << MC_OFFSET_SHIFT) | len;
- (cur_optimum_ptr + len)->cost = cost;
- }
- } while (++len <= rep_len);
- }
+ if (--num_passes_remaining == 0)
+ break;
-#else /* Unoptimized version */
+ /* At least one optimization pass remains. Update the costs. */
+ lzx_reset_symbol_frequencies(c);
+ lzx_tally_item_list(c, block_size, is_16_bit);
+ lzx_build_huffman_codes(c);
+ lzx_set_costs_from_codes(c);
+ }
- len = 2;
- do {
- cost = base_cost +
- lzx_match_cost_raw(len, rep_idx, &c->costs);
- if (cost < (cur_optimum_ptr + len)->cost) {
- (cur_optimum_ptr + len)->mc_item_data =
- (rep_idx << MC_OFFSET_SHIFT) | len;
- (cur_optimum_ptr + len)->cost = cost;
- }
- } while (++len <= rep_len);
-#endif
+ /* Done optimizing. Generate the sequence list and flush the block. */
+ lzx_reset_symbol_frequencies(c);
+ seq_idx = lzx_record_item_list(c, block_size, is_16_bit);
+ lzx_flush_block(c, os, block_begin, block_size, seq_idx);
+ return new_queue;
}
/*
- * Consider coding each match in @matches as an explicit offset match.
+ * This is the "near-optimal" LZX compressor.
*
- * @matches must be sorted by strictly increasing length and strictly
- * increasing offset. This is guaranteed by the match-finder.
+ * For each block, it performs a relatively thorough graph search to find an
+ * inexpensive (in terms of compressed size) way to output the block.
*
- * We consider each length from the minimum (2) to the longest
- * (matches[num_matches - 1].len). For each length, we consider only
- * the smallest offset for which that length is available. Although
- * this is not guaranteed to be optimal due to the possibility of a
- * larger offset costing less than a smaller offset to code, this is a
- * very useful heuristic.
+ * Note: there are actually many things this algorithm leaves on the table in
+ * terms of compression ratio. So although it may be "near-optimal", it is
+ * certainly not "optimal". The goal is not to produce the optimal compression
+ * ratio, which for LZX is probably impossible within any practical amount of
+ * time, but rather to produce a compression ratio significantly better than a
+ * simpler "greedy" or "lazy" parse while still being relatively fast.
*/
-static inline void
-lzx_consider_explicit_offset_matches(struct lzx_compressor *c,
- struct lzx_mc_pos_data *cur_optimum_ptr,
- const struct lz_match matches[],
- unsigned num_matches)
+static forceinline void
+lzx_compress_near_optimal(struct lzx_compressor * restrict c,
+ const u8 * const restrict in_begin, size_t in_nbytes,
+ struct lzx_output_bitstream * restrict os,
+ bool is_16_bit)
{
- LZX_ASSERT(num_matches > 0);
+ const u8 * in_next = in_begin;
+ const u8 * const in_end = in_begin + in_nbytes;
+ u32 max_len = LZX_MAX_MATCH_LEN;
+ u32 nice_len = min(c->nice_match_length, max_len);
+ u32 next_hashes[2] = {0, 0};
+ struct lzx_lru_queue queue = LZX_QUEUE_INITIALIZER;
- unsigned i;
- unsigned len;
- unsigned offset_slot;
- u32 position_cost;
- u32 cost;
- u32 offset_data;
-
-
-#if 1 /* Optimized version */
+ /* Initialize the matchfinder. */
+ CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
- if (matches[num_matches - 1].offset < LZX_NUM_FAST_OFFSETS) {
+ do {
+ /* Starting a new block */
+
+ const u8 * const in_block_begin = in_next;
+ const u8 * const in_max_block_end =
+ in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
+ struct lz_match *cache_ptr = c->match_cache;
+ const u8 *next_search_pos = in_next;
+ const u8 *next_observation = in_next;
+ const u8 *next_pause_point =
+ min(in_next + min(MIN_BLOCK_SIZE,
+ in_max_block_end - in_next),
+ in_max_block_end - min(LZX_MAX_MATCH_LEN - 1,
+ in_max_block_end - in_next));
+
+ lzx_init_block_split_stats(&c->split_stats);
+ lzx_reset_symbol_frequencies(c);
+
+ if (in_next >= next_pause_point)
+ goto pause;
/*
- * Offset is small; the offset slot can be looked up directly in
- * c->offset_slot_fast.
- *
- * Additional optimizations:
- *
- * - Since the offset is small, it falls in the exponential part
- * of the offset slot bases and the number of extra offset
- * bits can be calculated directly as (offset_slot >> 1) - 1.
+ * Run the input buffer through the matchfinder, caching the
+ * matches, until we decide to end the block.
*
- * - Just consider the number of extra offset bits; don't
- * account for the aligned offset code. Usually this has
- * almost no effect on the compression ratio.
- *
- * - Start out in a loop optimized for small lengths. When the
- * length becomes high enough that a length symbol will be
- * needed, jump into a loop optimized for big lengths.
+ * For a tighter matchfinding loop, we compute a "pause point",
+ * which is the next position at which we may need to check
+ * whether to end the block or to decrease max_len. We then
+ * only do these extra checks upon reaching the pause point.
*/
-
- LZX_ASSERT(offset_slot <= 37); /* for extra bits formula */
-
- len = 2;
- i = 0;
+ resume_matchfinding:
do {
- offset_slot = c->offset_slot_fast[matches[i].offset];
- position_cost = cur_optimum_ptr->cost +
- ((offset_slot >> 1) - 1);
- offset_data = matches[i].offset + LZX_OFFSET_OFFSET;
- do {
- if (len >= LZX_MIN_MATCH_LEN + LZX_NUM_PRIMARY_LENS)
- goto biglen;
- cost = position_cost +
- lzx_match_cost_raw_smalllen(len, offset_slot,
- &c->costs);
- if (cost < (cur_optimum_ptr + len)->cost) {
- (cur_optimum_ptr + len)->cost = cost;
- (cur_optimum_ptr + len)->mc_item_data =
- (offset_data << MC_OFFSET_SHIFT) | len;
+ if (in_next >= next_search_pos) {
+ /* Search for matches at this position. */
+ struct lz_match *lz_matchptr;
+ u32 best_len;
+
+ lz_matchptr = CALL_BT_MF(is_16_bit, c,
+ bt_matchfinder_get_matches,
+ in_begin,
+ in_next - in_begin,
+ max_len,
+ nice_len,
+ c->max_search_depth,
+ next_hashes,
+ &best_len,
+ cache_ptr + 1);
+ cache_ptr->length = lz_matchptr - (cache_ptr + 1);
+ cache_ptr = lz_matchptr;
+
+ /* Accumulate literal/match statistics for block
+ * splitting and for generating the initial cost
+ * model. */
+ if (in_next >= next_observation) {
+ best_len = cache_ptr[-1].length;
+ if (best_len >= 3) {
+ /* Match (len >= 3) */
+
+ /*
+ * Note: for performance reasons this has
+ * been simplified significantly:
+ *
+ * - We wait until later to account for
+ * LZX_OFFSET_ADJUSTMENT.
+ * - We don't account for repeat offsets.
+ * - We don't account for different match headers.
+ */
+ c->freqs.aligned[cache_ptr[-1].offset &
+ LZX_ALIGNED_OFFSET_BITMASK]++;
+ c->freqs.main[LZX_NUM_CHARS]++;
+
+ lzx_observe_match(&c->split_stats, best_len);
+ next_observation = in_next + best_len;
+ } else {
+ /* Literal */
+ c->freqs.main[*in_next]++;
+ lzx_observe_literal(&c->split_stats, *in_next);
+ next_observation = in_next + 1;
+ }
}
- } while (++len <= matches[i].len);
- } while (++i != num_matches);
-
- return;
- do {
- offset_slot = c->offset_slot_fast[matches[i].offset];
- biglen:
- position_cost = cur_optimum_ptr->cost +
- ((offset_slot >> 1) - 1) +
- c->costs.main[LZX_NUM_CHARS +
- ((offset_slot << 3) |
- LZX_NUM_PRIMARY_LENS)];
- offset_data = matches[i].offset + LZX_OFFSET_OFFSET;
- do {
- cost = position_cost +
- c->costs.len[len - LZX_MIN_MATCH_LEN -
- LZX_NUM_PRIMARY_LENS];
- if (cost < (cur_optimum_ptr + len)->cost) {
- (cur_optimum_ptr + len)->cost = cost;
- (cur_optimum_ptr + len)->mc_item_data =
- (offset_data << MC_OFFSET_SHIFT) | len;
- }
- } while (++len <= matches[i].len);
- } while (++i != num_matches);
- } else {
- len = 2;
- i = 0;
- do {
- offset_data = matches[i].offset + LZX_OFFSET_OFFSET;
- offset_slot = lzx_get_offset_slot_raw(offset_data);
- position_cost = cur_optimum_ptr->cost +
- lzx_extra_offset_bits[offset_slot];
- do {
- cost = position_cost +
- lzx_match_cost_raw(len, offset_slot, &c->costs);
- if (cost < (cur_optimum_ptr + len)->cost) {
- (cur_optimum_ptr + len)->cost = cost;
- (cur_optimum_ptr + len)->mc_item_data =
- (offset_data << MC_OFFSET_SHIFT) | len;
+ /*
+ * If there was a very long match found, then
+ * don't cache any matches for the bytes covered
+ * by that match. This avoids degenerate
+ * behavior when compressing highly redundant
+ * data, where the number of matches can be very
+ * large.
+ *
+ * This heuristic doesn't actually hurt the
+ * compression ratio *too* much. If there's a
+ * long match, then the data must be highly
+ * compressible, so it doesn't matter as much
+ * what we do.
+ */
+ if (best_len >= nice_len)
+ next_search_pos = in_next + best_len;
+ } else {
+ /* Don't search for matches at this position. */
+ CALL_BT_MF(is_16_bit, c,
+ bt_matchfinder_skip_position,
+ in_begin,
+ in_next - in_begin,
+ nice_len,
+ c->max_search_depth,
+ next_hashes);
+ cache_ptr->length = 0;
+ cache_ptr++;
+ }
+ } while (++in_next < next_pause_point &&
+ likely(cache_ptr < &c->match_cache[CACHE_LENGTH]));
+
+ pause:
+
+ /* Adjust max_len and nice_len if we're nearing the end of the
+ * input buffer. In addition, if we are so close to the end of
+ * the input buffer that there cannot be any more matches, then
+ * just advance through the last few positions and record no
+ * matches. */
+ if (unlikely(max_len > in_end - in_next)) {
+ max_len = in_end - in_next;
+ nice_len = min(max_len, nice_len);
+ if (max_len < BT_MATCHFINDER_REQUIRED_NBYTES) {
+ while (in_next != in_end) {
+ cache_ptr->length = 0;
+ cache_ptr++;
+ in_next++;
}
- } while (++len <= matches[i].len);
- } while (++i != num_matches);
- }
+ }
+ }
-#else /* Unoptimized version */
+ /* End the block if the match cache may overflow. */
+ if (unlikely(cache_ptr >= &c->match_cache[CACHE_LENGTH]))
+ goto end_block;
+
+ /* End the block if the soft maximum size has been reached. */
+ if (in_next >= in_max_block_end)
+ goto end_block;
+
+ /* End the block if the block splitting algorithm thinks this is
+ * a good place to do so. */
+ if (c->split_stats.num_new_observations >=
+ NUM_OBSERVATIONS_PER_BLOCK_CHECK &&
+ in_max_block_end - in_next >= MIN_BLOCK_SIZE &&
+ lzx_should_end_block(&c->split_stats))
+ goto end_block;
+
+ /* It's not time to end the block yet. Compute the next pause
+ * point and resume matchfinding. */
+ next_pause_point =
+ min(in_next + min(NUM_OBSERVATIONS_PER_BLOCK_CHECK * 2 -
+ c->split_stats.num_new_observations,
+ in_max_block_end - in_next),
+ in_max_block_end - min(LZX_MAX_MATCH_LEN - 1,
+ in_max_block_end - in_next));
+ goto resume_matchfinding;
+
+ end_block:
+ /* We've decided on a block boundary and cached matches. Now
+ * choose a match/literal sequence and flush the block. */
+ queue = lzx_optimize_and_flush_block(c, os, in_block_begin,
+ in_next - in_block_begin,
+ queue, is_16_bit);
+ } while (in_next != in_end);
+}
- unsigned num_extra_bits;
+static void
+lzx_compress_near_optimal_16(struct lzx_compressor *c, const u8 *in,
+ size_t in_nbytes, struct lzx_output_bitstream *os)
+{
+ lzx_compress_near_optimal(c, in, in_nbytes, os, true);
+}
- len = 2;
- i = 0;
- do {
- offset_data = matches[i].offset + LZX_OFFSET_OFFSET;
- position_cost = cur_optimum_ptr->cost;
- offset_slot = lzx_get_offset_slot_raw(offset_data);
- num_extra_bits = lzx_extra_offset_bits[offset_slot];
- if (num_extra_bits >= 3) {
- position_cost += num_extra_bits - 3;
- position_cost += c->costs.aligned[offset_data & 7];
- } else {
- position_cost += num_extra_bits;
- }
- do {
- cost = position_cost +
- lzx_match_cost_raw(len, offset_slot, &c->costs);
- if (cost < (cur_optimum_ptr + len)->cost) {
- (cur_optimum_ptr + len)->cost = cost;
- (cur_optimum_ptr + len)->mc_item_data =
- (offset_data << MC_OFFSET_SHIFT) | len;
- }
- } while (++len <= matches[i].len);
- } while (++i != num_matches);
-#endif
+static void
+lzx_compress_near_optimal_32(struct lzx_compressor *c, const u8 *in,
+ size_t in_nbytes, struct lzx_output_bitstream *os)
+{
+ lzx_compress_near_optimal(c, in, in_nbytes, os, false);
}
+/******************************************************************************/
+/* Faster ("lazy") compression algorithm */
+/*----------------------------------------------------------------------------*/
+
/*
- * Search for repeat offset matches with the current position.
+ * Called when the compressor chooses to use a literal. This tallies the
+ * Huffman symbol for the literal, increments the current literal run length,
+ * and "observes" the literal for the block split statistics.
*/
-static inline unsigned
-lzx_repsearch(const u8 * const strptr, const u32 bytes_remaining,
- const struct lzx_lru_queue *queue, unsigned *rep_max_idx_ret)
+static forceinline void
+lzx_choose_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
{
- BUILD_BUG_ON(LZX_NUM_RECENT_OFFSETS != 3);
- return lz_repsearch3(strptr, min(bytes_remaining, LZX_MAX_MATCH_LEN),
- queue->R, rep_max_idx_ret);
+ lzx_observe_literal(&c->split_stats, literal);
+ c->freqs.main[literal]++;
+ ++*litrunlen_p;
}
/*
- * The main near-optimal parsing routine.
- *
- * Briefly, the algorithm does an approximate minimum-cost path search to find a
- * "near-optimal" sequence of matches and literals to output, based on the
- * current cost model. The algorithm steps forward, position by position (byte
- * by byte), and updates the minimum cost path to reach each later position that
- * can be reached using a match or literal from the current position. This is
- * essentially Dijkstra's algorithm in disguise: the graph nodes are positions,
- * the graph edges are possible matches/literals to code, and the cost of each
- * edge is the estimated number of bits that will be required to output the
- * corresponding match or literal. But one difference is that we actually
- * compute the lowest-cost path in pieces, where each piece is terminated when
- * there are no choices to be made.
- *
- * This function will run this algorithm on the portion of the window from
- * &c->cur_window[c->match_window_pos] to &c->cur_window[c->match_window_end].
- *
- * On entry, c->queue must be the current state of the match offset LRU queue,
- * and c->costs must be the current cost model to use for Huffman symbols.
- *
- * On exit, c->queue will be the state that the LRU queue would be in if the
- * chosen items were to be coded.
- *
- * If next_chosen_item != NULL, then all items chosen will be recorded (saved in
- * the chosen_items array). Otherwise, all items chosen will only be tallied
- * (symbol frequencies tallied in c->freqs).
+ * Called when the compressor chooses to use a match. This tallies the Huffman
+ * symbol(s) for a match, saves the match data and the length of the preceding
+ * literal run, updates the recent offsets queue, and "observes" the match for
+ * the block split statistics.
*/
-static void
-lzx_optim_pass(struct lzx_compressor *c, struct lzx_item **next_chosen_item)
+static forceinline void
+lzx_choose_match(struct lzx_compressor *c, unsigned length, u32 adjusted_offset,
+ u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
+ u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
{
- const u8 *block_end;
- struct lzx_lru_queue *begin_queue;
- const u8 *window_ptr;
- struct lzx_mc_pos_data *cur_optimum_ptr;
- struct lzx_mc_pos_data *end_optimum_ptr;
- const struct lz_match *matches;
- unsigned num_matches;
- unsigned longest_len;
- unsigned rep_max_len;
- unsigned rep_max_idx;
- unsigned literal;
- unsigned len;
- u32 cost;
- u32 offset_data;
-
- block_end = &c->cur_window[c->match_window_end];
- begin_queue = &c->queue;
-begin:
- /* Start building a new list of items, which will correspond to the next
- * piece of the overall minimum-cost path.
- *
- * *begin_queue is the current state of the match offset LRU queue. */
+ struct lzx_sequence *next_seq = *next_seq_p;
+ unsigned mainsym;
+
+ lzx_observe_match(&c->split_stats, length);
+
+ mainsym = lzx_tally_main_and_lensyms(c, length, adjusted_offset,
+ is_16_bit);
+ next_seq->litrunlen_and_matchlen =
+ (*litrunlen_p << SEQ_MATCHLEN_BITS) | length;
+ next_seq->adjusted_offset_and_mainsym =
+ (adjusted_offset << SEQ_MAINSYM_BITS) | mainsym;
+
+ /* Update the recent offsets queue. */
+ if (adjusted_offset < LZX_NUM_RECENT_OFFSETS) {
+ /* Repeat offset match. */
+ swap(recent_offsets[0], recent_offsets[adjusted_offset]);
+ } else {
+ /* Explicit offset match. */
- window_ptr = &c->cur_window[c->match_window_pos];
+ /* Tally the aligned offset symbol if needed. */
+ if (adjusted_offset >= LZX_MIN_ALIGNED_OFFSET + LZX_OFFSET_ADJUSTMENT)
+ c->freqs.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]++;
- if (window_ptr == block_end) {
- c->queue = *begin_queue;
- return;
+ recent_offsets[2] = recent_offsets[1];
+ recent_offsets[1] = recent_offsets[0];
+ recent_offsets[0] = adjusted_offset - LZX_OFFSET_ADJUSTMENT;
}
- cur_optimum_ptr = c->optimum;
- cur_optimum_ptr->cost = 0;
- cur_optimum_ptr->queue = *begin_queue;
-
- end_optimum_ptr = cur_optimum_ptr;
-
- /* The following loop runs once per byte in the window, except in a
- * couple of shortcut cases. */
- for (;;) {
-
- /* Find explicit offset matches with the current position. */
- num_matches = lzx_get_matches(c, &matches);
-
- if (num_matches) {
- /*
- * Find the longest repeat offset match with the current
- * position.
- *
- * Heuristics:
- *
- * - Only search for repeat offset matches if the
- * match-finder already found at least one match.
- *
- * - Only consider the longest repeat offset match. It
- * seems to be rare for the optimal parse to include a
- * repeat offset match that doesn't have the longest
- * length (allowing for the possibility that not all
- * of that length is actually used).
- */
- rep_max_len = lzx_repsearch(window_ptr,
- block_end - window_ptr,
- &cur_optimum_ptr->queue,
- &rep_max_idx);
-
- if (rep_max_len) {
- /* If there's a very long repeat offset match,
- * choose it immediately. */
- if (rep_max_len >= c->params.nice_match_length) {
-
- swap(cur_optimum_ptr->queue.R[0],
- cur_optimum_ptr->queue.R[rep_max_idx]);
- begin_queue = &cur_optimum_ptr->queue;
-
- cur_optimum_ptr += rep_max_len;
- cur_optimum_ptr->mc_item_data =
- (rep_max_idx << MC_OFFSET_SHIFT) |
- rep_max_len;
-
- lzx_skip_bytes(c, rep_max_len - 1);
- break;
- }
-
- /* If reaching any positions for the first time,
- * initialize their costs to "infinity". */
- while (end_optimum_ptr < cur_optimum_ptr + rep_max_len)
- (++end_optimum_ptr)->cost = MC_INFINITE_COST;
-
- /* Consider coding a repeat offset match. */
- lzx_consider_repeat_offset_match(c,
- cur_optimum_ptr,
- rep_max_len,
- rep_max_idx);
- }
-
- longest_len = matches[num_matches - 1].len;
-
- /* If there's a very long explicit offset match, choose
- * it immediately. */
- if (longest_len >= c->params.nice_match_length) {
-
- cur_optimum_ptr->queue.R[2] =
- cur_optimum_ptr->queue.R[1];
- cur_optimum_ptr->queue.R[1] =
- cur_optimum_ptr->queue.R[0];
- cur_optimum_ptr->queue.R[0] =
- matches[num_matches - 1].offset;
- begin_queue = &cur_optimum_ptr->queue;
-
- offset_data = matches[num_matches - 1].offset +
- LZX_OFFSET_OFFSET;
- cur_optimum_ptr += longest_len;
- cur_optimum_ptr->mc_item_data =
- (offset_data << MC_OFFSET_SHIFT) |
- longest_len;
-
- lzx_skip_bytes(c, longest_len - 1);
- break;
- }
-
- /* If reaching any positions for the first time,
- * initialize their costs to "infinity". */
- while (end_optimum_ptr < cur_optimum_ptr + longest_len)
- (++end_optimum_ptr)->cost = MC_INFINITE_COST;
-
- /* Consider coding an explicit offset match. */
- lzx_consider_explicit_offset_matches(c, cur_optimum_ptr,
- matches, num_matches);
- } else {
- /* No matches found. The only choice at this position
- * is to code a literal. */
-
- if (end_optimum_ptr == cur_optimum_ptr) {
- #if 1
- /* Optimization for single literals. */
- if (likely(cur_optimum_ptr == c->optimum)) {
- lzx_declare_literal(c, *window_ptr++,
- next_chosen_item);
- if (window_ptr == block_end) {
- c->queue = cur_optimum_ptr->queue;
- return;
- }
- continue;
- }
- #endif
- (++end_optimum_ptr)->cost = MC_INFINITE_COST;
- }
- }
-
- /* Consider coding a literal.
-
- * To avoid an extra unpredictable branch, actually checking the
- * preferability of coding a literal is integrated into the
- * queue update code below. */
- literal = *window_ptr++;
- cost = cur_optimum_ptr->cost + lzx_literal_cost(literal, &c->costs);
-
- /* Advance to the next position. */
- cur_optimum_ptr++;
+ /* Reset the literal run length and advance to the next sequence. */
+ *next_seq_p = next_seq + 1;
+ *litrunlen_p = 0;
+}
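+
+/*
+ * For reference, a sketch of how the packed lzx_sequence fields above would
+ * be unpacked again when the block is written out (assuming the
+ * SEQ_MATCHLEN_BITS and SEQ_MAINSYM_BITS field widths used above):
+ *
+ *	u32 v = seq->litrunlen_and_matchlen;
+ *	u32 litrunlen = v >> SEQ_MATCHLEN_BITS;
+ *	unsigned matchlen = v & ((1u << SEQ_MATCHLEN_BITS) - 1);
+ *
+ *	v = seq->adjusted_offset_and_mainsym;
+ *	u32 adjusted_offset = v >> SEQ_MAINSYM_BITS;
+ *	unsigned mainsym = v & ((1u << SEQ_MAINSYM_BITS) - 1);
+ */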
- /* The lowest-cost path to the current position is now known.
- * Finalize the recent offsets queue that results from taking
- * this lowest-cost path. */
+/*
+ * Called when the compressor ends a block. This finishes the last lzx_sequence,
+ * which is just a literal run with no following match. This literal run might
+ * be empty.
+ */
+static forceinline void
+lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
+{
+ last_seq->litrunlen_and_matchlen = litrunlen << SEQ_MATCHLEN_BITS;
+}
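+
+/*
+ * Note that the final sequence's matchlen field is left as zero; a zero
+ * matchlen is what marks the end of the sequence list when the block is
+ * later written out.
+ */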
- if (cost < cur_optimum_ptr->cost) {
- /* Literal: queue remains unchanged. */
- cur_optimum_ptr->cost = cost;
- cur_optimum_ptr->mc_item_data = (literal << MC_OFFSET_SHIFT) | 1;
- cur_optimum_ptr->queue = (cur_optimum_ptr - 1)->queue;
- } else {
- /* Match: queue update is needed. */
- len = cur_optimum_ptr->mc_item_data & MC_LEN_MASK;
- offset_data = cur_optimum_ptr->mc_item_data >> MC_OFFSET_SHIFT;
- if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
- /* Explicit offset match: offset is inserted at front */
- cur_optimum_ptr->queue.R[0] = offset_data - LZX_OFFSET_OFFSET;
- cur_optimum_ptr->queue.R[1] = (cur_optimum_ptr - len)->queue.R[0];
- cur_optimum_ptr->queue.R[2] = (cur_optimum_ptr - len)->queue.R[1];
- } else {
- /* Repeat offset match: offset is swapped to front */
- cur_optimum_ptr->queue = (cur_optimum_ptr - len)->queue;
- swap(cur_optimum_ptr->queue.R[0],
- cur_optimum_ptr->queue.R[offset_data]);
- }
+/*
+ * Find the longest repeat offset match with the current position. If a match
+ * is found, return its length and set *best_rep_idx_ret to the index of its
+ * offset in @recent_offsets. Otherwise, return 0.
+ *
+ * Don't bother with length 2 matches; consider matches of length >= 3 only.
+ * Also assume that max_len >= 3.
+ */
+static unsigned
+lzx_find_longest_repeat_offset_match(const u8 * const in_next,
+ const u32 recent_offsets[],
+ const unsigned max_len,
+ unsigned *best_rep_idx_ret)
+{
+ STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3); /* loop is unrolled */
+
+ const u32 seq3 = load_u24_unaligned(in_next);
+ const u8 *matchptr;
+ unsigned best_rep_len = 0;
+ unsigned best_rep_idx = 0;
+ unsigned rep_len;
+
+ /* Check for rep0 match (most recent offset) */
+ matchptr = in_next - recent_offsets[0];
+ if (load_u24_unaligned(matchptr) == seq3)
+ best_rep_len = lz_extend(in_next, matchptr, 3, max_len);
+
+ /* Check for rep1 match (second most recent offset) */
+ matchptr = in_next - recent_offsets[1];
+ if (load_u24_unaligned(matchptr) == seq3) {
+ rep_len = lz_extend(in_next, matchptr, 3, max_len);
+ if (rep_len > best_rep_len) {
+ best_rep_len = rep_len;
+ best_rep_idx = 1;
}
+ }
- /*
- * This loop will terminate when either of the following
- * conditions is true:
- *
- * (1) cur_optimum_ptr == end_optimum_ptr
- *
- * There are no paths that extend beyond the current
- * position. In this case, any path to a later position
- * must pass through the current position, so we can go
- * ahead and choose the list of items that led to this
- * position.
- *
- * (2) cur_optimum_ptr == &c->optimum[LZX_OPTIM_ARRAY_LENGTH]
- *
- * This bounds the number of times the algorithm can step
- * forward before it is guaranteed to start choosing items.
- * This limits the memory usage. But
- * LZX_OPTIM_ARRAY_LENGTH is high enough that on most
- * inputs this limit is never reached.
- *
- * Note: no check for end-of-block is needed because
- * end-of-block will trigger condition (1).
- */
- if (cur_optimum_ptr == end_optimum_ptr ||
- cur_optimum_ptr == &c->optimum[LZX_OPTIM_ARRAY_LENGTH])
- {
- begin_queue = &cur_optimum_ptr->queue;
- break;
+ /* Check for rep2 match (third most recent offset) */
+ matchptr = in_next - recent_offsets[2];
+ if (load_u24_unaligned(matchptr) == seq3) {
+ rep_len = lz_extend(in_next, matchptr, 3, max_len);
+ if (rep_len > best_rep_len) {
+ best_rep_len = rep_len;
+ best_rep_idx = 2;
}
}
- /* Choose the current list of items that constitute the minimum-cost
- * path to the current position. */
- lzx_declare_item_list(c, cur_optimum_ptr, next_chosen_item);
- goto begin;
+ *best_rep_idx_ret = best_rep_idx;
+ return best_rep_len;
}
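+
+/*
+ * Note: the strict '>' comparisons above mean that ties in match length go
+ * to the lowest repeat index. For example, if recent_offsets[0] and
+ * recent_offsets[2] both yield a length 4 match, rep0 is returned.
+ */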
-/* Fast heuristic scoring for lazy parsing: how "good" is this match? */
-static inline unsigned
+/*
+ * Fast heuristic scoring for lazy parsing: how "good" is this match?
+ * This is mainly determined by the length: longer matches are better.
+ * However, we also give a bonus to close (small offset) matches and to repeat
+ * offset matches, since those require fewer bits to encode.
+ */
+
+static forceinline unsigned
lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset)
{
unsigned score = len;
- if (adjusted_offset < 2048)
+ if (adjusted_offset < 4096)
score++;
-
- if (adjusted_offset < 1024)
+ if (adjusted_offset < 256)
score++;
return score;
}
-static inline unsigned
-lzx_repeat_offset_match_score(unsigned len, unsigned slot)
+static forceinline unsigned
+lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx)
{
- return len + 3;
+ return rep_len + 3;
}
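+
+/*
+ * Worked example of the heuristics above: a length 6 explicit offset match
+ * at adjusted offset 100 scores 6 + 1 + 1 = 8 (both closeness bonuses
+ * apply), while a length 5 repeat offset match scores 5 + 3 = 8. The two
+ * are considered equally good, and the '>=' comparisons below break such
+ * ties in favor of the repeat offset match.
+ */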
-/* Lazy parsing */
-static u32
-lzx_choose_lazy_items_for_block(struct lzx_compressor *c,
- u32 block_start_pos, u32 block_size)
+/*
+ * This is the "lazy" LZX compressor. The basic idea is that before it chooses
+ * a match, it checks to see if there's a longer match at the next position. If
+ * yes, it chooses a literal and continues to the next position. If no, it
+ * chooses the match.
+ *
+ * Some additional heuristics are used as well. Repeat offset matches are
+ * considered favorably and sometimes are chosen immediately. In addition, long
+ * matches (at least "nice_len" bytes) are chosen immediately as well. Finally,
+ * when we decide whether a match is "better" than another, we take the offset
+ * into consideration as well as the length.
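+ *
+ * Ignoring repeat offsets, block splitting, and end-of-buffer handling, the
+ * core decision in the loop below is, in pseudocode (the helper names here
+ * are illustrative only):
+ *
+ *	cur = longest_match(pos);
+ *	next = longest_match(pos + 1);
+ *	if (score(next) > score(cur))
+ *		output_literal(data[pos]);	(then reconsider 'next')
+ *	else
+ *		output_match(cur);		(then skip over its bytes)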
+ */
+static forceinline void
+lzx_compress_lazy(struct lzx_compressor * restrict c,
+ const u8 * const restrict in_begin, size_t in_nbytes,
+ struct lzx_output_bitstream * restrict os, bool is_16_bit)
{
- const u8 *window_ptr;
- const u8 *block_end;
- struct lz_mf *mf;
- struct lz_match *matches;
- unsigned num_matches;
- unsigned cur_len;
- u32 cur_offset_data;
- unsigned cur_score;
- unsigned rep_max_len;
- unsigned rep_max_idx;
- unsigned rep_score;
- unsigned prev_len;
- unsigned prev_score;
- u32 prev_offset_data;
- unsigned skip_len;
- struct lzx_item *next_chosen_item;
-
- window_ptr = &c->cur_window[block_start_pos];
- block_end = window_ptr + block_size;
- matches = c->cached_matches;
- mf = c->mf;
- next_chosen_item = c->chosen_items;
-
- prev_len = 0;
- prev_offset_data = 0;
- prev_score = 0;
-
- while (window_ptr != block_end) {
-
- /* Find explicit offset matches with the current position. */
- num_matches = lz_mf_get_matches(mf, matches);
- window_ptr++;
-
- if (num_matches == 0 ||
- (matches[num_matches - 1].len == 3 &&
- matches[num_matches - 1].offset >= 8192 - LZX_OFFSET_OFFSET &&
- matches[num_matches - 1].offset != c->queue.R[0] &&
- matches[num_matches - 1].offset != c->queue.R[1] &&
- matches[num_matches - 1].offset != c->queue.R[2]))
- {
- /* No match found, or the only match found was a distant
- * length 3 match. Output the previous match if there
- * is one; otherwise output a literal. */
-
- no_match_found:
-
- if (prev_len) {
- skip_len = prev_len - 2;
- goto output_prev_match;
- } else {
- lzx_declare_literal(c, *(window_ptr - 1),
- &next_chosen_item);
- continue;
- }
- }
+ const u8 * in_next = in_begin;
+ const u8 * const in_end = in_begin + in_nbytes;
+ unsigned max_len = LZX_MAX_MATCH_LEN;
+ unsigned nice_len = min(c->nice_match_length, max_len);
+ STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
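+ /* The LZX format specifies that the recent offsets queue starts
+ * as 1, 1, 1. */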
+ u32 recent_offsets[LZX_NUM_RECENT_OFFSETS] = {1, 1, 1};
+ u32 next_hashes[2] = {0, 0};
- /* Find the longest repeat offset match with the current
- * position. */
- if (likely(block_end - (window_ptr - 1) >= 2)) {
- rep_max_len = lzx_repsearch((window_ptr - 1),
- block_end - (window_ptr - 1),
- &c->queue, &rep_max_idx);
- } else {
- rep_max_len = 0;
- }
-
- cur_len = matches[num_matches - 1].len;
- cur_offset_data = matches[num_matches - 1].offset + LZX_OFFSET_OFFSET;
- cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data);
-
- /* Select the better of the explicit and repeat offset matches. */
- if (rep_max_len >= 3 &&
- (rep_score = lzx_repeat_offset_match_score(rep_max_len,
- rep_max_idx)) >= cur_score)
- {
- cur_len = rep_max_len;
- cur_offset_data = rep_max_idx;
- cur_score = rep_score;
- }
-
- if (unlikely(cur_len > block_end - (window_ptr - 1))) {
- /* Nearing end of block. */
- cur_len = block_end - (window_ptr - 1);
- if (cur_len < 3)
- goto no_match_found;
- }
-
- if (prev_len == 0 || cur_score > prev_score) {
- /* No previous match, or the current match is better
- * than the previous match.
- *
- * If there's a previous match, then output a literal in
- * its place.
- *
- * In both cases, if the current match is very long,
- * then output it immediately. Otherwise, attempt a
- * lazy match by waiting to see if there's a better
- * match at the next position. */
-
- if (prev_len)
- lzx_declare_literal(c, *(window_ptr - 2), &next_chosen_item);
+ /* Initialize the matchfinder. */
+ CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
- prev_len = cur_len;
- prev_offset_data = cur_offset_data;
- prev_score = cur_score;
+ do {
+ /* Starting a new block */
+
+ const u8 * const in_block_begin = in_next;
+ const u8 * const in_max_block_end =
+ in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
+ struct lzx_sequence *next_seq = c->chosen_sequences;
+ u32 litrunlen = 0;
+ unsigned cur_len;
+ u32 cur_offset;
+ u32 cur_adjusted_offset;
+ unsigned cur_score;
+ unsigned next_len;
+ u32 next_offset;
+ u32 next_adjusted_offset;
+ unsigned next_score;
+ unsigned best_rep_len;
+ unsigned best_rep_idx;
+ unsigned rep_score;
+ unsigned skip_len;
+
+ lzx_reset_symbol_frequencies(c);
+ lzx_init_block_split_stats(&c->split_stats);
- if (prev_len >= c->params.nice_match_length) {
- skip_len = prev_len - 1;
- goto output_prev_match;
+ do {
+ /* Adjust max_len and nice_len if we're nearing the end
+ * of the input buffer. */
+ if (unlikely(max_len > in_end - in_next)) {
+ max_len = in_end - in_next;
+ nice_len = min(max_len, nice_len);
}
- continue;
- }
-
- /* Current match is not better than the previous match, so
- * output the previous match. */
- skip_len = prev_len - 2;
-
- output_prev_match:
- if (prev_offset_data < LZX_NUM_RECENT_OFFSETS) {
- lzx_declare_repeat_offset_match(c, prev_len,
- prev_offset_data,
- &next_chosen_item);
- swap(c->queue.R[0], c->queue.R[prev_offset_data]);
- } else {
- lzx_declare_explicit_offset_match(c, prev_len,
- prev_offset_data - LZX_OFFSET_OFFSET,
- &next_chosen_item);
- c->queue.R[2] = c->queue.R[1];
- c->queue.R[1] = c->queue.R[0];
- c->queue.R[0] = prev_offset_data - LZX_OFFSET_OFFSET;
- }
- lz_mf_skip_positions(mf, skip_len);
- window_ptr += skip_len;
- prev_len = 0;
- }
-
- return next_chosen_item - c->chosen_items;
-}
-
-/* Given the frequencies of symbols in an LZX-compressed block and the
- * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
- * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
- * will take fewer bits to output. */
-static int
-lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
- const struct lzx_codes * codes)
-{
- u32 aligned_cost = 0;
- u32 verbatim_cost = 0;
-
- /* A verbatim block requires 3 bits in each place that an aligned symbol
- * would be used in an aligned offset block. */
- for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
- verbatim_cost += 3 * freqs->aligned[i];
- aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
- }
-
- /* Account for output of the aligned offset code. */
- aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS;
-
- if (aligned_cost < verbatim_cost)
- return LZX_BLOCKTYPE_ALIGNED;
- else
- return LZX_BLOCKTYPE_VERBATIM;
-}
-
-/* Near-optimal parsing */
-static u32
-lzx_choose_near_optimal_items_for_block(struct lzx_compressor *c,
- u32 block_start_pos, u32 block_size)
-{
- u32 num_passes_remaining = c->params.num_optim_passes;
- struct lzx_lru_queue orig_queue;
- struct lzx_item *next_chosen_item;
- struct lzx_item **next_chosen_item_ptr;
-
- /* Choose appropriate match-finder wrapper functions. */
- if (num_passes_remaining > 1) {
- if (block_size == c->cur_window_size)
- c->get_matches_func = lzx_get_matches_fillcache_singleblock;
- else
- c->get_matches_func = lzx_get_matches_fillcache_multiblock;
- c->skip_bytes_func = lzx_skip_bytes_fillcache;
- } else {
- if (block_size == c->cur_window_size)
- c->get_matches_func = lzx_get_matches_nocache_singleblock;
- else
- c->get_matches_func = lzx_get_matches_nocache_multiblock;
- c->skip_bytes_func = lzx_skip_bytes_nocache;
- }
-
- /* No matches will extend beyond the end of the block. */
- c->match_window_end = block_start_pos + block_size;
-
- /* The first optimization pass will use a default cost model. Each
- * additional optimization pass will use a cost model computed from the
- * previous pass.
- *
- * To improve performance we only generate the array containing the
- * matches and literals in intermediate form on the final pass. For
- * earlier passes, tallying symbol frequencies is sufficient. */
- lzx_set_default_costs(&c->costs, c->num_main_syms);
-
- next_chosen_item_ptr = NULL;
- orig_queue = c->queue;
- do {
- /* Reset the match-finder wrapper. */
- c->match_window_pos = block_start_pos;
- c->cache_ptr = c->cached_matches;
-
- if (num_passes_remaining == 1) {
- /* Last pass: actually generate the items. */
- next_chosen_item = c->chosen_items;
- next_chosen_item_ptr = &next_chosen_item;
- }
+ /* Find the longest match (subject to the
+ * max_search_depth cutoff parameter) with the current
+ * position. Don't bother with length 2 matches; only
+ * look for matches of length >= 3. */
+ cur_len = CALL_HC_MF(is_16_bit, c,
+ hc_matchfinder_longest_match,
+ in_begin,
+ in_next - in_begin,
+ 2,
+ max_len,
+ nice_len,
+ c->max_search_depth,
+ next_hashes,
+ &cur_offset);
+
+ /* If there was no match found, or the only match found
+ * was a distant short match, then choose a literal. (A
+ * match that far away needs a long offset footer, so at
+ * length 3 it rarely saves bits over three literals.) */
+ if (cur_len < 3 ||
+ (cur_len == 3 &&
+ cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
+ cur_offset != recent_offsets[0] &&
+ cur_offset != recent_offsets[1] &&
+ cur_offset != recent_offsets[2]))
+ {
+ lzx_choose_literal(c, *in_next, &litrunlen);
+ in_next++;
+ continue;
+ }
- /* Choose the items. */
- lzx_optim_pass(c, next_chosen_item_ptr);
+ /* Heuristic: if this match has the most recent offset,
+ * then go ahead and choose it as a rep0 match. */
+ if (cur_offset == recent_offsets[0]) {
+ in_next++;
+ skip_len = cur_len - 1;
+ cur_adjusted_offset = 0;
+ goto choose_cur_match;
+ }
- if (num_passes_remaining > 1) {
- /* This isn't the last pass. */
+ /* Compute the longest match's score as an explicit
+ * offset match. */
+ cur_adjusted_offset = cur_offset + LZX_OFFSET_ADJUSTMENT;
+ cur_score = lzx_explicit_offset_match_score(cur_len, cur_adjusted_offset);
+
+ /* Find the longest repeat offset match at this
+ * position. If we find one and it's "better" than the
+ * explicit offset match we found, then go ahead and
+ * choose the repeat offset match immediately. */
+ best_rep_len = lzx_find_longest_repeat_offset_match(in_next,
+ recent_offsets,
+ max_len,
+ &best_rep_idx);
+ in_next++;
+
+ if (best_rep_len != 0 &&
+ (rep_score = lzx_repeat_offset_match_score(best_rep_len,
+ best_rep_idx)) >= cur_score)
+ {
+ cur_len = best_rep_len;
+ cur_adjusted_offset = best_rep_idx;
+ skip_len = best_rep_len - 1;
+ goto choose_cur_match;
+ }
- /* Make the Huffman codes from the symbol frequencies. */
- lzx_make_huffman_codes(&c->freqs, &c->codes[c->codes_index],
- c->num_main_syms);
+ have_cur_match:
+ /*
+ * We have a match at the current position. If the
+ * match is very long, then choose it immediately.
+ * Otherwise, see if there's a better match at the next
+ * position.
+ */
- /* Update symbol costs. */
- lzx_set_costs(c, &c->codes[c->codes_index].lens);
+ if (cur_len >= nice_len) {
+ skip_len = cur_len - 1;
+ goto choose_cur_match;
+ }
- /* Reset symbol frequencies. */
- memset(&c->freqs, 0, sizeof(c->freqs));
+ if (unlikely(max_len > in_end - in_next)) {
+ max_len = in_end - in_next;
+ nice_len = min(max_len, nice_len);
+ }
- /* Reset the match offset LRU queue to what it was at
- * the beginning of the block. */
- c->queue = orig_queue;
+ next_len = CALL_HC_MF(is_16_bit, c,
+ hc_matchfinder_longest_match,
+ in_begin,
+ in_next - in_begin,
+ cur_len - 2,
+ max_len,
+ nice_len,
+ c->max_search_depth / 2,
+ next_hashes,
+ &next_offset);
+
+ if (next_len <= cur_len - 2) {
+ /* No potentially better match was found. */
+ in_next++;
+ skip_len = cur_len - 2;
+ goto choose_cur_match;
+ }
- /* Choose appropriate match-finder wrapper functions. */
- if (c->cache_ptr <= c->cache_limit) {
- c->get_matches_func = lzx_get_matches_usecache_nocheck;
- c->skip_bytes_func = lzx_skip_bytes_usecache_nocheck;
+ next_adjusted_offset = next_offset + LZX_OFFSET_ADJUSTMENT;
+ next_score = lzx_explicit_offset_match_score(next_len, next_adjusted_offset);
+
+ best_rep_len = lzx_find_longest_repeat_offset_match(in_next,
+ recent_offsets,
+ max_len,
+ &best_rep_idx);
+ in_next++;
+
+ if (best_rep_len != 0 &&
+ (rep_score = lzx_repeat_offset_match_score(best_rep_len,
+ best_rep_idx)) >= next_score)
+ {
+ if (rep_score > cur_score) {
+ /* The next match is better, and it's a
+ * repeat offset match. */
+ lzx_choose_literal(c, *(in_next - 2),
+ &litrunlen);
+ cur_len = best_rep_len;
+ cur_adjusted_offset = best_rep_idx;
+ skip_len = cur_len - 1;
+ goto choose_cur_match;
+ }
} else {
- c->get_matches_func = lzx_get_matches_usecache;
- c->skip_bytes_func = lzx_skip_bytes_usecache;
+ if (next_score > cur_score) {
+ /* The next match is better, and it's an
+ * explicit offset match. */
+ lzx_choose_literal(c, *(in_next - 2),
+ &litrunlen);
+ cur_len = next_len;
+ cur_adjusted_offset = next_adjusted_offset;
+ cur_score = next_score;
+ goto have_cur_match;
+ }
}
- }
- } while (--num_passes_remaining);
- /* Return the number of items chosen. */
- return next_chosen_item - c->chosen_items;
+ /* The original match was better; choose it. */
+ skip_len = cur_len - 2;
+
+ choose_cur_match:
+ /* Choose a match and have the matchfinder skip over its
+ * remaining bytes. */
+ lzx_choose_match(c, cur_len, cur_adjusted_offset,
+ recent_offsets, is_16_bit,
+ &litrunlen, &next_seq);
+ in_next = CALL_HC_MF(is_16_bit, c,
+ hc_matchfinder_skip_positions,
+ in_begin,
+ in_next - in_begin,
+ in_end - in_begin,
+ skip_len,
+ next_hashes);
+
+ /* Keep going until it's time to end the block. */
+ } while (in_next < in_max_block_end &&
+ !(c->split_stats.num_new_observations >=
+ NUM_OBSERVATIONS_PER_BLOCK_CHECK &&
+ in_next - in_block_begin >= MIN_BLOCK_SIZE &&
+ in_end - in_next >= MIN_BLOCK_SIZE &&
+ lzx_should_end_block(&c->split_stats)));
+
+ /* Flush the block. */
+ lzx_finish_sequence(next_seq, litrunlen);
+ lzx_flush_block(c, os, in_block_begin, in_next - in_block_begin, 0);
+
+ /* Keep going until we've reached the end of the input buffer. */
+ } while (in_next != in_end);
}
-/*
- * Choose the matches/literals with which to output the block of data beginning
- * at '&c->cur_window[block_start_pos]' and extending for 'block_size' bytes.
- *
- * The frequencies of the Huffman symbols in the block will be tallied in
- * 'c->freqs'.
- *
- * 'c->queue' must specify the state of the queue at the beginning of this block.
- * This function will update it to the state of the queue at the end of this
- * block.
- *
- * Returns the number of matches/literals that were chosen and written to
- * 'c->chosen_items' in the 'struct lzx_item' intermediate representation.
- */
-static u32
-lzx_choose_items_for_block(struct lzx_compressor *c,
- u32 block_start_pos, u32 block_size)
+static void
+lzx_compress_lazy_16(struct lzx_compressor *c, const u8 *in, size_t in_nbytes,
+ struct lzx_output_bitstream *os)
{
- return (*c->params.choose_items_for_block)(c, block_start_pos, block_size);
+ lzx_compress_lazy(c, in, in_nbytes, os, true);
}
-/* Initialize c->offset_slot_fast. */
static void
-lzx_init_offset_slot_fast(struct lzx_compressor *c)
+lzx_compress_lazy_32(struct lzx_compressor *c, const u8 *in, size_t in_nbytes,
+ struct lzx_output_bitstream *os)
{
- u8 slot = 0;
+ lzx_compress_lazy(c, in, in_nbytes, os, false);
+}
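+
+/*
+ * Note: lzx_compress_lazy() is forceinline with a constant is_16_bit
+ * argument, so the two wrappers above compile into separate specialized
+ * versions: one using the 16-bit matchfinder structures for small buffers
+ * and one using the 32-bit structures for larger ones.
+ */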
- for (u32 offset = 0; offset < LZX_NUM_FAST_OFFSETS; offset++) {
+/******************************************************************************/
+/* Compressor operations */
+/*----------------------------------------------------------------------------*/
- while (offset + LZX_OFFSET_OFFSET >= lzx_offset_slot_base[slot + 1])
+/*
+ * Generate tables for mapping match offsets (actually, "adjusted" match
+ * offsets) to offset slots.
+ */
+static void
+lzx_init_offset_slot_tabs(struct lzx_compressor *c)
+{
+ u32 adjusted_offset = 0;
+ unsigned slot = 0;
+
+ /* slots [0, 29] */
+ for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
+ adjusted_offset++)
+ {
+ if (adjusted_offset >= lzx_offset_slot_base[slot + 1] +
+ LZX_OFFSET_ADJUSTMENT)
slot++;
+ c->offset_slot_tab_1[adjusted_offset] = slot;
+ }
- c->offset_slot_fast[offset] = slot;
+ /* slots [30, 49] */
+ for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
+ adjusted_offset += (u32)1 << 14)
+ {
+ if (adjusted_offset >= lzx_offset_slot_base[slot + 1] +
+ LZX_OFFSET_ADJUSTMENT)
+ slot++;
+ c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
}
}
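+
+/*
+ * The lookup side of these tables reduces to something like the following
+ * sketch (the exact helper used elsewhere in this file may differ slightly):
+ *
+ *	if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
+ *		slot = c->offset_slot_tab_1[adjusted_offset];
+ *	else
+ *		slot = c->offset_slot_tab_2[adjusted_offset >> 14];
+ */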
-/* Set internal compression parameters for the specified compression level and
- * maximum window size. */
-static void
-lzx_build_params(unsigned int compression_level, u32 max_window_size,
- struct lzx_compressor_params *lzx_params)
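+/*
+ * Return the size, in bytes, to allocate for an lzx_compressor. The
+ * matchfinder is the last member of the structure, so the total is the
+ * offset of the appropriate matchfinder member plus the size that
+ * matchfinder itself needs for the given buffer size.
+ */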
+static size_t
+lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
{
- if (compression_level < 25) {
-
- /* Fast compression: Use lazy parsing. */
-
- lzx_params->choose_items_for_block = lzx_choose_lazy_items_for_block;
- lzx_params->num_optim_passes = 1;
-
- /* When lazy parsing, the hash chain match-finding algorithm is
- * fastest unless the window is too large.
- *
- * TODO: something like hash arrays would actually be better
- * than binary trees on large windows. */
- if (max_window_size <= 262144)
- lzx_params->mf_algo = LZ_MF_HASH_CHAINS;
+ if (compression_level <= MAX_FAST_LEVEL) {
+ if (lzx_is_16_bit(max_bufsize))
+ return offsetof(struct lzx_compressor, hc_mf_16) +
+ hc_matchfinder_size_16(max_bufsize);
else
- lzx_params->mf_algo = LZ_MF_BINARY_TREES;
-
- /* When lazy parsing, don't bother with length 2 matches. */
- lzx_params->min_match_length = 3;
-
- /* Scale nice_match_length and max_search_depth with the
- * compression level. */
- lzx_params->nice_match_length = 25 + compression_level * 2;
- lzx_params->max_search_depth = 25 + compression_level;
+ return offsetof(struct lzx_compressor, hc_mf_32) +
+ hc_matchfinder_size_32(max_bufsize);
} else {
-
- /* Normal / high compression: Use near-optimal parsing. */
-
- lzx_params->choose_items_for_block = lzx_choose_near_optimal_items_for_block;
-
- /* Set a number of optimization passes appropriate for the
- * compression level. */
-
- lzx_params->num_optim_passes = 1;
-
- if (compression_level >= 40)
- lzx_params->num_optim_passes++;
-
- /* Use more optimization passes for higher compression levels.
- * But the more passes there are, the less they help --- so
- * don't add them linearly. */
- if (compression_level >= 70) {
- lzx_params->num_optim_passes++;
- if (compression_level >= 100)
- lzx_params->num_optim_passes++;
- if (compression_level >= 150)
- lzx_params->num_optim_passes++;
- if (compression_level >= 200)
- lzx_params->num_optim_passes++;
- if (compression_level >= 300)
- lzx_params->num_optim_passes++;
- }
-
- /* When doing near-optimal parsing, the hash chain match-finding
- * algorithm is good if the window size is small and we're only
- * doing one optimization pass. Otherwise, the binary tree
- * algorithm is the way to go. */
- if (max_window_size <= 32768 && lzx_params->num_optim_passes == 1)
- lzx_params->mf_algo = LZ_MF_HASH_CHAINS;
- else
- lzx_params->mf_algo = LZ_MF_BINARY_TREES;
-
- /* When doing near-optimal parsing, allow length 2 matches if
- * the compression level is sufficiently high. */
- if (compression_level >= 45)
- lzx_params->min_match_length = 2;
+ if (lzx_is_16_bit(max_bufsize))
+ return offsetof(struct lzx_compressor, bt_mf_16) +
+ bt_matchfinder_size_16(max_bufsize);
else
- lzx_params->min_match_length = 3;
-
- /* Scale nice_match_length and max_search_depth with the
- * compression level. */
- lzx_params->nice_match_length = min(((u64)compression_level * 32) / 50,
- LZX_MAX_MATCH_LEN);
- lzx_params->max_search_depth = min(((u64)compression_level * 50) / 50,
- LZX_MAX_MATCH_LEN);
+ return offsetof(struct lzx_compressor, bt_mf_32) +
+ bt_matchfinder_size_32(max_bufsize);
}
}
-/* Given the internal compression parameters and maximum window size, build the
- * Lempel-Ziv match-finder parameters. */
-static void
-lzx_build_mf_params(const struct lzx_compressor_params *lzx_params,
- u32 max_window_size, struct lz_mf_params *mf_params)
-{
- memset(mf_params, 0, sizeof(*mf_params));
-
- mf_params->algorithm = lzx_params->mf_algo;
- mf_params->max_window_size = max_window_size;
- mf_params->min_match_len = lzx_params->min_match_length;
- mf_params->max_match_len = LZX_MAX_MATCH_LEN;
- mf_params->max_search_depth = lzx_params->max_search_depth;
- mf_params->nice_match_len = lzx_params->nice_match_length;
-}
-
-static void
-lzx_free_compressor(void *_c);
-
+/* Compute the amount of memory needed to allocate an LZX compressor. */
static u64
-lzx_get_needed_memory(size_t max_block_size, unsigned int compression_level)
+lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
+ bool destructive)
{
- struct lzx_compressor_params params;
u64 size = 0;
- unsigned window_order;
- u32 max_window_size;
- window_order = lzx_get_window_order(max_block_size);
- if (window_order == 0)
+ if (max_bufsize > LZX_MAX_WINDOW_SIZE)
return 0;
- max_window_size = max_block_size;
-
- lzx_build_params(compression_level, max_window_size, &params);
-
- size += sizeof(struct lzx_compressor);
- /* cur_window */
- size += max_window_size;
-
- /* mf */
- size += lz_mf_get_needed_memory(params.mf_algo, max_window_size);
-
- /* cached_matches */
- if (params.num_optim_passes > 1)
- size += LZX_CACHE_LEN * sizeof(struct lz_match);
- else
- size += LZX_MAX_MATCHES_PER_POS * sizeof(struct lz_match);
+ size += lzx_get_compressor_size(max_bufsize, compression_level);
+ if (!destructive)
+ size += max_bufsize; /* account for in_buffer */
return size;
}
+/* Allocate an LZX compressor. */
static int
-lzx_create_compressor(size_t max_block_size, unsigned int compression_level,
- void **c_ret)
+lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
+ bool destructive, void **c_ret)
{
- struct lzx_compressor *c;
- struct lzx_compressor_params params;
- struct lz_mf_params mf_params;
unsigned window_order;
- u32 max_window_size;
+ struct lzx_compressor *c;
- window_order = lzx_get_window_order(max_block_size);
+ /* Validate the maximum buffer size and get the window order from it. */
+ window_order = lzx_get_window_order(max_bufsize);
if (window_order == 0)
return WIMLIB_ERR_INVALID_PARAM;
- max_window_size = max_block_size;
-
- lzx_build_params(compression_level, max_window_size, &params);
- lzx_build_mf_params(&params, max_window_size, &mf_params);
- if (!lz_mf_params_valid(&mf_params))
- return WIMLIB_ERR_INVALID_PARAM;
- c = CALLOC(1, sizeof(struct lzx_compressor));
+ /* Allocate the compressor. */
+ c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
if (!c)
- goto oom;
+ goto oom0;
- c->params = params;
- c->num_main_syms = lzx_get_num_main_syms(window_order);
c->window_order = window_order;
+ c->num_main_syms = lzx_get_num_main_syms(window_order);
+ c->destructive = destructive;
+
+ /* Allocate the buffer for preprocessed data if needed. */
+ if (!c->destructive) {
+ c->in_buffer = MALLOC(max_bufsize);
+ if (!c->in_buffer)
+ goto oom1;
+ }
+
+ if (compression_level <= MAX_FAST_LEVEL) {
+
+ /* Fast compression: Use lazy parsing. */
+ if (lzx_is_16_bit(max_bufsize))
+ c->impl = lzx_compress_lazy_16;
+ else
+ c->impl = lzx_compress_lazy_32;
- /* The window is allocated as 16-byte aligned to speed up memcpy() and
- * enable lzx_e8_filter() optimization on x86_64. */
- c->cur_window = ALIGNED_MALLOC(max_window_size, 16);
- if (!c->cur_window)
- goto oom;
-
- c->mf = lz_mf_alloc(&mf_params);
- if (!c->mf)
- goto oom;
-
- if (params.num_optim_passes > 1) {
- c->cached_matches = MALLOC(LZX_CACHE_LEN *
- sizeof(struct lz_match));
- if (!c->cached_matches)
- goto oom;
- c->cache_limit = c->cached_matches + LZX_CACHE_LEN -
- (LZX_MAX_MATCHES_PER_POS + 1);
+ /* Scale max_search_depth and nice_match_length with the
+ * compression level. */
+ c->max_search_depth = (60 * compression_level) / 20;
+ c->nice_match_length = (80 * compression_level) / 20;
+
+ /* lzx_compress_lazy() needs max_search_depth >= 2 because it
+ * halves the max_search_depth when attempting a lazy match, and
+ * max_search_depth must be at least 1. */
+ c->max_search_depth = max(c->max_search_depth, 2);
} else {
- c->cached_matches = MALLOC(LZX_MAX_MATCHES_PER_POS *
- sizeof(struct lz_match));
- if (!c->cached_matches)
- goto oom;
+
+ /* Normal / high compression: Use near-optimal parsing. */
+ if (lzx_is_16_bit(max_bufsize))
+ c->impl = lzx_compress_near_optimal_16;
+ else
+ c->impl = lzx_compress_near_optimal_32;
+
+ /* Scale max_search_depth and nice_match_length with the
+ * compression level. */
+ c->max_search_depth = (24 * compression_level) / 50;
+ c->nice_match_length = (48 * compression_level) / 50;
+
+ /* Also scale num_optim_passes with the compression level. But
+ * the more passes there are, the less they help --- so don't
+ * add them linearly. */
+ c->num_optim_passes = 1;
+ c->num_optim_passes += (compression_level >= 45);
+ c->num_optim_passes += (compression_level >= 70);
+ c->num_optim_passes += (compression_level >= 100);
+ c->num_optim_passes += (compression_level >= 150);
+ c->num_optim_passes += (compression_level >= 200);
+ c->num_optim_passes += (compression_level >= 300);
+
+ /* max_search_depth must be at least 1. */
+ c->max_search_depth = max(c->max_search_depth, 1);
}
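+
+ /* For example, at the default compression level of 50, the near-optimal
+ * branch above yields max_search_depth = 24, nice_match_length = 48, and
+ * num_optim_passes = 2. */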
- lzx_init_offset_slot_fast(c);
+ /* Prepare the offset => offset slot mapping. */
+ lzx_init_offset_slot_tabs(c);
*c_ret = c;
return 0;
-oom:
- lzx_free_compressor(c);
+oom1:
+ FREE(c);
+oom0:
return WIMLIB_ERR_NOMEM;
}
+/* Compress a buffer of data. */
static size_t
-lzx_compress(const void *uncompressed_data, size_t uncompressed_size,
- void *compressed_data, size_t compressed_size_avail, void *_c)
+lzx_compress(const void *restrict in, size_t in_nbytes,
+ void *restrict out, size_t out_nbytes_avail, void *restrict _c)
{
struct lzx_compressor *c = _c;
struct lzx_output_bitstream os;
- u32 num_chosen_items;
- const struct lzx_lens *prev_lens;
- u32 block_start_pos;
- u32 block_size;
- int block_type;
+ size_t result;
- /* Don't bother compressing very small inputs. */
- if (uncompressed_size < 100)
+ /* Don't bother trying to compress very small inputs. */
+ if (in_nbytes < 64)
return 0;
- /* The input data must be preprocessed. To avoid changing the original
- * input data, copy it to a temporary buffer. */
- memcpy(c->cur_window, uncompressed_data, uncompressed_size);
- c->cur_window_size = uncompressed_size;
-
- /* Preprocess the data. */
- lzx_do_e8_preprocessing(c->cur_window, c->cur_window_size);
-
- /* Load the window into the match-finder. */
- lz_mf_load_window(c->mf, c->cur_window, c->cur_window_size);
-
- /* Initialize the match offset LRU queue. */
- lzx_lru_queue_init(&c->queue);
+ /* If the compressor is in "destructive" mode, then we can directly
+ * preprocess the input data. Otherwise, we need to copy it into an
+ * internal buffer first. */
+ if (!c->destructive) {
+ memcpy(c->in_buffer, in, in_nbytes);
+ in = c->in_buffer;
+ }
- /* Initialize the output bitstream. */
- lzx_init_output(&os, compressed_data, compressed_size_avail);
+ /* Preprocess the input data. */
+ lzx_preprocess((void *)in, in_nbytes);
- /* Compress the data block by block.
- *
- * TODO: The compression ratio could be slightly improved by performing
- * data-dependent block splitting instead of using fixed-size blocks.
- * Doing so well is a computationally hard problem, however. */
- block_start_pos = 0;
+ /* Initially, the previous Huffman codeword lengths are all zeroes. */
c->codes_index = 0;
- prev_lens = &c->zero_lens;
- do {
- /* Compute the block size. */
- block_size = min(LZX_DIV_BLOCK_SIZE,
- uncompressed_size - block_start_pos);
+ memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
- /* Reset symbol frequencies. */
- memset(&c->freqs, 0, sizeof(c->freqs));
+ /* Initialize the output bitstream. */
+ lzx_init_output(&os, out, out_nbytes_avail);
- /* Prepare the matches/literals for the block. */
- num_chosen_items = lzx_choose_items_for_block(c,
- block_start_pos,
- block_size);
+ /* Call the compression level-specific compress() function. */
+ (*c->impl)(c, in, in_nbytes, &os);
- /* Make the Huffman codes from the symbol frequencies. */
- lzx_make_huffman_codes(&c->freqs, &c->codes[c->codes_index],
- c->num_main_syms);
+ /* Flush the output bitstream. */
+ result = lzx_flush_output(&os);
- /* Choose the best block type.
- *
- * Note: we currently don't consider uncompressed blocks. */
- block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
- &c->codes[c->codes_index]);
-
- /* Write the compressed block to the output buffer. */
- lzx_write_compressed_block(block_type,
- block_size,
- c->window_order,
- c->num_main_syms,
- c->chosen_items,
- num_chosen_items,
- &c->codes[c->codes_index],
- prev_lens,
- &os);
-
- /* The current codeword lengths become the previous lengths. */
- prev_lens = &c->codes[c->codes_index].lens;
- c->codes_index ^= 1;
-
- block_start_pos += block_size;
-
- } while (block_start_pos != uncompressed_size);
-
- return lzx_flush_output(&os);
+ /* If the data did not compress to less than its original size and we
+ * preprocessed the original buffer, then postprocess it to restore it
+ * to its original state. */
+ if (result == 0 && c->destructive)
+ lzx_postprocess((void *)in, in_nbytes);
+
+ /* Return the number of compressed bytes, or 0 if the input did not
+ * compress to less than its original size. */
+ return result;
}
+/* Free an LZX compressor. */
static void
lzx_free_compressor(void *_c)
{
struct lzx_compressor *c = _c;
- if (c) {
- ALIGNED_FREE(c->cur_window);
- lz_mf_free(c->mf);
- FREE(c->cached_matches);
- FREE(c);
- }
+ if (!c->destructive)
+ FREE(c->in_buffer);
+ FREE(c);
}
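+
+/*
+ * Illustrative use of the operations above (a sketch only; real callers go
+ * through the generic wimlib compressor interface, and the compression
+ * level and buffer sizes here are arbitrary):
+ *
+ *	void *c;
+ *	if (lzx_create_compressor(max_bufsize, 50, false, &c) == 0) {
+ *		size_t csize = lzx_compress(in, in_nbytes,
+ *					    out, out_nbytes_avail, c);
+ *		lzx_free_compressor(c);
+ *	}
+ *
+ * A csize of 0 means the input did not compress to less than its original
+ * size.
+ */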
const struct compressor_ops lzx_compressor_ops = {