X-Git-Url: https://wimlib.net/git/?a=blobdiff_plain;f=src%2Flzx_compress.c;h=2c521c63a32af76bd4c3108ba520c0dd5ce5bee1;hb=77ccb2c6301f3579c207af9fac59fbeecbb93209;hp=19e2daa2a97de024115e4b65ff0097661cd05a73;hpb=9eec31cd809e629d188493bd5b8a65f7d296dea4;p=wimlib diff --git a/src/lzx_compress.c b/src/lzx_compress.c index 19e2daa2..2c521c63 100644 --- a/src/lzx_compress.c +++ b/src/lzx_compress.c @@ -1,11 +1,11 @@ /* * lzx_compress.c * - * A compressor for the LZX compression format, as used in WIM files. + * A compressor for the LZX compression format, as used in WIM archives. */ /* - * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers + * Copyright (C) 2012-2016 Eric Biggers * * This file is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free @@ -26,19 +26,19 @@ * This file contains a compressor for the LZX ("Lempel-Ziv eXtended") * compression format, as used in the WIM (Windows IMaging) file format. * - * Two different parsing algorithms are implemented: "near-optimal" and "lazy". - * "Near-optimal" is significantly slower than "lazy", but results in a better - * compression ratio. The "near-optimal" algorithm is used at the default - * compression level. + * Two different LZX-compatible algorithms are implemented: "near-optimal" and + * "lazy". "Near-optimal" is significantly slower than "lazy", but results in a + * better compression ratio. The "near-optimal" algorithm is used at the + * default compression level. * * This file may need some slight modifications to be used outside of the WIM * format. In particular, in other situations the LZX block header might be * slightly different, and sliding window support might be required. * - * Note: LZX is a compression format derived from DEFLATE, the format used by - * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding. - * Certain details are quite similar, such as the method for storing Huffman - * codes. However, the main differences are: + * LZX is a compression format derived from DEFLATE, the format used by zlib and + * gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding. Certain + * details are quite similar, such as the method for storing Huffman codes. + * However, the main differences are: * * - LZX preprocesses the data to attempt to make x86 machine code slightly more * compressible before attempting to compress it further. @@ -53,95 +53,130 @@ * ("verbatim" and "aligned"). * * - LZX has a minimum match length of 2 rather than 3. Length 2 matches can be - * useful, but generally only if the parser is smart about choosing them. + * useful, but generally only if the compressor is smart about choosing them. * * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue * of match offsets. This is very useful for certain types of files, such as * binary files that have repeating records. */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif +/******************************************************************************/ +/* General parameters */ +/*----------------------------------------------------------------------------*/ + +/* + * The compressor uses the faster algorithm at levels <= MAX_FAST_LEVEL. It + * uses the slower algorithm at levels > MAX_FAST_LEVEL. + */ +#define MAX_FAST_LEVEL 34 + +/* + * The compressor-side limits on the codeword lengths (in bits) for each Huffman + * code. 
To make outputting bits slightly faster, some of these limits are + * lower than the limits defined by the LZX format. This does not significantly + * affect the compression ratio. + */ +#define MAIN_CODEWORD_LIMIT 16 +#define LENGTH_CODEWORD_LIMIT 12 +#define ALIGNED_CODEWORD_LIMIT 7 +#define PRE_CODEWORD_LIMIT 7 + + +/******************************************************************************/ +/* Block splitting parameters */ +/*----------------------------------------------------------------------------*/ + +/* + * The compressor always outputs blocks of at least this size in bytes, except + * for the last block which may need to be smaller. + */ +#define MIN_BLOCK_SIZE 6500 /* - * Start a new LZX block (with new Huffman codes) after this many bytes. + * The compressor attempts to end a block when it reaches this size in bytes. + * The final size might be slightly larger due to matches extending beyond the + * end of the block. Specifically: * - * Note: actual block sizes may slightly exceed this value. + * - The near-optimal compressor may choose a match of up to LZX_MAX_MATCH_LEN + * bytes starting at the SOFT_MAX_BLOCK_SIZE'th byte. * - * TODO: recursive splitting and cost evaluation might be good for an extremely - * high compression mode, but otherwise it is almost always far too slow for how - * much it helps. Perhaps some sort of heuristic would be useful? + * - The lazy compressor may choose a sequence of literals starting at the + * SOFT_MAX_BLOCK_SIZE'th byte when it sees a sequence of increasingly better + * matches. The final match may be up to LZX_MAX_MATCH_LEN bytes. The + * length of the literal sequence is approximately limited by the "nice match + * length" parameter. */ -#define LZX_DIV_BLOCK_SIZE 32768 +#define SOFT_MAX_BLOCK_SIZE 100000 /* - * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the - * match cache for each byte position. This value should be high enough so that - * nearly the time, all matches found in a given block can fit in the match - * cache. However, fallback behavior (immediately terminating the block) on - * cache overflow is still required. + * The number of observed items (matches and literals) that represents + * sufficient data for the compressor to decide whether the current block should + * be ended or not. */ -#define LZX_CACHE_PER_POS 7 +#define NUM_OBSERVATIONS_PER_BLOCK_CHECK 400 + + +/******************************************************************************/ +/* Parameters for slower algorithm */ +/*----------------------------------------------------------------------------*/ + +/* + * The log base 2 of the number of entries in the hash table for finding length + * 2 matches. This could be as high as 16, but using a smaller hash table + * speeds up compression due to reduced cache pressure. + */ +#define BT_MATCHFINDER_HASH2_ORDER 12 /* - * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache, - * excluding the extra "overflow" entries. The per-position multiplier is '1 + - * LZX_CACHE_PER_POS' instead of 'LZX_CACHE_PER_POS' because there is an - * overhead of one lz_match per position, used to hold the match count at that - * position. + * The number of lz_match structures in the match cache, excluding the extra + * "overflow" entries. This value should be high enough so that nearly the + * time, all matches found in a given block can fit in the match cache. + * However, fallback behavior (immediately terminating the block) on cache + * overflow is still required. 
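+ *
+ * (A rough sizing illustration, not a guarantee: with SOFT_MAX_BLOCK_SIZE =
+ * 100000, the definition below works out to 500000 entries, i.e. an average
+ * budget of about 5 lz_match slots per position, one of which is taken by the
+ * per-position match count header.)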
*/ -#define LZX_CACHE_LENGTH (LZX_DIV_BLOCK_SIZE * (1 + LZX_CACHE_PER_POS)) +#define CACHE_LENGTH (SOFT_MAX_BLOCK_SIZE * 5) /* - * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can - * ever be saved in the match cache for a single position. Since each match we - * save for a single position has a distinct length, we can use the number of - * possible match lengths in LZX as this bound. This bound is guaranteed to be - * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then - * it will never actually be reached. + * An upper bound on the number of matches that can ever be saved in the match + * cache for a single position. Since each match we save for a single position + * has a distinct length, we can use the number of possible match lengths in LZX + * as this bound. This bound is guaranteed to be valid in all cases, although + * if 'nice_match_length < LZX_MAX_MATCH_LEN', then it will never actually be + * reached. */ -#define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS +#define MAX_MATCHES_PER_POS LZX_NUM_LENS /* - * LZX_BIT_COST is a scaling factor that represents the cost to output one bit. - * This makes it possible to consider fractional bit costs. + * A scaling factor that makes it possible to consider fractional bit costs. A + * single bit has a cost of (1 << COST_SHIFT). * * Note: this is only useful as a statistical trick for when the true costs are * unknown. In reality, each token in LZX requires a whole number of bits to * output. */ -#define LZX_BIT_COST 16 +#define COST_SHIFT 6 /* * Should the compressor take into account the costs of aligned offset symbols? */ -#define LZX_CONSIDER_ALIGNED_COSTS 1 +#define CONSIDER_ALIGNED_COSTS 1 /* - * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the - * faster algorithm. + * Should the "minimum" cost path search algorithm consider "gap" matches, where + * a normal match is followed by a literal, then by a match with the same + * offset? This is one specific, somewhat common situation in which the true + * minimum cost path is often different from the path found by looking only one + * edge ahead. */ -#define LZX_MAX_FAST_LEVEL 34 +#define CONSIDER_GAP_MATCHES 1 -/* - * BT_MATCHFINDER_HASH2_ORDER is the log base 2 of the number of entries in the - * hash table for finding length 2 matches. This could be as high as 16, but - * using a smaller hash table speeds up compression due to reduced cache - * pressure. - */ -#define BT_MATCHFINDER_HASH2_ORDER 12 +/******************************************************************************/ +/* Includes */ +/*----------------------------------------------------------------------------*/ -/* - * These are the compressor-side limits on the codeword lengths for each Huffman - * code. To make outputting bits slightly faster, some of these limits are - * lower than the limits defined by the LZX format. This does not significantly - * affect the compression ratio, at least for the block sizes we use. - */ -#define MAIN_CODEWORD_LIMIT 12 /* 64-bit: can buffer 4 main symbols */ -#define LENGTH_CODEWORD_LIMIT 12 -#define ALIGNED_CODEWORD_LIMIT 7 -#define PRE_CODEWORD_LIMIT 7 +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif #include "wimlib/compress_common.h" #include "wimlib/compressor_ops.h" @@ -151,6 +186,9 @@ #include "wimlib/unaligned.h" #include "wimlib/util.h" +/* Note: BT_MATCHFINDER_HASH2_ORDER must be defined *before* including + * bt_matchfinder.h. 
*/ + /* Matchfinders with 16-bit positions */ #define mf_pos_t u16 #define MF_SUFFIX _16 @@ -165,60 +203,62 @@ #include "wimlib/bt_matchfinder.h" #include "wimlib/hc_matchfinder.h" -struct lzx_output_bitstream; +/******************************************************************************/ +/* Compressor structure */ +/*----------------------------------------------------------------------------*/ -/* Codewords for the LZX Huffman codes. */ +/* Codewords for the Huffman codes */ struct lzx_codewords { u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS]; u32 len[LZX_LENCODE_NUM_SYMBOLS]; u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS]; }; -/* Codeword lengths (in bits) for the LZX Huffman codes. - * A zero length means the corresponding codeword has zero frequency. */ +/* + * Codeword lengths, in bits, for the Huffman codes. + * + * A codeword length of 0 means the corresponding codeword has zero frequency. + * + * The main and length codes each have one extra entry for use as a sentinel. + * See lzx_write_compressed_code(). + */ struct lzx_lens { u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1]; u8 len[LZX_LENCODE_NUM_SYMBOLS + 1]; u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS]; }; -/* Cost model for near-optimal parsing */ -struct lzx_costs { - - /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a - * length 'len' match that has an offset belonging to 'offset_slot'. */ - u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS]; - - /* Cost for each symbol in the main code */ - u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS]; - - /* Cost for each symbol in the length code */ - u32 len[LZX_LENCODE_NUM_SYMBOLS]; - -#if LZX_CONSIDER_ALIGNED_COSTS - /* Cost for each symbol in the aligned code */ - u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS]; -#endif -}; - -/* Codewords and lengths for the LZX Huffman codes. */ +/* Codewords and lengths for the Huffman codes */ struct lzx_codes { struct lzx_codewords codewords; struct lzx_lens lens; }; -/* Symbol frequency counters for the LZX Huffman codes. */ +/* Symbol frequency counters for the Huffman-encoded alphabets */ struct lzx_freqs { u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS]; u32 len[LZX_LENCODE_NUM_SYMBOLS]; u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS]; }; +/* Block split statistics. See the "Block splitting algorithm" section later in + * this file for details. */ +#define NUM_LITERAL_OBSERVATION_TYPES 8 +#define NUM_MATCH_OBSERVATION_TYPES 2 +#define NUM_OBSERVATION_TYPES (NUM_LITERAL_OBSERVATION_TYPES + \ + NUM_MATCH_OBSERVATION_TYPES) +struct lzx_block_split_stats { + u32 new_observations[NUM_OBSERVATION_TYPES]; + u32 observations[NUM_OBSERVATION_TYPES]; + u32 num_new_observations; + u32 num_observations; +}; + /* * Represents a run of literals followed by a match or end-of-block. This - * struct is needed to temporarily store items chosen by the parser, since items - * cannot be written until all items for the block have been chosen and the - * block's Huffman codes have been computed. + * structure is needed to temporarily store items chosen by the compressor, + * since items cannot be written until all items for the block have been chosen + * and the block's Huffman codes have been computed. */ struct lzx_sequence { @@ -260,7 +300,8 @@ struct lzx_optimum_node { * change as progressively lower cost paths are found to reach this * position. * - * This variable is divided into two bitfields. + * For non-gap matches, this variable is divided into two bitfields + * whose meanings depend on the item type: * * Literals: * Low bits are 0, high bits are the literal. 
@@ -270,184 +311,128 @@ struct lzx_optimum_node { * * Repeat offset matches: * Low bits are the match length, high bits are the queue index. + * + * For gap matches, identified by OPTIMUM_GAP_MATCH set, special + * behavior applies --- see the code. */ u32 item; #define OPTIMUM_OFFSET_SHIFT 9 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1) -} _aligned_attribute(8); - -/* - * Least-recently-used queue for match offsets. - * - * This is represented as a 64-bit integer for efficiency. There are three - * offsets of 21 bits each. Bit 64 is garbage. - */ -struct lzx_lru_queue { - u64 R; -}; - -#define LZX_QUEUE64_OFFSET_SHIFT 21 -#define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1) - -#define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT) -#define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT) -#define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT) - -#define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT) -#define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT) -#define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT) +#if CONSIDER_GAP_MATCHES +# define OPTIMUM_GAP_MATCH 0x80000000 +#endif -static inline void -lzx_lru_queue_init(struct lzx_lru_queue *queue) -{ - queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) | - ((u64)1 << LZX_QUEUE64_R1_SHIFT) | - ((u64)1 << LZX_QUEUE64_R2_SHIFT); -} +} _aligned_attribute(8); -static inline u64 -lzx_lru_queue_R0(struct lzx_lru_queue queue) -{ - return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK; -} +/* The cost model for near-optimal parsing */ +struct lzx_costs { -static inline u64 -lzx_lru_queue_R1(struct lzx_lru_queue queue) -{ - return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK; -} + /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a + * length 'len' match that has an offset belonging to 'offset_slot'. */ + u16 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS]; -static inline u64 -lzx_lru_queue_R2(struct lzx_lru_queue queue) -{ - return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK; -} + /* Cost for each symbol in the main code */ + u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS]; -/* Push a match offset onto the front (most recently used) end of the queue. */ -static inline struct lzx_lru_queue -lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset) -{ - return (struct lzx_lru_queue) { - .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset, - }; -} + /* Cost for each symbol in the length code */ + u32 len[LZX_LENCODE_NUM_SYMBOLS]; -/* Swap a match offset to the front of the queue. */ -static inline struct lzx_lru_queue -lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx) -{ - if (idx == 0) - return queue; - - if (idx == 1) - return (struct lzx_lru_queue) { - .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) | - (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) | - (queue.R & LZX_QUEUE64_R2_MASK), - }; +#if CONSIDER_ALIGNED_COSTS + /* Cost for each symbol in the aligned code */ + u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS]; +#endif +}; - return (struct lzx_lru_queue) { - .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) | - (queue.R & LZX_QUEUE64_R1_MASK) | - (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT), - }; -} +struct lzx_output_bitstream; /* The main LZX compressor structure */ struct lzx_compressor { - /* The "nice" match length: if a match of this length is found, then - * choose it immediately without further consideration. 
*/ - unsigned nice_match_length; + /* The buffer for preprocessed input data, if not using destructive + * compression */ + void *in_buffer; - /* The maximum search depth: consider at most this many potential - * matches at each position. */ - unsigned max_search_depth; + /* If true, the compressor need not preserve the input buffer if it + * compresses the data successfully. */ + bool destructive; + + /* Pointer to the compress() implementation chosen at allocation time */ + void (*impl)(struct lzx_compressor *, const u8 *, size_t, + struct lzx_output_bitstream *); - /* The log base 2 of the LZX window size for LZ match offset encoding + /* The log base 2 of the window size for LZ match offset encoding * purposes. This will be >= LZX_MIN_WINDOW_ORDER and <= - * LZX_MAX_WINDOW_ORDER. */ + * LZX_MAX_WINDOW_ORDER. */ unsigned window_order; - /* The number of symbols in the main alphabet. This depends on - * @window_order, since @window_order determines the maximum possible - * offset. */ + /* The number of symbols in the main alphabet. This depends on the + * window order, since the window order determines the maximum possible + * match offset. */ unsigned num_main_syms; - /* Number of optimization passes per block */ - unsigned num_optim_passes; - - /* The preprocessed buffer of data being compressed */ - u8 *in_buffer; - - /* The number of bytes of data to be compressed, which is the number of - * bytes of data in @in_buffer that are actually valid. */ - size_t in_nbytes; + /* The "nice" match length: if a match of this length is found, then it + * is chosen immediately without further consideration. */ + unsigned nice_match_length; - /* Pointer to the compress() implementation chosen at allocation time */ - void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *); + /* The maximum search depth: at most this many potential matches are + * considered at each position. */ + unsigned max_search_depth; - /* If true, the compressor need not preserve the input buffer if it - * compresses the data successfully. */ - bool destructive; + /* The number of optimization passes per block */ + unsigned num_optim_passes; - /* The Huffman symbol frequency counters for the current block. */ + /* The symbol frequency counters for the current block */ struct lzx_freqs freqs; + /* Block split statistics */ + struct lzx_block_split_stats split_stats; + /* The Huffman codes for the current and previous blocks. The one with * index 'codes_index' is for the current block, and the other one is - * for the previous block. */ + * for the previous block. */ struct lzx_codes codes[2]; unsigned codes_index; - /* The matches and literals that the parser has chosen for the current - * block. The required length of this array is limited by the maximum - * number of matches that can ever be chosen for a single block, plus - * one for the special entry at the end. */ + /* The matches and literals that the compressor has chosen for the + * current block. The required length of this array is limited by the + * maximum number of matches that can ever be chosen for a single block, + * plus one for the special entry at the end. 
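+	 * (For illustration: at most one match can start in every
+	 * LZX_MIN_MATCH_LEN = 2 bytes before the soft block-size limit, so
+	 * with SOFT_MAX_BLOCK_SIZE = 100000 the bound below works out to
+	 * 50000 + 1 entries.)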
*/ struct lzx_sequence chosen_sequences[ - DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1]; - - /* Tables for mapping adjusted offsets to offset slots */ + DIV_ROUND_UP(SOFT_MAX_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1]; - /* offset slots [0, 29] */ - u8 offset_slot_tab_1[32768]; - - /* offset slots [30, 49] */ - u8 offset_slot_tab_2[128]; + /* Tables for mapping adjusted offsets to offset slots */ + u8 offset_slot_tab_1[32768]; /* offset slots [0, 29] */ + u8 offset_slot_tab_2[128]; /* offset slots [30, 49] */ union { - /* Data for greedy or lazy parsing */ + /* Data for lzx_compress_lazy() */ struct { - /* Hash chains matchfinder (MUST BE LAST!!!) */ + /* Hash chains matchfinder (MUST BE LAST!!!) */ union { struct hc_matchfinder_16 hc_mf_16; struct hc_matchfinder_32 hc_mf_32; }; }; - /* Data for near-optimal parsing */ + /* Data for lzx_compress_near_optimal() */ struct { /* - * The graph nodes for the current block. - * - * We need at least 'LZX_DIV_BLOCK_SIZE + - * LZX_MAX_MATCH_LEN - 1' nodes because that is the - * maximum block size that may be used. Add 1 because - * we need a node to represent end-of-block. + * Array of nodes, one per position, for running the + * minimum-cost path algorithm. * - * It is possible that nodes past end-of-block are - * accessed during match consideration, but this can - * only occur if the block was truncated at - * LZX_DIV_BLOCK_SIZE. So the same bound still applies. - * Note that since nodes past the end of the block will - * never actually have an effect on the items that are - * chosen for the block, it makes no difference what - * their costs are initialized to (if anything). + * This array must be large enough to accommodate the + * worst-case number of nodes, which occurs if the + * compressor finds a match of length LZX_MAX_MATCH_LEN + * at position 'SOFT_MAX_BLOCK_SIZE - 1', producing a + * block of size 'SOFT_MAX_BLOCK_SIZE - 1 + + * LZX_MAX_MATCH_LEN'. Add one for the end-of-block + * node. */ - struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE + - LZX_MAX_MATCH_LEN - 1 + 1]; + struct lzx_optimum_node optimum_nodes[ + SOFT_MAX_BLOCK_SIZE - 1 + + LZX_MAX_MATCH_LEN + 1]; - /* The cost model for the current block */ + /* The cost model for the current optimization pass */ struct lzx_costs costs; /* @@ -463,21 +448,20 @@ struct lzx_compressor { * Note: in rare cases, there will be a very high number * of matches in the block and this array will overflow. * If this happens, we force the end of the current - * block. LZX_CACHE_LENGTH is the length at which we + * block. CACHE_LENGTH is the length at which we * actually check for overflow. The extra slots beyond * this are enough to absorb the worst case overflow, - * which occurs if starting at - * &match_cache[LZX_CACHE_LENGTH - 1], we write the - * match count header, then write - * LZX_MAX_MATCHES_PER_POS matches, then skip searching - * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and + * which occurs if starting at &match_cache[CACHE_LENGTH + * - 1], we write the match count header, then write + * MAX_MATCHES_PER_POS matches, then skip searching for + * matches at 'LZX_MAX_MATCH_LEN - 1' positions and * write the match count header for each. */ - struct lz_match match_cache[LZX_CACHE_LENGTH + - LZX_MAX_MATCHES_PER_POS + + struct lz_match match_cache[CACHE_LENGTH + + MAX_MATCHES_PER_POS + LZX_MAX_MATCH_LEN - 1]; - /* Binary trees matchfinder (MUST BE LAST!!!) */ + /* Binary trees matchfinder (MUST BE LAST!!!) 
*/ union { struct bt_matchfinder_16 bt_mf_16; struct bt_matchfinder_32 bt_mf_32; @@ -486,6 +470,10 @@ struct lzx_compressor { }; }; +/******************************************************************************/ +/* Matchfinder utilities */ +/*----------------------------------------------------------------------------*/ + /* * Will a matchfinder using 16-bit positions be sufficient for compressing * buffers of up to the specified size? The limit could be 65536 bytes, but we @@ -500,6 +488,18 @@ lzx_is_16_bit(size_t max_bufsize) return max_bufsize <= 32768; } +/* + * Return the offset slot for the specified adjusted match offset. + */ +static inline unsigned +lzx_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset, + bool is_16_bit) +{ + if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1)) + return c->offset_slot_tab_1[adjusted_offset]; + return c->offset_slot_tab_2[adjusted_offset >> 14]; +} + /* * The following macros call either the 16-bit or the 32-bit version of a * matchfinder function based on the value of 'is_16_bit', which will be known @@ -514,46 +514,45 @@ lzx_is_16_bit(size_t max_bufsize) ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \ CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__)); +/******************************************************************************/ +/* Output bitstream */ +/*----------------------------------------------------------------------------*/ + +/* + * The LZX bitstream is encoded as a sequence of little endian 16-bit coding + * units. Bits are ordered from most significant to least significant within + * each coding unit. + */ + /* * Structure to keep track of the current state of sending bits to the * compressed output buffer. - * - * The LZX bitstream is encoded as a sequence of 16-bit coding units. */ struct lzx_output_bitstream { - /* Bits that haven't yet been written to the output buffer. */ + /* Bits that haven't yet been written to the output buffer */ machine_word_t bitbuf; - /* Number of bits currently held in @bitbuf. */ - u32 bitcount; + /* Number of bits currently held in @bitbuf */ + machine_word_t bitcount; - /* Pointer to the start of the output buffer. */ + /* Pointer to the start of the output buffer */ u8 *start; /* Pointer to the position in the output buffer at which the next coding - * unit should be written. */ + * unit should be written */ u8 *next; /* Pointer just past the end of the output buffer, rounded down to a - * 2-byte boundary. */ + * 2-byte boundary */ u8 *end; }; /* Can the specified number of bits always be added to 'bitbuf' after any * pending 16-bit coding units have been flushed? */ -#define CAN_BUFFER(n) ((n) <= (8 * sizeof(machine_word_t)) - 15) +#define CAN_BUFFER(n) ((n) <= WORDBITS - 15) -/* - * Initialize the output bitstream. - * - * @os - * The output bitstream structure to initialize. - * @buffer - * The buffer being written to. - * @size - * Size of @buffer, in bytes. - */ +/* Initialize the output bitstream to write to the specified buffer. */ static void lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size) { @@ -564,8 +563,10 @@ lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size) os->end = os->start + (size & ~1); } -/* Add some bits to the bitbuffer variable of the output bitstream. The caller - * must make sure there is enough room. */ +/* + * Add some bits to the bitbuffer variable of the output bitstream. The caller + * must make sure there is enough room. 
+ */ static inline void lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits) { @@ -573,16 +574,18 @@ lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits) os->bitcount += num_bits; } -/* Flush bits from the bitbuffer variable to the output buffer. 'max_num_bits' +/* + * Flush bits from the bitbuffer variable to the output buffer. 'max_num_bits' * specifies the maximum number of bits that may have been added since the last - * flush. */ + * flush. + */ static inline void lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits) { /* Masking the number of bits to shift is only needed to avoid undefined * behavior; we don't actually care about the results of bad shifts. On * x86, the explicit masking generates no extra code. */ - const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1; + const u32 shift_mask = WORDBITS - 1; if (os->end - os->next < 6) return; @@ -610,7 +613,7 @@ lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits) * Flush the last coding unit to the output buffer if needed. Return the total * number of bytes written to the output buffer, or 0 if an overflow occurred. */ -static u32 +static size_t lzx_flush_output(struct lzx_output_bitstream *os) { if (os->end - os->next < 6) @@ -624,35 +627,39 @@ lzx_flush_output(struct lzx_output_bitstream *os) return os->next - os->start; } -/* Build the main, length, and aligned offset Huffman codes used in LZX. - * - * This takes as input the frequency tables for each code and produces as output - * a set of tables that map symbols to codewords and codeword lengths. */ +/******************************************************************************/ +/* Preparing Huffman codes */ +/*----------------------------------------------------------------------------*/ + +/* + * Build the Huffman codes. This takes as input the frequency tables for each + * code and produces as output a set of tables that map symbols to codewords and + * codeword lengths. + */ static void -lzx_make_huffman_codes(struct lzx_compressor *c) +lzx_build_huffman_codes(struct lzx_compressor *c) { const struct lzx_freqs *freqs = &c->freqs; struct lzx_codes *codes = &c->codes[c->codes_index]; STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 && MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN); - STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 && - LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN); - STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS && - ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN); - make_canonical_huffman_code(c->num_main_syms, MAIN_CODEWORD_LIMIT, freqs->main, codes->lens.main, codes->codewords.main); + STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 && + LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN); make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS, LENGTH_CODEWORD_LIMIT, freqs->len, codes->lens.len, codes->codewords.len); + STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS && + ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN); make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS, ALIGNED_CODEWORD_LIMIT, freqs->aligned, @@ -660,7 +667,7 @@ lzx_make_huffman_codes(struct lzx_compressor *c) codes->codewords.aligned); } -/* Reset the symbol frequencies for the LZX Huffman codes. */ +/* Reset the symbol frequencies for the current block. 
*/ static void lzx_reset_symbol_frequencies(struct lzx_compressor *c) { @@ -756,6 +763,10 @@ lzx_compute_precode_items(const u8 lens[restrict], return itemptr - precode_items; } +/******************************************************************************/ +/* Outputting compressed data */ +/*----------------------------------------------------------------------------*/ + /* * Output a Huffman code in the compressed form used in LZX. * @@ -892,28 +903,25 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type, if (litrunlen) { /* Is the literal run nonempty? */ /* Verify optimization is enabled on 64-bit */ - STATIC_ASSERT(sizeof(machine_word_t) < 8 || - CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT)); + STATIC_ASSERT(WORDBITS < 64 || + CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT)); - if (CAN_BUFFER(4 * MAIN_CODEWORD_LIMIT)) { + if (CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT)) { - /* 64-bit: write 4 literals at a time. */ - while (litrunlen >= 4) { + /* 64-bit: write 3 literals at a time. */ + while (litrunlen >= 3) { unsigned lit0 = block_data[0]; unsigned lit1 = block_data[1]; unsigned lit2 = block_data[2]; - unsigned lit3 = block_data[3]; lzx_add_bits(os, codes->codewords.main[lit0], codes->lens.main[lit0]); lzx_add_bits(os, codes->codewords.main[lit1], codes->lens.main[lit1]); lzx_add_bits(os, codes->codewords.main[lit2], codes->lens.main[lit2]); - lzx_add_bits(os, codes->codewords.main[lit3], - codes->lens.main[lit3]); - lzx_flush_bits(os, 4 * MAIN_CODEWORD_LIMIT); - block_data += 4; - litrunlen -= 4; + lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT); + block_data += 3; + litrunlen -= 3; } if (litrunlen--) { unsigned lit = *block_data++; @@ -923,14 +931,7 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type, unsigned lit = *block_data++; lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]); - if (litrunlen--) { - unsigned lit = *block_data++; - lzx_add_bits(os, codes->codewords.main[lit], - codes->lens.main[lit]); - lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT); - } else { - lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT); - } + lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT); } else { lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT); } @@ -968,7 +969,7 @@ lzx_write_sequences(struct lzx_output_bitstream *os, int block_type, 14 + ALIGNED_CODEWORD_LIMIT) /* Verify optimization is enabled on 64-bit */ - STATIC_ASSERT(sizeof(machine_word_t) < 8 || CAN_BUFFER(MAX_MATCH_BITS)); + STATIC_ASSERT(WORDBITS < 64 || CAN_BUFFER(MAX_MATCH_BITS)); /* Output the main symbol for the match. */ @@ -1038,21 +1039,24 @@ lzx_write_compressed_block(const u8 *block_begin, * LZX_BLOCKTYPE_* constants. */ lzx_write_bits(os, block_type, 3); - /* Output the block size. + /* + * Output the block size. * - * The original LZX format seemed to always encode the block size in 3 - * bytes. However, the implementation in WIMGAPI, as used in WIM files, - * uses the first bit to indicate whether the block is the default size - * (32768) or a different size given explicitly by the next 16 bits. + * The original LZX format encoded the block size in 24 bits. However, + * the LZX format used in WIM archives uses 1 bit to specify whether the + * block has the default size of 32768 bytes, then optionally 16 bits to + * specify a non-default size. This works fine for Microsoft's WIM + * software (WIMGAPI), which never compresses more than 32768 bytes at a + * time with LZX. However, as an extension, our LZX compressor supports + * compressing up to 2097152 bytes, with a corresponding increase in + * window size. 
It is possible for blocks in these larger buffers to + * exceed 65535 bytes; such blocks cannot have their size represented in + * 16 bits. * - * By default, this compressor uses a window size of 32768 and therefore - * follows the WIMGAPI behavior. However, this compressor also supports - * window sizes greater than 32768 bytes, which do not appear to be - * supported by WIMGAPI. In such cases, we retain the default size bit - * to mean a size of 32768 bytes but output non-default block size in 24 - * bits rather than 16. The compatibility of this behavior is unknown - * because WIMs created with chunk size greater than 32768 can seemingly - * only be opened by wimlib anyway. */ + * The chosen solution was to use 24 bits for the block size when + * possibly required --- specifically, when the compressor has been + * allocated to be capable of compressing more than 32768 bytes at once. + */ if (block_size == LZX_DEFAULT_BLOCK_SIZE) { lzx_write_bits(os, 1, 1); } else { @@ -1089,10 +1093,12 @@ lzx_write_compressed_block(const u8 *block_begin, lzx_write_sequences(os, block_type, block_begin, sequences, codes); } -/* Given the frequencies of symbols in an LZX-compressed block and the +/* + * Given the frequencies of symbols in an LZX-compressed block and the * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively, - * will take fewer bits to output. */ + * will take fewer bits to output. + */ static int lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs, const struct lzx_codes * codes) @@ -1100,15 +1106,17 @@ lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs, u32 aligned_cost = 0; u32 verbatim_cost = 0; - /* A verbatim block requires 3 bits in each place that an aligned symbol - * would be used in an aligned offset block. */ + /* A verbatim block requires 3 bits in each place that an aligned offset + * symbol would be used in an aligned offset block. */ for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) { verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i]; aligned_cost += codes->lens.aligned[i] * freqs->aligned[i]; } - /* Account for output of the aligned offset code. */ - aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS; + /* Account for the cost of sending the codeword lengths of the aligned + * offset code. */ + aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * + LZX_ALIGNEDCODE_NUM_SYMBOLS; if (aligned_cost < verbatim_cost) return LZX_BLOCKTYPE_ALIGNED; @@ -1117,33 +1125,20 @@ lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs, } /* - * Return the offset slot for the specified adjusted match offset, using the - * compressor's acceleration tables to speed up the mapping. - */ -static inline unsigned -lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset, - bool is_16_bit) -{ - if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1)) - return c->offset_slot_tab_1[adjusted_offset]; - return c->offset_slot_tab_2[adjusted_offset >> 14]; -} - -/* - * Finish an LZX block: + * Flush an LZX block: * - * - build the Huffman codes - * - decide whether to output the block as VERBATIM or ALIGNED - * - output the block - * - swap the indices of the current and previous Huffman codes + * 1. Build the Huffman codes. + * 2. Decide whether to output the block as VERBATIM or ALIGNED. + * 3. Write the block. + * 4. Swap the indices of the current and previous Huffman codes. 
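+ *
+ * (Step 4 is what makes the previous block's codeword lengths available when
+ * the next block is written: the LZX format encodes the main and length code
+ * lengths as deltas from the lengths used in the previous block.)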
*/ static void -lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os, - const u8 *block_begin, u32 block_size, u32 seq_idx) +lzx_flush_block(struct lzx_compressor *c, struct lzx_output_bitstream *os, + const u8 *block_begin, u32 block_size, u32 seq_idx) { int block_type; - lzx_make_huffman_codes(c); + lzx_build_huffman_codes(c); block_type = lzx_choose_verbatim_or_aligned(&c->freqs, &c->codes[c->codes_index]); @@ -1159,242 +1154,356 @@ lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os, c->codes_index ^= 1; } -/* Tally the Huffman symbol for a literal and increment the literal run length. +/******************************************************************************/ +/* Block splitting algorithm */ +/*----------------------------------------------------------------------------*/ + +/* + * The problem of block splitting is to decide when it is worthwhile to start a + * new block with new entropy codes. There is a theoretically optimal solution: + * recursively consider every possible block split, considering the exact cost + * of each block, and choose the minimum cost approach. But this is far too + * slow. Instead, as an approximation, we can count symbols and after every N + * symbols, compare the expected distribution of symbols based on the previous + * data with the actual distribution. If they differ "by enough", then start a + * new block. + * + * As an optimization and heuristic, we don't distinguish between every symbol + * but rather we combine many symbols into a single "observation type". For + * literals we only look at the high bits and low bits, and for matches we only + * look at whether the match is long or not. The assumption is that for typical + * "real" data, places that are good block boundaries will tend to be noticable + * based only on changes in these aggregate frequencies, without looking for + * subtle differences in individual symbols. For example, a change from ASCII + * bytes to non-ASCII bytes, or from few matches (generally less compressible) + * to many matches (generally more compressible), would be easily noticed based + * on the aggregates. + * + * For determining whether the frequency distributions are "different enough" to + * start a new block, the simply heuristic of splitting when the sum of absolute + * differences exceeds a constant seems to be good enough. + * + * Finally, for an approximation, it is not strictly necessary that the exact + * symbols being used are considered. With "near-optimal parsing", for example, + * the actual symbols that will be used are unknown until after the block + * boundary is chosen and the block has been optimized. Since the final choices + * cannot be used, we can use preliminary "greedy" choices instead. */ + +/* Initialize the block split statistics when starting a new block. */ +static void +lzx_init_block_split_stats(struct lzx_block_split_stats *stats) +{ + for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) { + stats->new_observations[i] = 0; + stats->observations[i] = 0; + } + stats->num_new_observations = 0; + stats->num_observations = 0; +} + +/* Literal observation. Heuristic: use the top 2 bits and low 1 bits of the + * literal, for 8 possible literal observation types. 
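+ *
+ * (Worked example: for the literal 0x41 ('A'), the top two bits are 01 and the
+ * low bit is 1, so the slot computed below is
+ * ((0x41 >> 5) & 0x6) | (0x41 & 1) == 2 | 1 == 3.)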
*/ static inline void -lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p) +lzx_observe_literal(struct lzx_block_split_stats *stats, u8 lit) { - c->freqs.main[literal]++; - ++*litrunlen_p; + stats->new_observations[((lit >> 5) & 0x6) | (lit & 1)]++; + stats->num_new_observations++; } -/* Tally the Huffman symbol for a match, save the match data and the length of - * the preceding literal run in the next lzx_sequence, and update the recent - * offsets queue. */ +/* Match observation. Heuristic: use one observation type for "short match" and + * one observation type for "long match". */ static inline void -lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data, - u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit, - u32 *litrunlen_p, struct lzx_sequence **next_seq_p) +lzx_observe_match(struct lzx_block_split_stats *stats, unsigned length) { - u32 litrunlen = *litrunlen_p; - struct lzx_sequence *next_seq = *next_seq_p; - unsigned offset_slot; - unsigned v; + stats->new_observations[NUM_LITERAL_OBSERVATION_TYPES + (length >= 5)]++; + stats->num_new_observations++; +} - v = length - LZX_MIN_MATCH_LEN; +static bool +lzx_end_block_check(struct lzx_block_split_stats *stats) +{ + if (stats->num_observations > 0) { + + /* Note: to avoid slow divisions, we do not divide by + * 'num_observations', but rather do all math with the numbers + * multiplied by 'num_observations'. */ + u32 total_delta = 0; + for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) { + u32 expected = stats->observations[i] * + stats->num_new_observations; + u32 actual = stats->new_observations[i] * + stats->num_observations; + u32 delta = (actual > expected) ? actual - expected : + expected - actual; + total_delta += delta; + } - /* Save the literal run length and adjusted length. */ - next_seq->litrunlen = litrunlen; - next_seq->adjusted_length = v; + /* Ready to end the block? */ + if (total_delta >= + stats->num_new_observations * 7 / 8 * stats->num_observations) + return true; + } - /* Compute the length header and tally the length symbol if needed */ - if (v >= LZX_NUM_PRIMARY_LENS) { - c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++; - v = LZX_NUM_PRIMARY_LENS; + for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) { + stats->num_observations += stats->new_observations[i]; + stats->observations[i] += stats->new_observations[i]; + stats->new_observations[i] = 0; } + stats->num_new_observations = 0; + return false; +} - /* Compute the offset slot */ - offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit); +static inline bool +lzx_should_end_block(struct lzx_block_split_stats *stats, + const u8 *in_block_begin, const u8 *in_next, const u8 *in_end) +{ + /* Ready to check block split statistics? */ + if (stats->num_new_observations < NUM_OBSERVATIONS_PER_BLOCK_CHECK || + in_next - in_block_begin < MIN_BLOCK_SIZE || + in_end - in_next < MIN_BLOCK_SIZE) + return false; - /* Compute the match header. */ - v += offset_slot * LZX_NUM_LEN_HEADERS; + return lzx_end_block_check(stats); +} - /* Save the adjusted offset and match header. */ - next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v; +/******************************************************************************/ +/* Slower ("near-optimal") compression algorithm */ +/*----------------------------------------------------------------------------*/ - /* Tally the main symbol. */ - c->freqs.main[LZX_NUM_CHARS + v]++; +/* + * Least-recently-used queue for match offsets. 
+ * + * This is represented as a 64-bit integer for efficiency. There are three + * offsets of 21 bits each. Bit 64 is garbage. + */ +struct lzx_lru_queue { + u64 R; +}; - /* Update the recent offsets queue. */ - if (offset_data < LZX_NUM_RECENT_OFFSETS) { - /* Repeat offset match */ - swap(recent_offsets[0], recent_offsets[offset_data]); - } else { - /* Explicit offset match */ +#define LZX_QUEUE_OFFSET_SHIFT 21 +#define LZX_QUEUE_OFFSET_MASK (((u64)1 << LZX_QUEUE_OFFSET_SHIFT) - 1) - /* Tally the aligned offset symbol if needed */ - if (offset_data >= 16) - c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++; +#define LZX_QUEUE_R0_SHIFT (0 * LZX_QUEUE_OFFSET_SHIFT) +#define LZX_QUEUE_R1_SHIFT (1 * LZX_QUEUE_OFFSET_SHIFT) +#define LZX_QUEUE_R2_SHIFT (2 * LZX_QUEUE_OFFSET_SHIFT) - recent_offsets[2] = recent_offsets[1]; - recent_offsets[1] = recent_offsets[0]; - recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT; - } +#define LZX_QUEUE_R0_MASK (LZX_QUEUE_OFFSET_MASK << LZX_QUEUE_R0_SHIFT) +#define LZX_QUEUE_R1_MASK (LZX_QUEUE_OFFSET_MASK << LZX_QUEUE_R1_SHIFT) +#define LZX_QUEUE_R2_MASK (LZX_QUEUE_OFFSET_MASK << LZX_QUEUE_R2_SHIFT) - /* Reset the literal run length and advance to the next sequence. */ - *next_seq_p = next_seq + 1; - *litrunlen_p = 0; +#define LZX_QUEUE_INITIALIZER { \ + ((u64)1 << LZX_QUEUE_R0_SHIFT) | \ + ((u64)1 << LZX_QUEUE_R1_SHIFT) | \ + ((u64)1 << LZX_QUEUE_R2_SHIFT) } + +static inline u64 +lzx_lru_queue_R0(struct lzx_lru_queue queue) +{ + return (queue.R >> LZX_QUEUE_R0_SHIFT) & LZX_QUEUE_OFFSET_MASK; } -/* Finish the last lzx_sequence. The last lzx_sequence is just a literal run; - * there is no match. This literal run may be empty. */ -static inline void -lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen) +static inline u64 +lzx_lru_queue_R1(struct lzx_lru_queue queue) { - last_seq->litrunlen = litrunlen; + return (queue.R >> LZX_QUEUE_R1_SHIFT) & LZX_QUEUE_OFFSET_MASK; +} - /* Special value to mark last sequence */ - last_seq->adjusted_offset_and_match_hdr = 0x80000000; +static inline u64 +lzx_lru_queue_R2(struct lzx_lru_queue queue) +{ + return (queue.R >> LZX_QUEUE_R2_SHIFT) & LZX_QUEUE_OFFSET_MASK; } -/* - * Given the minimum-cost path computed through the item graph for the current - * block, walk the path and count how many of each symbol in each Huffman-coded - * alphabet would be required to output the items (matches and literals) along - * the path. - * - * Note that the path will be walked backwards (from the end of the block to the - * beginning of the block), but this doesn't matter because this function only - * computes frequencies. - */ -static inline void -lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit) +/* Push a match offset onto the front (most recently used) end of the queue. */ +static inline struct lzx_lru_queue +lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset) { - u32 node_idx = block_size; - for (;;) { - u32 len; - u32 offset_data; - unsigned v; - unsigned offset_slot; - - /* Tally literals until either a match or the beginning of the - * block is reached. */ - for (;;) { - u32 item = c->optimum_nodes[node_idx].item; - - len = item & OPTIMUM_LEN_MASK; - offset_data = item >> OPTIMUM_OFFSET_SHIFT; - - if (len != 0) /* Not a literal? */ - break; - - /* Tally the main symbol for the literal. */ - c->freqs.main[offset_data]++; - - if (--node_idx == 0) /* Beginning of block was reached? */ - return; - } - - node_idx -= len; - - /* Tally a match. 
*/ - - /* Tally the aligned offset symbol if needed. */ - if (offset_data >= 16) - c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++; - - /* Tally the length symbol if needed. */ - v = len - LZX_MIN_MATCH_LEN;; - if (v >= LZX_NUM_PRIMARY_LENS) { - c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++; - v = LZX_NUM_PRIMARY_LENS; - } + return (struct lzx_lru_queue) { + .R = (queue.R << LZX_QUEUE_OFFSET_SHIFT) | offset, + }; +} - /* Tally the main symbol. */ - offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit); - v += offset_slot * LZX_NUM_LEN_HEADERS; - c->freqs.main[LZX_NUM_CHARS + v]++; +/* Swap a match offset to the front of the queue. */ +static inline struct lzx_lru_queue +lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx) +{ + unsigned shift = idx * 21; + const u64 mask = LZX_QUEUE_R0_MASK; + const u64 mask_high = mask << shift; - if (node_idx == 0) /* Beginning of block was reached? */ - return; - } + return (struct lzx_lru_queue) { + .R = (queue.R & ~(mask | mask_high)) | + ((queue.R & mask_high) >> shift) | + ((queue.R & mask) << shift), + }; } -/* - * Like lzx_tally_item_list(), but this function also generates the list of - * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences, - * ready to be output to the bitstream after the Huffman codes are computed. - * The lzx_sequences will be written to decreasing memory addresses as the path - * is walked backwards, which means they will end up in the expected - * first-to-last order. The return value is the index in c->chosen_sequences at - * which the lzx_sequences begin. - */ static inline u32 -lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit) +lzx_walk_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit, + bool record) { u32 node_idx = block_size; u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1; u32 lit_start_node; - /* Special value to mark last sequence */ - c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000; + if (record) { + /* Special value to mark last sequence */ + c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000; + lit_start_node = node_idx; + } - lit_start_node = node_idx; for (;;) { + u32 item; u32 len; u32 offset_data; unsigned v; unsigned offset_slot; - /* Record literals until either a match or the beginning of the - * block is reached. */ + /* Tally literals until either a match or the beginning of the + * block is reached. Note: the item in the node at the + * beginning of the block has all bits set, causing this loop to + * end when it is reached. */ for (;;) { - u32 item = c->optimum_nodes[node_idx].item; + item = c->optimum_nodes[node_idx].item; + if (item & OPTIMUM_LEN_MASK) + break; + c->freqs.main[item >> OPTIMUM_OFFSET_SHIFT]++; + node_idx--; + } - len = item & OPTIMUM_LEN_MASK; - offset_data = item >> OPTIMUM_OFFSET_SHIFT; + #if CONSIDER_GAP_MATCHES + if (item & OPTIMUM_GAP_MATCH) { - if (len != 0) /* Not a literal? */ + if (node_idx == 0) break; - /* Tally the main symbol for the literal. */ - c->freqs.main[offset_data]++; + /* Save the literal run length for the next sequence + * (the "previous sequence" when walking backwards). */ + len = item & OPTIMUM_LEN_MASK; + if (record) { + c->chosen_sequences[seq_idx--].litrunlen = + lit_start_node - node_idx; + lit_start_node = node_idx - len; + } - if (--node_idx == 0) /* Beginning of block was reached? */ - goto out; + /* Tally the rep0 match after the gap. 
*/ + v = len - LZX_MIN_MATCH_LEN; + if (record) + c->chosen_sequences[seq_idx].adjusted_length = v; + if (v >= LZX_NUM_PRIMARY_LENS) { + c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++; + v = LZX_NUM_PRIMARY_LENS; + } + c->freqs.main[LZX_NUM_CHARS + v]++; + if (record) + c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = v; + + /* Tally the literal in the gap. */ + c->freqs.main[(u8)(item >> OPTIMUM_OFFSET_SHIFT)]++; + + /* Fall through and tally the match before the gap. + * (It was temporarily saved in the 'cost' field of the + * previous node, which was free to reuse.) */ + item = c->optimum_nodes[--node_idx].cost; + node_idx -= len; } + #else /* CONSIDER_GAP_MATCHES */ + if (node_idx == 0) + break; + #endif /* !CONSIDER_GAP_MATCHES */ + + len = item & OPTIMUM_LEN_MASK; + offset_data = item >> OPTIMUM_OFFSET_SHIFT; /* Save the literal run length for the next sequence (the - * "previous sequence" when walking backwards). */ - c->chosen_sequences[seq_idx--].litrunlen = lit_start_node - node_idx; - node_idx -= len; - lit_start_node = node_idx; + * "previous sequence" when walking backwards). */ + if (record) { + c->chosen_sequences[seq_idx--].litrunlen = + lit_start_node - node_idx; + node_idx -= len; + lit_start_node = node_idx; + } else { + node_idx -= len; + } - /* Record a match. */ + /* Record a match. */ - /* Tally the aligned offset symbol if needed. */ + /* Tally the aligned offset symbol if needed. */ if (offset_data >= 16) c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++; - /* Save the adjusted length. */ + /* Save the adjusted length. */ v = len - LZX_MIN_MATCH_LEN; - c->chosen_sequences[seq_idx].adjusted_length = v; + if (record) + c->chosen_sequences[seq_idx].adjusted_length = v; - /* Tally the length symbol if needed. */ + /* Tally the length symbol if needed. */ if (v >= LZX_NUM_PRIMARY_LENS) { c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++; v = LZX_NUM_PRIMARY_LENS; } - /* Tally the main symbol. */ - offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit); + /* Tally the main symbol. */ + offset_slot = lzx_get_offset_slot(c, offset_data, is_16_bit); v += offset_slot * LZX_NUM_LEN_HEADERS; c->freqs.main[LZX_NUM_CHARS + v]++; - /* Save the adjusted offset and match header. */ - c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = - (offset_data << 9) | v; - - if (node_idx == 0) /* Beginning of block was reached? */ - goto out; + /* Save the adjusted offset and match header. */ + if (record) { + c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = + (offset_data << 9) | v; + } } -out: - /* Save the literal run length for the first sequence. */ - c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx; + /* Save the literal run length for the first sequence. */ + if (record) + c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx; /* Return the index in c->chosen_sequences at which the lzx_sequences - * begin. */ + * begin. */ return seq_idx; } +/* + * Given the minimum-cost path computed through the item graph for the current + * block, walk the path and count how many of each symbol in each Huffman-coded + * alphabet would be required to output the items (matches and literals) along + * the path. + * + * Note that the path will be walked backwards (from the end of the block to the + * beginning of the block), but this doesn't matter because this function only + * computes frequencies. 
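+ *
+ * (This function and lzx_record_item_list() are thin inline wrappers around
+ * lzx_walk_item_list(); the 'record' flag is a compile-time constant at each
+ * call site, so the compiler is expected to specialize away the unused
+ * branches.)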
+ */ +static inline void +lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit) +{ + lzx_walk_item_list(c, block_size, is_16_bit, false); +} + +/* + * Like lzx_tally_item_list(), but this function also generates the list of + * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences, + * ready to be output to the bitstream after the Huffman codes are computed. + * The lzx_sequences will be written to decreasing memory addresses as the path + * is walked backwards, which means they will end up in the expected + * first-to-last order. The return value is the index in c->chosen_sequences at + * which the lzx_sequences begin. + */ +static inline u32 +lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit) +{ + return lzx_walk_item_list(c, block_size, is_16_bit, true); +} + /* * Find an inexpensive path through the graph of possible match/literal choices * for the current block. The nodes of the graph are * c->optimum_nodes[0...block_size]. They correspond directly to the bytes in * the current block, plus one extra node for end-of-block. The edges of the * graph are matches and literals. The goal is to find the minimum cost path - * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'. + * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]', given the cost + * model 'c->costs'. * * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and * proceeding forwards one node at a time. At each node, a selection of matches @@ -1427,30 +1536,53 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c, bool is_16_bit) { struct lzx_optimum_node *cur_node = c->optimum_nodes; - struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size]; + struct lzx_optimum_node * const end_node = cur_node + block_size; struct lz_match *cache_ptr = c->match_cache; const u8 *in_next = block_begin; const u8 * const block_end = block_begin + block_size; - /* Instead of storing the match offset LRU queues in the + /* + * Instead of storing the match offset LRU queues in the * 'lzx_optimum_node' structures, we save memory (and cache lines) by * storing them in a smaller array. This works because the algorithm * only requires a limited history of the adaptive state. Once a given - * state is more than LZX_MAX_MATCH_LEN bytes behind the current node, - * it is no longer needed. */ + * state is more than LZX_MAX_MATCH_LEN bytes behind the current node + * (more if gap match consideration is enabled; we just round up to 512 + * so it's a power of 2), it is no longer needed. + */ struct lzx_lru_queue queues[512]; - STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1); -#define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)]) + STATIC_ASSERT(sizeof(c->optimum_nodes[0]) == sizeof(queues[0])); +#define QUEUE(node) \ + (*(struct lzx_lru_queue *)((char *)queues + \ + ((uintptr_t)(node) % (ARRAY_LEN(queues) * sizeof(*(node)))))) + /*(queues[(uintptr_t)(node) / sizeof(*(node)) % ARRAY_LEN(queues)])*/ + +#if CONSIDER_GAP_MATCHES + u32 matches_before_gap[ARRAY_LEN(queues)]; +#define MATCH_BEFORE_GAP(node) \ + (matches_before_gap[(uintptr_t)(node) / sizeof(*(node)) % \ + ARRAY_LEN(matches_before_gap)]) +#endif - /* Initially, the cost to reach each node is "infinity". */ + /* + * Initially, the cost to reach each node is "infinity". + * + * The first node actually should have cost 0, but "infinity" + * (0xFFFFFFFF) works just as well because it immediately overflows. 
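+	 * (That is: adding the first edge cost to 0xFFFFFFFF wraps around, so
+	 * in effect every computed path cost is offset by exactly -1, which
+	 * does not change which path is cheapest.)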
+ * + * This statement also intentionally sets the 'item' of the first node, + * which would otherwise have no meaning, to 0xFFFFFFFF for use as a + * sentinel. See lzx_walk_item_list(). + */ memset(c->optimum_nodes, 0xFF, (block_size + 1) * sizeof(c->optimum_nodes[0])); - QUEUE(block_begin) = initial_queue; + /* Initialize the recent offsets queue for the first node. */ + QUEUE(cur_node) = initial_queue; + + do { /* For each node in the block in position order... */ - /* The following loop runs 'block_size' iterations, one per node. */ - do { unsigned num_matches; unsigned literal; u32 cost; @@ -1488,10 +1620,10 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c, unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN); const u8 *matchptr; - /* Consider R0 match */ - matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next)); + /* Consider rep0 matches. */ + matchptr = in_next - lzx_lru_queue_R0(QUEUE(cur_node)); if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next)) - goto R0_done; + goto rep0_done; STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2); do { u32 cost = cur_node->cost + @@ -1508,17 +1640,17 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c, } } while (in_next[next_len - 1] == matchptr[next_len - 1]); - R0_done: + rep0_done: - /* Consider R1 match */ - matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next)); + /* Consider rep1 matches. */ + matchptr = in_next - lzx_lru_queue_R1(QUEUE(cur_node)); if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next)) - goto R1_done; + goto rep1_done; if (matchptr[next_len - 1] != in_next[next_len - 1]) - goto R1_done; + goto rep1_done; for (unsigned len = 2; len < next_len - 1; len++) if (matchptr[len] != in_next[len]) - goto R1_done; + goto rep1_done; do { u32 cost = cur_node->cost + c->costs.match_cost[1][ @@ -1534,17 +1666,17 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c, } } while (in_next[next_len - 1] == matchptr[next_len - 1]); - R1_done: + rep1_done: - /* Consider R2 match */ - matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next)); + /* Consider rep2 matches. */ + matchptr = in_next - lzx_lru_queue_R2(QUEUE(cur_node)); if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next)) - goto R2_done; + goto rep2_done; if (matchptr[next_len - 1] != in_next[next_len - 1]) - goto R2_done; + goto rep2_done; for (unsigned len = 2; len < next_len - 1; len++) if (matchptr[len] != in_next[len]) - goto R2_done; + goto rep2_done; do { u32 cost = cur_node->cost + c->costs.match_cost[2][ @@ -1560,29 +1692,28 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c, } } while (in_next[next_len - 1] == matchptr[next_len - 1]); - R2_done: + rep2_done: while (next_len > cache_ptr->length) if (++cache_ptr == end_matches) goto done_matches; - /* Consider explicit offset matches */ - do { + /* Consider explicit offset matches. 
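+			 * Note that offset_data == offset +
+			 * LZX_OFFSET_ADJUSTMENT here, so the aligned-cost
+			 * test below, offset >= 16 - LZX_OFFSET_ADJUSTMENT,
+			 * is simply a rewritten form of offset_data >= 16.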
*/ + for (;;) { u32 offset = cache_ptr->offset; u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT; - unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data, - is_16_bit); + unsigned offset_slot = lzx_get_offset_slot(c, offset_data, is_16_bit); u32 base_cost = cur_node->cost; + u32 cost; - #if LZX_CONSIDER_ALIGNED_COSTS - if (offset_data >= 16) + #if CONSIDER_ALIGNED_COSTS + if (offset >= 16 - LZX_OFFSET_ADJUSTMENT) base_cost += c->costs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]; #endif - do { - u32 cost = base_cost + - c->costs.match_cost[offset_slot][ + cost = base_cost + + c->costs.match_cost[offset_slot][ next_len - LZX_MIN_MATCH_LEN]; if (cost < (cur_node + next_len)->cost) { (cur_node + next_len)->cost = cost; @@ -1590,7 +1721,43 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c, (offset_data << OPTIMUM_OFFSET_SHIFT) | next_len; } } while (++next_len <= cache_ptr->length); - } while (++cache_ptr != end_matches); + + if (++cache_ptr == end_matches) { + #if CONSIDER_GAP_MATCHES + /* Also consider the longest explicit + * offset match as a "gap match": match + * + lit + rep0. */ + s32 remaining = (block_end - in_next) - next_len; + if (likely(remaining >= 2)) { + const u8 *strptr = in_next + next_len; + const u8 *matchptr = strptr - offset; + if (load_u16_unaligned(strptr) == load_u16_unaligned(matchptr)) { + STATIC_ASSERT(ARRAY_LEN(queues) - LZX_MAX_MATCH_LEN - 2 >= 250); + STATIC_ASSERT(ARRAY_LEN(queues) == ARRAY_LEN(matches_before_gap)); + u32 limit = min(remaining, + min(ARRAY_LEN(queues) - LZX_MAX_MATCH_LEN - 2, + LZX_MAX_MATCH_LEN)); + u32 rep0_len = lz_extend(strptr, matchptr, 2, limit); + u8 lit = strptr[-1]; + cost += c->costs.main[lit] + + c->costs.match_cost[0][rep0_len - LZX_MIN_MATCH_LEN]; + u32 total_len = next_len + rep0_len; + if (cost < (cur_node + total_len)->cost) { + (cur_node + total_len)->cost = cost; + (cur_node + total_len)->item = + OPTIMUM_GAP_MATCH | + ((u32)lit << OPTIMUM_OFFSET_SHIFT) | + rep0_len; + MATCH_BEFORE_GAP(cur_node + total_len) = + (offset_data << OPTIMUM_OFFSET_SHIFT) | + (next_len - 1); + } + } + } + #endif /* CONSIDER_GAP_MATCHES */ + break; + } + } } done_matches: @@ -1599,42 +1766,66 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c, * To avoid an extra branch, actually checking the preferability * of coding the literal is integrated into the queue update - * code below. */ + * code below. */ literal = *in_next++; cost = cur_node->cost + c->costs.main[literal]; - /* Advance to the next position. */ + /* Advance to the next position. */ cur_node++; /* The lowest-cost path to the current position is now known. * Finalize the recent offsets queue that results from taking - * this lowest-cost path. */ + * this lowest-cost path. */ if (cost <= cur_node->cost) { - /* Literal: queue remains unchanged. */ + /* Literal: queue remains unchanged. */ cur_node->cost = cost; cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT; - QUEUE(in_next) = QUEUE(in_next - 1); + QUEUE(cur_node) = QUEUE(cur_node - 1); } else { - /* Match: queue update is needed. */ + /* Match: queue update is needed. 
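+			 *
+			 * On a plain 3-entry array R[] of recent offsets
+			 * (illustrative only; the real code uses the packed
+			 * lzx_lru_queue helpers), the two updates below
+			 * amount to:
+			 *
+			 *	push(x): R[2] = R[1]; R[1] = R[0]; R[0] = x;
+			 *	swap(i): tmp = R[0]; R[0] = R[i]; R[i] = tmp;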
*/ unsigned len = cur_node->item & OPTIMUM_LEN_MASK; + #if CONSIDER_GAP_MATCHES + s32 offset_data = (s32)cur_node->item >> OPTIMUM_OFFSET_SHIFT; + STATIC_ASSERT(OPTIMUM_GAP_MATCH == 0x80000000); /* assuming sign extension */ + #else u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT; + #endif + if (offset_data >= LZX_NUM_RECENT_OFFSETS) { - /* Explicit offset match: insert offset at front */ - QUEUE(in_next) = - lzx_lru_queue_push(QUEUE(in_next - len), + /* Explicit offset match: insert offset at front. */ + QUEUE(cur_node) = + lzx_lru_queue_push(QUEUE(cur_node - len), offset_data - LZX_OFFSET_ADJUSTMENT); - } else { - /* Repeat offset match: swap offset to front */ - QUEUE(in_next) = - lzx_lru_queue_swap(QUEUE(in_next - len), + } else + #if CONSIDER_GAP_MATCHES + if (offset_data < 0) { + /* "Gap match": Explicit offset match, then a + * literal, then rep0 match. Save the explicit + * offset match information in the cost field of + * the previous node, which isn't needed + * anymore. Then insert the offset at the front + * of the queue. */ + u32 match_before_gap = MATCH_BEFORE_GAP(cur_node); + (cur_node - 1)->cost = match_before_gap; + QUEUE(cur_node) = + lzx_lru_queue_push(QUEUE(cur_node - len - 1 - + (match_before_gap & OPTIMUM_LEN_MASK)), + (match_before_gap >> OPTIMUM_OFFSET_SHIFT) - + LZX_OFFSET_ADJUSTMENT); + } else + #endif + { + /* Repeat offset match: swap offset to front. */ + QUEUE(cur_node) = + lzx_lru_queue_swap(QUEUE(cur_node - len), offset_data); } } } while (cur_node != end_node); - /* Return the match offset queue at the end of the minimum cost path. */ - return QUEUE(block_end); + /* Return the recent offsets queue at the end of the path. */ + return QUEUE(cur_node); } /* Given the costs for the main and length codewords, compute 'match_costs'. */ @@ -1645,27 +1836,32 @@ lzx_compute_match_costs(struct lzx_compressor *c) LZX_NUM_LEN_HEADERS; struct lzx_costs *costs = &c->costs; - for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) { - - u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST; + for (unsigned offset_slot = 0; offset_slot < num_offset_slots; + offset_slot++) + { + u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] << + COST_SHIFT; unsigned main_symbol = LZX_NUM_CHARS + (offset_slot * LZX_NUM_LEN_HEADERS); unsigned i; - #if LZX_CONSIDER_ALIGNED_COSTS + #if CONSIDER_ALIGNED_COSTS if (offset_slot >= 8) - extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST; + extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS << COST_SHIFT; #endif - for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++) + for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++) { costs->match_cost[offset_slot][i] = costs->main[main_symbol++] + extra_cost; + } extra_cost += costs->main[main_symbol]; - for (; i < LZX_NUM_LENS; i++) + for (; i < LZX_NUM_LENS; i++) { costs->match_cost[offset_slot][i] = - costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost; + costs->len[i - LZX_NUM_PRIMARY_LENS] + + extra_cost; + } } } @@ -1678,8 +1874,8 @@ lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size) bool have_byte[256]; unsigned num_used_bytes; - /* The costs below are hard coded to use a scaling factor of 16. */ - STATIC_ASSERT(LZX_BIT_COST == 16); + /* The costs below are hard coded to use a COST_SHIFT of 6. 
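+	 * With COST_SHIFT == 6, one bit costs 1 << 6 == 64 units.  For
+	 * example, when every byte value occurs in the block, the default
+	 * literal cost chosen below is 560, i.e. 560 / 64 = 8.75 bits per
+	 * literal, and the smallest default length-symbol cost, 412, is
+	 * about 6.4 bits.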
*/ + STATIC_ASSERT(COST_SHIFT == 6); /* * Heuristics: @@ -1704,17 +1900,17 @@ lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size) num_used_bytes += have_byte[i]; for (i = 0; i < 256; i++) - c->costs.main[i] = 140 - (256 - num_used_bytes) / 4; + c->costs.main[i] = 560 - (256 - num_used_bytes); for (; i < c->num_main_syms; i++) - c->costs.main[i] = 170; + c->costs.main[i] = 680; for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) - c->costs.len[i] = 103 + (i / 4); + c->costs.len[i] = 412 + i; -#if LZX_CONSIDER_ALIGNED_COSTS +#if CONSIDER_ALIGNED_COSTS for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) - c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST; + c->costs.aligned[i] = LZX_NUM_ALIGNED_OFFSET_BITS << COST_SHIFT; #endif lzx_compute_match_costs(c); @@ -1722,33 +1918,42 @@ lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size) /* Update the current cost model to reflect the computed Huffman codes. */ static void -lzx_update_costs(struct lzx_compressor *c) +lzx_set_costs_from_codes(struct lzx_compressor *c) { unsigned i; const struct lzx_lens *lens = &c->codes[c->codes_index].lens; for (i = 0; i < c->num_main_syms; i++) { c->costs.main[i] = (lens->main[i] ? lens->main[i] : - MAIN_CODEWORD_LIMIT) * LZX_BIT_COST; + MAIN_CODEWORD_LIMIT) << COST_SHIFT; } for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) { c->costs.len[i] = (lens->len[i] ? lens->len[i] : - LENGTH_CODEWORD_LIMIT) * LZX_BIT_COST; + LENGTH_CODEWORD_LIMIT) << COST_SHIFT; } -#if LZX_CONSIDER_ALIGNED_COSTS +#if CONSIDER_ALIGNED_COSTS for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) { c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : - ALIGNED_CODEWORD_LIMIT) * LZX_BIT_COST; + ALIGNED_CODEWORD_LIMIT) << COST_SHIFT; } #endif lzx_compute_match_costs(c); } +/* + * Choose a "near-optimal" literal/match sequence to use for the current block, + * then flush the block. Because the cost of each Huffman symbol is unknown + * until the Huffman codes have been built and the Huffman codes themselves + * depend on the symbol frequencies, this uses an iterative optimization + * algorithm to approximate an optimal solution. The first optimization pass + * for the block uses default costs. Additional passes use costs taken from the + * Huffman codes computed in the previous pass. + */ static inline struct lzx_lru_queue -lzx_optimize_and_write_block(struct lzx_compressor * const restrict c, +lzx_optimize_and_flush_block(struct lzx_compressor * const restrict c, struct lzx_output_bitstream * const restrict os, const u8 * const restrict block_begin, const u32 block_size, @@ -1759,25 +1964,26 @@ lzx_optimize_and_write_block(struct lzx_compressor * const restrict c, struct lzx_lru_queue new_queue; u32 seq_idx; - /* The first optimization pass uses a default cost model. Each - * additional optimization pass uses a cost model derived from the - * Huffman code computed in the previous pass. */ - lzx_set_default_costs(c, block_begin, block_size); - lzx_reset_symbol_frequencies(c); - do { + + for (;;) { new_queue = lzx_find_min_cost_path(c, block_begin, block_size, initial_queue, is_16_bit); - if (num_passes_remaining > 1) { - lzx_tally_item_list(c, block_size, is_16_bit); - lzx_make_huffman_codes(c); - lzx_update_costs(c); - lzx_reset_symbol_frequencies(c); - } - } while (--num_passes_remaining); + if (--num_passes_remaining == 0) + break; + + /* At least one optimization pass remains. Update the costs. 
*/ + lzx_reset_symbol_frequencies(c); + lzx_tally_item_list(c, block_size, is_16_bit); + lzx_build_huffman_codes(c); + lzx_set_costs_from_codes(c); + } + + /* Done optimizing. Generate the sequence list and flush the block. */ + lzx_reset_symbol_frequencies(c); seq_idx = lzx_record_item_list(c, block_size, is_16_bit); - lzx_finish_block(c, os, block_begin, block_size, seq_idx); + lzx_flush_block(c, os, block_begin, block_size, seq_idx); return new_queue; } @@ -1785,7 +1991,7 @@ lzx_optimize_and_write_block(struct lzx_compressor * const restrict c, * This is the "near-optimal" LZX compressor. * * For each block, it performs a relatively thorough graph search to find an - * inexpensive (in terms of compressed size) way to output that block. + * inexpensive (in terms of compressed size) way to output the block. * * Note: there are actually many things this algorithm leaves on the table in * terms of compression ratio. So although it may be "near-optimal", it is @@ -1795,182 +2001,333 @@ lzx_optimize_and_write_block(struct lzx_compressor * const restrict c, * simpler "greedy" or "lazy" parse while still being relatively fast. */ static inline void -lzx_compress_near_optimal(struct lzx_compressor *c, - struct lzx_output_bitstream *os, +lzx_compress_near_optimal(struct lzx_compressor * restrict c, + const u8 * const restrict in_begin, size_t in_nbytes, + struct lzx_output_bitstream * restrict os, bool is_16_bit) { - const u8 * const in_begin = c->in_buffer; const u8 * in_next = in_begin; - const u8 * const in_end = in_begin + c->in_nbytes; + const u8 * const in_end = in_begin + in_nbytes; u32 max_len = LZX_MAX_MATCH_LEN; u32 nice_len = min(c->nice_match_length, max_len); - u32 next_hashes[2] = {}; - struct lzx_lru_queue queue; + u32 next_hashes[2] = {0, 0}; + struct lzx_lru_queue queue = LZX_QUEUE_INITIALIZER; + /* Initialize the matchfinder. */ CALL_BT_MF(is_16_bit, c, bt_matchfinder_init); - lzx_lru_queue_init(&queue); do { - /* Starting a new block */ + /* Starting a new block */ const u8 * const in_block_begin = in_next; - const u8 * const in_block_end = - in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next); - - /* Run the block through the matchfinder and cache the matches. */ + const u8 * const in_max_block_end = + in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next); struct lz_match *cache_ptr = c->match_cache; - do { - struct lz_match *lz_matchptr; - u32 best_len; + const u8 *next_search_pos = in_next; + const u8 *next_observation = + (in_max_block_end - in_next < MIN_BLOCK_SIZE) ? + in_max_block_end : in_next; + const u8 *next_pause_point = + min(in_next + min(MIN_BLOCK_SIZE, + in_max_block_end - in_next), + in_max_block_end - min(LZX_MAX_MATCH_LEN - 1, + in_max_block_end - in_next)); + + lzx_init_block_split_stats(&c->split_stats); - /* If approaching the end of the input buffer, adjust - * 'max_len' and 'nice_len' accordingly. */ - if (unlikely(max_len > in_end - in_next)) { - max_len = in_end - in_next; - nice_len = min(max_len, nice_len); - if (unlikely(max_len < - BT_MATCHFINDER_REQUIRED_NBYTES)) - { - in_next++; - cache_ptr->length = 0; - cache_ptr++; - continue; + /* + * Run the input buffer through the matchfinder, caching the + * matches, until we decide to end the block. + * + * For a tighter matchfinding loop, we compute a "pause point", + * which is the next position at which we may need to check + * whether to end the block or to decrease max_len. We then + * only do these extra checks upon reaching the pause point. 
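+		 *
+		 * For example, at the very start of a block with the full
+		 * SOFT_MAX_BLOCK_SIZE budget remaining, the initial pause
+		 * point computed above lands MIN_BLOCK_SIZE bytes ahead
+		 * (there is no reason to consider ending the block before it
+		 * reaches the minimum block size), well short of the other
+		 * limit, LZX_MAX_MATCH_LEN - 1 bytes before the soft block
+		 * end.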
+ */ + resume_matchfinding: + do { + if (in_next >= next_search_pos) { + /* Search for matches at this position. */ + struct lz_match *lz_matchptr; + u32 best_len; + + lz_matchptr = CALL_BT_MF(is_16_bit, c, + bt_matchfinder_get_matches, + in_begin, + in_next - in_begin, + max_len, + nice_len, + c->max_search_depth, + next_hashes, + &best_len, + cache_ptr + 1); + cache_ptr->length = lz_matchptr - (cache_ptr + 1); + cache_ptr = lz_matchptr; + + /* Accumulate block split statistics. */ + if (in_next >= next_observation) { + if (best_len >= 4) { + lzx_observe_match(&c->split_stats, + best_len); + next_observation = in_next + best_len; + } else { + lzx_observe_literal(&c->split_stats, + *in_next); + next_observation = in_next + 1; + } } - } - /* Check for matches. */ - lz_matchptr = CALL_BT_MF(is_16_bit, c, - bt_matchfinder_get_matches, - in_begin, - in_next - in_begin, - max_len, - nice_len, - c->max_search_depth, - next_hashes, - &best_len, - cache_ptr + 1); - in_next++; - cache_ptr->length = lz_matchptr - (cache_ptr + 1); - cache_ptr = lz_matchptr; - - /* - * If there was a very long match found, then don't - * cache any matches for the bytes covered by that - * match. This avoids degenerate behavior when - * compressing highly redundant data, where the number - * of matches can be very large. - * - * This heuristic doesn't actually hurt the compression - * ratio very much. If there's a long match, then the - * data must be highly compressible, so it doesn't - * matter as much what we do. - */ - if (best_len >= nice_len) { - --best_len; - do { - if (unlikely(max_len > in_end - in_next)) { - max_len = in_end - in_next; - nice_len = min(max_len, nice_len); - if (unlikely(max_len < - BT_MATCHFINDER_REQUIRED_NBYTES)) - { - in_next++; - cache_ptr->length = 0; - cache_ptr++; - continue; - } - } - CALL_BT_MF(is_16_bit, c, - bt_matchfinder_skip_position, - in_begin, - in_next - in_begin, - nice_len, - c->max_search_depth, - next_hashes); - in_next++; + /* + * If there was a very long match found, then + * don't cache any matches for the bytes covered + * by that match. This avoids degenerate + * behavior when compressing highly redundant + * data, where the number of matches can be very + * large. + * + * This heuristic doesn't actually hurt the + * compression ratio very much. If there's a + * long match, then the data must be highly + * compressible, so it doesn't matter as much + * what we do. + */ + if (best_len >= nice_len) + next_search_pos = in_next + best_len; + } else { + /* Don't search for matches at this position. */ + CALL_BT_MF(is_16_bit, c, + bt_matchfinder_skip_position, + in_begin, + in_next - in_begin, + nice_len, + c->max_search_depth, + next_hashes); + cache_ptr->length = 0; + cache_ptr++; + } + } while (++in_next < next_pause_point && + likely(cache_ptr < &c->match_cache[CACHE_LENGTH])); + + /* Adjust max_len and nice_len if we're nearing the end of the + * input buffer. In addition, if we are so close to the end of + * the input buffer that there cannot be any more matches, then + * just advance through the last few positions and record no + * matches. 
*/
+		if (unlikely(max_len > in_end - in_next)) {
+			max_len = in_end - in_next;
+			nice_len = min(max_len, nice_len);
+			if (max_len < BT_MATCHFINDER_REQUIRED_NBYTES) {
+				while (in_next != in_end) {
 					cache_ptr->length = 0;
 					cache_ptr++;
-				} while (--best_len);
+					in_next++;
+				}
 			}
-		} while (in_next < in_block_end &&
-			 likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
+		}
 
-		/* We've finished running the block through the matchfinder.
-		 * Now choose a match/literal sequence and write the block.  */
+		/* End the block if the match cache may overflow.  */
+		if (unlikely(cache_ptr >= &c->match_cache[CACHE_LENGTH]))
+			goto end_block;
+
+		/* End the block if the soft maximum size has been reached.  */
+		if (in_next >= in_max_block_end)
+			goto end_block;
+
+		/* End the block if the block splitting algorithm thinks this is
+		 * a good place to do so.  */
+		if (c->split_stats.num_new_observations >=
+		    NUM_OBSERVATIONS_PER_BLOCK_CHECK)
+		{
+			if (in_max_block_end - in_next < MIN_BLOCK_SIZE)
+				next_observation = in_max_block_end;
+			else if (lzx_end_block_check(&c->split_stats))
+				goto end_block;
+		}
 
-		queue = lzx_optimize_and_write_block(c, os, in_block_begin,
+		/* It's not time to end the block yet.  Compute the next pause
+		 * point and resume the matchfinding.  */
+		next_pause_point =
+			min(in_next + min(NUM_OBSERVATIONS_PER_BLOCK_CHECK * 2 -
+					    c->split_stats.num_new_observations,
+					  in_max_block_end - in_next),
+			    in_max_block_end - min(LZX_MAX_MATCH_LEN - 1,
+						   in_max_block_end - in_next));
+		goto resume_matchfinding;
+
+	end_block:
+		/* We've decided on a block boundary and cached matches.  Now
+		 * choose a match/literal sequence and flush the block.  */
+		queue = lzx_optimize_and_flush_block(c, os, in_block_begin,
 						     in_next - in_block_begin,
 						     queue, is_16_bit);
 	} while (in_next != in_end);
 }
 
 static void
-lzx_compress_near_optimal_16(struct lzx_compressor *c,
-			     struct lzx_output_bitstream *os)
+lzx_compress_near_optimal_16(struct lzx_compressor *c, const u8 *in,
+			     size_t in_nbytes, struct lzx_output_bitstream *os)
 {
-	lzx_compress_near_optimal(c, os, true);
+	lzx_compress_near_optimal(c, in, in_nbytes, os, true);
 }
 
 static void
-lzx_compress_near_optimal_32(struct lzx_compressor *c,
-			     struct lzx_output_bitstream *os)
+lzx_compress_near_optimal_32(struct lzx_compressor *c, const u8 *in,
+			     size_t in_nbytes, struct lzx_output_bitstream *os)
 {
-	lzx_compress_near_optimal(c, os, false);
+	lzx_compress_near_optimal(c, in, in_nbytes, os, false);
 }
 
+/******************************************************************************/
+/*                  Faster ("lazy") compression algorithm                     */
+/*----------------------------------------------------------------------------*/
+
 /*
- * Given a pointer to the current byte sequence and the current list of recent
- * match offsets, find the longest repeat offset match.
- *
- * If no match of at least 2 bytes is found, then return 0.
+ * Called when the compressor decides to use a literal.  This tallies the
+ * Huffman symbol for the literal and increments the current literal run
+ * length.
+ */
+static inline void
+lzx_choose_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
+{
+	lzx_observe_literal(&c->split_stats, literal);
+	c->freqs.main[literal]++;
+	++*litrunlen_p;
+}
+
+/*
+ * Called when the compressor decides to use a match.  This tallies the Huffman
+ * symbol(s) for a match, saves the match data and the length of the preceding
+ * literal run, and updates the recent offsets queue.
+ */
+static inline void
+lzx_choose_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
+		 u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
+		 u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
+{
+	u32 litrunlen = *litrunlen_p;
+	struct lzx_sequence *next_seq = *next_seq_p;
+	unsigned offset_slot;
+	unsigned v;
+
+	lzx_observe_match(&c->split_stats, length);
+
+	v = length - LZX_MIN_MATCH_LEN;
+
+	/* Save the literal run length and adjusted length.  */
+	next_seq->litrunlen = litrunlen;
+	next_seq->adjusted_length = v;
+
+	/* Compute the length header, then tally the length symbol if needed.  */
+	if (v >= LZX_NUM_PRIMARY_LENS) {
+		c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
+		v = LZX_NUM_PRIMARY_LENS;
+	}
+
+	/* Compute the offset slot.  */
+	offset_slot = lzx_get_offset_slot(c, offset_data, is_16_bit);
+
+	/* Compute the match header.  */
+	v += offset_slot * LZX_NUM_LEN_HEADERS;
+
+	/* Save the adjusted offset and match header.  */
+	next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v;
+
+	/* Tally the main symbol.  */
+	c->freqs.main[LZX_NUM_CHARS + v]++;
+
+	/* Update the recent offsets queue.  */
+	if (offset_data < LZX_NUM_RECENT_OFFSETS) {
+		/* Repeat offset match.  */
+		swap(recent_offsets[0], recent_offsets[offset_data]);
+	} else {
+		/* Explicit offset match.  */
+
+		/* Tally the aligned offset symbol if needed.  */
+		if (offset_data >= 16)
+			c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
+
+		recent_offsets[2] = recent_offsets[1];
+		recent_offsets[1] = recent_offsets[0];
+		recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT;
+	}
+
+	/* Reset the literal run length and advance to the next sequence.  */
+	*next_seq_p = next_seq + 1;
+	*litrunlen_p = 0;
+}
+
+/*
+ * Called when the compressor ends a block.  This finishes the last
+ * lzx_sequence, which is just a literal run with no following match.  This
+ * literal run might be empty.
+ */
+static inline void
+lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
+{
+	last_seq->litrunlen = litrunlen;
+
+	/* Special value to mark last sequence  */
+	last_seq->adjusted_offset_and_match_hdr = 0x80000000;
+}
+
+/*
+ * Find the longest repeat offset match with the current position.  If a match
+ * is found, return its length and set *best_rep_idx_ret to the index of its
+ * offset in @recent_offsets.  Otherwise, return 0.
 *
- * If a match of at least 2 bytes is found, then return its length and set
- * *rep_max_idx_ret to the index of its offset in @queue.
-*/
+ * Don't bother with length 2 matches; consider matches of length >= 3 only.
+ */ static unsigned lzx_find_longest_repeat_offset_match(const u8 * const in_next, - const u32 bytes_remaining, - const u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], - unsigned *rep_max_idx_ret) + const u32 recent_offsets[], + const unsigned max_len, + unsigned *best_rep_idx_ret) { - STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3); + STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3); /* loop is unrolled */ - const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN); - const u16 next_2_bytes = load_u16_unaligned(in_next); + const u32 seq3 = load_u24_unaligned(in_next); const u8 *matchptr; - unsigned rep_max_len; - unsigned rep_max_idx; + unsigned best_rep_len; + unsigned best_rep_idx; unsigned rep_len; + /* Check for rep0 match (most recent offset) */ matchptr = in_next - recent_offsets[0]; - if (load_u16_unaligned(matchptr) == next_2_bytes) - rep_max_len = lz_extend(in_next, matchptr, 2, max_len); + if (load_u24_unaligned(matchptr) == seq3) + best_rep_len = lz_extend(in_next, matchptr, 3, max_len); else - rep_max_len = 0; - rep_max_idx = 0; + best_rep_len = 0; + best_rep_idx = 0; + /* Check for rep1 match (second most recent offset) */ matchptr = in_next - recent_offsets[1]; - if (load_u16_unaligned(matchptr) == next_2_bytes) { - rep_len = lz_extend(in_next, matchptr, 2, max_len); - if (rep_len > rep_max_len) { - rep_max_len = rep_len; - rep_max_idx = 1; + if (load_u24_unaligned(matchptr) == seq3) { + rep_len = lz_extend(in_next, matchptr, 3, max_len); + if (rep_len > best_rep_len) { + best_rep_len = rep_len; + best_rep_idx = 1; } } + /* Check for rep2 match (third most recent offset) */ matchptr = in_next - recent_offsets[2]; - if (load_u16_unaligned(matchptr) == next_2_bytes) { - rep_len = lz_extend(in_next, matchptr, 2, max_len); - if (rep_len > rep_max_len) { - rep_max_len = rep_len; - rep_max_idx = 2; + if (load_u24_unaligned(matchptr) == seq3) { + rep_len = lz_extend(in_next, matchptr, 3, max_len); + if (rep_len > best_rep_len) { + best_rep_len = rep_len; + best_rep_idx = 2; } } - *rep_max_idx_ret = rep_max_idx; - return rep_max_len; + *best_rep_idx_ret = best_rep_idx; + return best_rep_len; } -/* Fast heuristic scoring for lazy parsing: how "good" is this match? */ +/* + * Fast heuristic scoring for lazy parsing: how "good" is this match? + * This is mainly determined by the length: longer matches are better. + * However, we also give a bonus to close (small offset) matches and to repeat + * offset matches, since those require fewer bits to encode. + */ + static inline unsigned lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset) { @@ -1978,7 +2335,6 @@ lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset) if (adjusted_offset < 4096) score++; - if (adjusted_offset < 256) score++; @@ -1991,29 +2347,42 @@ lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx) return rep_len + 3; } -/* This is the "lazy" LZX compressor. */ +/* + * This is the "lazy" LZX compressor. The basic idea is that before it chooses + * a match, it checks to see if there's a longer match at the next position. If + * yes, it chooses a literal and continues to the next position. If no, it + * chooses the match. + * + * Some additional heuristics are used as well. Repeat offset matches are + * considered favorably and sometimes are chosen immediately. In addition, long + * matches (at least "nice_len" bytes) are chosen immediately as well. Finally, + * when we decide whether a match is "better" than another, we take the offset + * into consideration as well as the length. 
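+ *
+ * Stripped of those heuristics, the core lazy decision can be sketched
+ * roughly as follows (illustrative C-style pseudocode; find_match() and the
+ * emit_*() helpers are hypothetical stand-ins for the matchfinder and output
+ * code):
+ *
+ *	cur_len = find_match(pos, &cur_offset);
+ *	if (cur_len < 3) {
+ *		emit_literal(data[pos++]);		// no usable match
+ *	} else {
+ *		next_len = find_match(pos + 1, &next_offset);
+ *		if (next_len > cur_len) {
+ *			emit_literal(data[pos++]);	// defer; retry at pos + 1
+ *		} else {
+ *			emit_match(cur_len, cur_offset);
+ *			pos += cur_len;
+ *		}
+ *	}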
+ */ static inline void -lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os, - bool is_16_bit) +lzx_compress_lazy(struct lzx_compressor * restrict c, + const u8 * const restrict in_begin, size_t in_nbytes, + struct lzx_output_bitstream * restrict os, bool is_16_bit) { - const u8 * const in_begin = c->in_buffer; const u8 * in_next = in_begin; - const u8 * const in_end = in_begin + c->in_nbytes; + const u8 * const in_end = in_begin + in_nbytes; unsigned max_len = LZX_MAX_MATCH_LEN; unsigned nice_len = min(c->nice_match_length, max_len); STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3); u32 recent_offsets[3] = {1, 1, 1}; - u32 next_hashes[2] = {}; + u32 next_hashes[2] = {0, 0}; + /* Initialize the matchfinder. */ CALL_HC_MF(is_16_bit, c, hc_matchfinder_init); do { - /* Starting a new block */ + /* Starting a new block */ const u8 * const in_block_begin = in_next; - const u8 * const in_block_end = - in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next); + const u8 * const in_max_block_end = + in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next); struct lzx_sequence *next_seq = c->chosen_sequences; + u32 litrunlen = 0; unsigned cur_len; u32 cur_offset; u32 cur_offset_data; @@ -2022,22 +2391,26 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os, u32 next_offset; u32 next_offset_data; unsigned next_score; - unsigned rep_max_len; - unsigned rep_max_idx; + unsigned best_rep_len; + unsigned best_rep_idx; unsigned rep_score; unsigned skip_len; - u32 litrunlen = 0; lzx_reset_symbol_frequencies(c); + lzx_init_block_split_stats(&c->split_stats); do { + /* Adjust max_len and nice_len if we're nearing the end + * of the input buffer. */ if (unlikely(max_len > in_end - in_next)) { max_len = in_end - in_next; nice_len = min(max_len, nice_len); } - /* Find the longest match at the current position. */ - + /* Find the longest match (subject to the + * max_search_depth cutoff parameter) with the current + * position. Don't bother with length 2 matches; only + * look for matches of length >= 3. */ cur_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match, in_begin, @@ -2048,6 +2421,9 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os, c->max_search_depth, next_hashes, &cur_offset); + + /* If there was no match found, or the only match found + * was a distant short match, then choose a literal. */ if (cur_len < 3 || (cur_len == 3 && cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT && @@ -2055,12 +2431,13 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os, cur_offset != recent_offsets[1] && cur_offset != recent_offsets[2])) { - /* There was no match found, or the only match found - * was a distant length 3 match. Output a literal. */ - lzx_record_literal(c, *in_next++, &litrunlen); + lzx_choose_literal(c, *in_next, &litrunlen); + in_next++; continue; } + /* Heuristic: if this match has the most recent offset, + * then go ahead and choose it as a rep0 match. */ if (cur_offset == recent_offsets[0]) { in_next++; cur_offset_data = 0; @@ -2068,38 +2445,44 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os, goto choose_cur_match; } + /* Compute the longest match's score as an explicit + * offset match. 
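+			 *
+			 * For a sense of scale: a length 4 repeat offset
+			 * match scores 4 + 3 = 7 (see
+			 * lzx_repeat_offset_match_score() above), so a short
+			 * repeat offset match can already compete with a
+			 * somewhat longer explicit offset match, and the ">="
+			 * comparison below breaks ties in the repeat offset
+			 * match's favor.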
*/ cur_offset_data = cur_offset + LZX_OFFSET_ADJUSTMENT; cur_score = lzx_explicit_offset_match_score(cur_len, cur_offset_data); - /* Consider a repeat offset match */ - rep_max_len = lzx_find_longest_repeat_offset_match(in_next, - in_end - in_next, - recent_offsets, - &rep_max_idx); + /* Find the longest repeat offset match at this + * position. If we find one and it's "better" than the + * explicit offset match we found, then go ahead and + * choose the repeat offset match immediately. */ + best_rep_len = lzx_find_longest_repeat_offset_match(in_next, + recent_offsets, + max_len, + &best_rep_idx); in_next++; - if (rep_max_len >= 3 && - (rep_score = lzx_repeat_offset_match_score(rep_max_len, - rep_max_idx)) >= cur_score) + if (best_rep_len != 0 && + (rep_score = lzx_repeat_offset_match_score(best_rep_len, + best_rep_idx)) >= cur_score) { - cur_len = rep_max_len; - cur_offset_data = rep_max_idx; - skip_len = rep_max_len - 1; + cur_len = best_rep_len; + cur_offset_data = best_rep_idx; + skip_len = best_rep_len - 1; goto choose_cur_match; } have_cur_match: + /* + * We have a match at the current position. If the + * match is very long, then choose it immediately. + * Otherwise, see if there's a better match at the next + * position. + */ - /* We have a match at the current position. */ - - /* If we have a very long match, choose it immediately. */ if (cur_len >= nice_len) { skip_len = cur_len - 1; goto choose_cur_match; } - /* See if there's a better match at the next position. */ - if (unlikely(max_len > in_end - in_next)) { max_len = in_end - in_next; nice_len = min(max_len, nice_len); @@ -2117,6 +2500,7 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os, &next_offset); if (next_len <= cur_len - 2) { + /* No potentially better match was found. */ in_next++; skip_len = cur_len - 2; goto choose_cur_match; @@ -2125,32 +2509,32 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os, next_offset_data = next_offset + LZX_OFFSET_ADJUSTMENT; next_score = lzx_explicit_offset_match_score(next_len, next_offset_data); - rep_max_len = lzx_find_longest_repeat_offset_match(in_next, - in_end - in_next, - recent_offsets, - &rep_max_idx); + best_rep_len = lzx_find_longest_repeat_offset_match(in_next, + recent_offsets, + max_len, + &best_rep_idx); in_next++; - if (rep_max_len >= 3 && - (rep_score = lzx_repeat_offset_match_score(rep_max_len, - rep_max_idx)) >= next_score) + if (best_rep_len != 0 && + (rep_score = lzx_repeat_offset_match_score(best_rep_len, + best_rep_idx)) >= next_score) { if (rep_score > cur_score) { /* The next match is better, and it's a - * repeat offset match. */ - lzx_record_literal(c, *(in_next - 2), + * repeat offset match. */ + lzx_choose_literal(c, *(in_next - 2), &litrunlen); - cur_len = rep_max_len; - cur_offset_data = rep_max_idx; + cur_len = best_rep_len; + cur_offset_data = best_rep_idx; skip_len = cur_len - 1; goto choose_cur_match; } } else { if (next_score > cur_score) { /* The next match is better, and it's an - * explicit offset match. */ - lzx_record_literal(c, *(in_next - 2), + * explicit offset match. */ + lzx_choose_literal(c, *(in_next - 2), &litrunlen); cur_len = next_len; cur_offset_data = next_offset_data; @@ -2159,11 +2543,13 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os, } } - /* The original match was better. */ + /* The original match was better; choose it. 
*/ skip_len = cur_len - 2; choose_cur_match: - lzx_record_match(c, cur_len, cur_offset_data, + /* Choose a match and have the matchfinder skip over its + * remaining bytes. */ + lzx_choose_match(c, cur_len, cur_offset_data, recent_offsets, is_16_bit, &litrunlen, &next_seq); in_next = CALL_HC_MF(is_16_bit, c, @@ -2173,35 +2559,49 @@ lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os, in_end - in_begin, skip_len, next_hashes); - } while (in_next < in_block_end); - lzx_finish_sequence(next_seq, litrunlen); + /* Keep going until it's time to end the block. */ + } while (in_next < in_max_block_end && + !lzx_should_end_block(&c->split_stats, in_block_begin, + in_next, in_end)); - lzx_finish_block(c, os, in_block_begin, in_next - in_block_begin, 0); + /* Flush the block. */ + lzx_finish_sequence(next_seq, litrunlen); + lzx_flush_block(c, os, in_block_begin, in_next - in_block_begin, 0); + /* Keep going until we've reached the end of the input buffer. */ } while (in_next != in_end); } static void -lzx_compress_lazy_16(struct lzx_compressor *c, struct lzx_output_bitstream *os) +lzx_compress_lazy_16(struct lzx_compressor *c, const u8 *in, size_t in_nbytes, + struct lzx_output_bitstream *os) { - lzx_compress_lazy(c, os, true); + lzx_compress_lazy(c, in, in_nbytes, os, true); } static void -lzx_compress_lazy_32(struct lzx_compressor *c, struct lzx_output_bitstream *os) +lzx_compress_lazy_32(struct lzx_compressor *c, const u8 *in, size_t in_nbytes, + struct lzx_output_bitstream *os) { - lzx_compress_lazy(c, os, false); + lzx_compress_lazy(c, in, in_nbytes, os, false); } -/* Generate the acceleration tables for offset slots. */ +/******************************************************************************/ +/* Compressor operations */ +/*----------------------------------------------------------------------------*/ + +/* + * Generate tables for mapping match offsets (actually, "adjusted" match + * offsets) to offset slots. + */ static void lzx_init_offset_slot_tabs(struct lzx_compressor *c) { u32 adjusted_offset = 0; unsigned slot = 0; - /* slots [0, 29] */ + /* slots [0, 29] */ for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1); adjusted_offset++) { @@ -2210,7 +2610,7 @@ lzx_init_offset_slot_tabs(struct lzx_compressor *c) c->offset_slot_tab_1[adjusted_offset] = slot; } - /* slots [30, 49] */ + /* slots [30, 49] */ for (; adjusted_offset < LZX_MAX_WINDOW_SIZE; adjusted_offset += (u32)1 << 14) { @@ -2223,7 +2623,7 @@ lzx_init_offset_slot_tabs(struct lzx_compressor *c) static size_t lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level) { - if (compression_level <= LZX_MAX_FAST_LEVEL) { + if (compression_level <= MAX_FAST_LEVEL) { if (lzx_is_16_bit(max_bufsize)) return offsetof(struct lzx_compressor, hc_mf_16) + hc_matchfinder_size_16(max_bufsize); @@ -2240,6 +2640,7 @@ lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level) } } +/* Compute the amount of memory needed to allocate an LZX compressor. */ static u64 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level, bool destructive) @@ -2251,10 +2652,11 @@ lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level, size += lzx_get_compressor_size(max_bufsize, compression_level); if (!destructive) - size += max_bufsize; /* in_buffer */ + size += max_bufsize; /* account for in_buffer */ return size; } +/* Allocate an LZX compressor. 
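+ *
+ * For example, with the scaling applied below, compression level 20 falls in
+ * the "fast" range and gets the lazy compressor with max_search_depth 60 and
+ * nice_match_length 80, while level 50 gets the near-optimal compressor with
+ * max_search_depth 24, nice_match_length 48, and 2 optimization passes.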
*/ static int lzx_create_compressor(size_t max_bufsize, unsigned compression_level, bool destructive, void **c_ret) @@ -2262,87 +2664,75 @@ lzx_create_compressor(size_t max_bufsize, unsigned compression_level, unsigned window_order; struct lzx_compressor *c; + /* Validate the maximum buffer size and get the window order from it. */ window_order = lzx_get_window_order(max_bufsize); if (window_order == 0) return WIMLIB_ERR_INVALID_PARAM; + /* Allocate the compressor. */ c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level)); if (!c) goto oom0; - c->destructive = destructive; - - c->num_main_syms = lzx_get_num_main_syms(window_order); c->window_order = window_order; + c->num_main_syms = lzx_get_num_main_syms(window_order); + c->destructive = destructive; + /* Allocate the buffer for preprocessed data if needed. */ if (!c->destructive) { c->in_buffer = MALLOC(max_bufsize); if (!c->in_buffer) goto oom1; } - if (compression_level <= LZX_MAX_FAST_LEVEL) { - - /* Fast compression: Use lazy parsing. */ + if (compression_level <= MAX_FAST_LEVEL) { + /* Fast compression: Use lazy parsing. */ if (lzx_is_16_bit(max_bufsize)) c->impl = lzx_compress_lazy_16; else c->impl = lzx_compress_lazy_32; + + /* Scale max_search_depth and nice_match_length with the + * compression level. */ c->max_search_depth = (60 * compression_level) / 20; c->nice_match_length = (80 * compression_level) / 20; /* lzx_compress_lazy() needs max_search_depth >= 2 because it * halves the max_search_depth when attempting a lazy match, and - * max_search_depth cannot be 0. */ - if (c->max_search_depth < 2) - c->max_search_depth = 2; + * max_search_depth must be at least 1. */ + c->max_search_depth = max(c->max_search_depth, 2); } else { - /* Normal / high compression: Use near-optimal parsing. */ - + /* Normal / high compression: Use near-optimal parsing. */ if (lzx_is_16_bit(max_bufsize)) c->impl = lzx_compress_near_optimal_16; else c->impl = lzx_compress_near_optimal_32; - /* Scale nice_match_length and max_search_depth with the - * compression level. */ + /* Scale max_search_depth and nice_match_length with the + * compression level. */ c->max_search_depth = (24 * compression_level) / 50; c->nice_match_length = (48 * compression_level) / 50; - /* Set a number of optimization passes appropriate for the - * compression level. */ - + /* Also scale num_optim_passes with the compression level. But + * the more passes there are, the less they help --- so don't + * add them linearly. */ c->num_optim_passes = 1; - - if (compression_level >= 45) - c->num_optim_passes++; - - /* Use more optimization passes for higher compression levels. - * But the more passes there are, the less they help --- so - * don't add them linearly. */ - if (compression_level >= 70) { - c->num_optim_passes++; - if (compression_level >= 100) - c->num_optim_passes++; - if (compression_level >= 150) - c->num_optim_passes++; - if (compression_level >= 200) - c->num_optim_passes++; - if (compression_level >= 300) - c->num_optim_passes++; - } + c->num_optim_passes += (compression_level >= 45); + c->num_optim_passes += (compression_level >= 70); + c->num_optim_passes += (compression_level >= 100); + c->num_optim_passes += (compression_level >= 150); + c->num_optim_passes += (compression_level >= 200); + c->num_optim_passes += (compression_level >= 300); + + /* max_search_depth must be at least 1. */ + c->max_search_depth = max(c->max_search_depth, 1); } - /* max_search_depth == 0 is invalid. 
*/ - if (c->max_search_depth < 1) - c->max_search_depth = 1; - - if (c->nice_match_length > LZX_MAX_MATCH_LEN) - c->nice_match_length = LZX_MAX_MATCH_LEN; - + /* Prepare the offset => offset slot mapping. */ lzx_init_offset_slot_tabs(c); + *c_ret = c; return 0; @@ -2352,6 +2742,7 @@ oom0: return WIMLIB_ERR_NOMEM; } +/* Compress a buffer of data. */ static size_t lzx_compress(const void *restrict in, size_t in_nbytes, void *restrict out, size_t out_nbytes_avail, void *restrict _c) @@ -2360,35 +2751,44 @@ lzx_compress(const void *restrict in, size_t in_nbytes, struct lzx_output_bitstream os; size_t result; - /* Don't bother trying to compress very small inputs. */ - if (in_nbytes < 100) + /* Don't bother trying to compress very small inputs. */ + if (in_nbytes < 64) return 0; - /* Copy the input data into the internal buffer and preprocess it. */ - if (c->destructive) - c->in_buffer = (void *)in; - else + /* Copy the input data into the internal buffer if needed. */ + if (!c->destructive) { memcpy(c->in_buffer, in, in_nbytes); - c->in_nbytes = in_nbytes; - lzx_preprocess(c->in_buffer, in_nbytes); + in = c->in_buffer; + } + + /* Preprocess the input data. */ + lzx_preprocess((void *)in, in_nbytes); - /* Initially, the previous Huffman codeword lengths are all zeroes. */ + /* Initially, the previous Huffman codeword lengths are all zeroes. */ c->codes_index = 0; memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens)); - /* Initialize the output bitstream. */ + /* Initialize the output bitstream. */ lzx_init_output(&os, out, out_nbytes_avail); - /* Call the compression level-specific compress() function. */ - (*c->impl)(c, &os); + /* Call the compression level-specific compress() function. */ + (*c->impl)(c, in, in_nbytes, &os); - /* Flush the output bitstream and return the compressed size or 0. */ + /* Flush the output bitstream. */ result = lzx_flush_output(&os); - if (!result && c->destructive) - lzx_postprocess(c->in_buffer, c->in_nbytes); + + /* If the data did not compress to less than its original size and we + * preprocessed the original buffer, then postprocess it to restore it + * to its original state. */ + if (result == 0 && c->destructive) + lzx_postprocess((void *)in, in_nbytes); + + /* Return the number of compressed bytes, or 0 if the input did not + * compress to less than its original size. */ return result; } +/* Free an LZX compressor. */ static void lzx_free_compressor(void *_c) {