X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=src%2Flzx_compress.c;h=f0c3c5268c82c5e26347b9c42e5c008e811886c8;hp=3f3b836c636f8dd2bbdac17ae6a7b2d97bb673e0;hb=48c386f471f50c2e8d07f9766be2410d401297c1;hpb=5343bde03c158cc767b1a347a7323d0e33c78d41 diff --git a/src/lzx_compress.c b/src/lzx_compress.c index 3f3b836c..f0c3c526 100644 --- a/src/lzx_compress.c +++ b/src/lzx_compress.c @@ -1,11 +1,11 @@ /* * lzx_compress.c * - * A compressor for the LZX compression format, as used in WIM files. + * A compressor for the LZX compression format, as used in WIM archives. */ /* - * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers + * Copyright (C) 2012-2016 Eric Biggers * * This file is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free @@ -26,19 +26,19 @@ * This file contains a compressor for the LZX ("Lempel-Ziv eXtended") * compression format, as used in the WIM (Windows IMaging) file format. * - * Two different parsing algorithms are implemented: "near-optimal" and "lazy". - * "Near-optimal" is significantly slower than "lazy", but results in a better - * compression ratio. The "near-optimal" algorithm is used at the default - * compression level. + * Two different LZX-compatible algorithms are implemented: "near-optimal" and + * "lazy". "Near-optimal" is significantly slower than "lazy", but results in a + * better compression ratio. The "near-optimal" algorithm is used at the + * default compression level. * * This file may need some slight modifications to be used outside of the WIM * format. In particular, in other situations the LZX block header might be * slightly different, and sliding window support might be required. * - * Note: LZX is a compression format derived from DEFLATE, the format used by - * zlib and gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding. - * Certain details are quite similar, such as the method for storing Huffman - * codes. However, the main differences are: + * LZX is a compression format derived from DEFLATE, the format used by zlib and + * gzip. Both LZX and DEFLATE use LZ77 matching and Huffman coding. Certain + * details are quite similar, such as the method for storing Huffman codes. + * However, the main differences are: * * - LZX preprocesses the data to attempt to make x86 machine code slightly more * compressible before attempting to compress it further. @@ -53,162 +53,231 @@ * ("verbatim" and "aligned"). * * - LZX has a minimum match length of 2 rather than 3. Length 2 matches can be - * useful, but generally only if the parser is smart about choosing them. + * useful, but generally only if the compressor is smart about choosing them. * * - In LZX, offset slots 0 through 2 actually represent entries in an LRU queue * of match offsets. This is very useful for certain types of files, such as * binary files that have repeating records. */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif +/******************************************************************************/ +/* General parameters */ +/*----------------------------------------------------------------------------*/ /* - * Start a new LZX block (with new Huffman codes) after this many bytes. - * - * Note: actual block sizes may slightly exceed this value. - * - * TODO: recursive splitting and cost evaluation might be good for an extremely - * high compression mode, but otherwise it is almost always far too slow for how - * much it helps. 
Perhaps some sort of heuristic would be useful? + * The compressor uses the faster algorithm at levels <= MAX_FAST_LEVEL. It + * uses the slower algorithm at levels > MAX_FAST_LEVEL. */ -#define LZX_DIV_BLOCK_SIZE 32768 +#define MAX_FAST_LEVEL 34 /* - * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the - * match cache for each byte position. This value should be high enough so that - * nearly the time, all matches found in a given block can fit in the match - * cache. However, fallback behavior (immediately terminating the block) on - * cache overflow is still required. + * The compressor-side limits on the codeword lengths (in bits) for each Huffman + * code. To make outputting bits slightly faster, some of these limits are + * lower than the limits defined by the LZX format. This does not significantly + * affect the compression ratio. */ -#define LZX_CACHE_PER_POS 7 +#define MAIN_CODEWORD_LIMIT 16 +#define LENGTH_CODEWORD_LIMIT 12 +#define ALIGNED_CODEWORD_LIMIT 7 +#define PRE_CODEWORD_LIMIT 7 + + +/******************************************************************************/ +/* Block splitting parameters */ +/*----------------------------------------------------------------------------*/ /* - * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache, - * excluding the extra "overflow" entries. The per-position multiplier is '1 + - * LZX_CACHE_PER_POS' instead of 'LZX_CACHE_PER_POS' because there is an - * overhead of one lz_match per position, used to hold the match count at that - * position. + * The compressor always outputs blocks of at least this size in bytes, except + * for the last block which may need to be smaller. */ -#define LZX_CACHE_LENGTH (LZX_DIV_BLOCK_SIZE * (1 + LZX_CACHE_PER_POS)) +#define MIN_BLOCK_SIZE 6500 /* - * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can - * ever be saved in the match cache for a single position. Since each match we - * save for a single position has a distinct length, we can use the number of - * possible match lengths in LZX as this bound. This bound is guaranteed to be - * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then - * it will never actually be reached. + * The compressor attempts to end a block when it reaches this size in bytes. + * The final size might be slightly larger due to matches extending beyond the + * end of the block. Specifically: + * + * - The near-optimal compressor may choose a match of up to LZX_MAX_MATCH_LEN + * bytes starting at position 'SOFT_MAX_BLOCK_SIZE - 1'. + * + * - The lazy compressor may choose a sequence of literals starting at position + * 'SOFT_MAX_BLOCK_SIZE - 1' when it sees a sequence of increasingly better + * matches. The final match may be up to LZX_MAX_MATCH_LEN bytes. The + * length of the literal sequence is approximately limited by the "nice match + * length" parameter. */ -#define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS +#define SOFT_MAX_BLOCK_SIZE 100000 /* - * LZX_BIT_COST is a scaling factor that represents the cost to output one bit. - * This makes it possible to consider fractional bit costs. - * - * Note: this is only useful as a statistical trick for when the true costs are - * unknown. In reality, each token in LZX requires a whole number of bits to - * output. + * The number of observed items (matches and literals) that represents + * sufficient data for the compressor to decide whether the current block should + * be ended or not. 
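* Roughly speaking, once NUM_OBSERVATIONS_PER_BLOCK_CHECK new observations
+ * have accumulated, the compressor calls lzx_should_end_block() (defined
+ * below) to re-evaluate whether it should end the current block.
+ 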
*/
-#define LZX_BIT_COST	16
+#define NUM_OBSERVATIONS_PER_BLOCK_CHECK	400
+
+
+/******************************************************************************/
+/*                       Parameters for slower algorithm                      */
+/*----------------------------------------------------------------------------*/
 
 /*
- * Consideration of aligned offset costs is disabled for now, due to
- * insufficient benefit gained from the time spent.
+ * The log base 2 of the number of entries in the hash table for finding length
+ * 2 matches.  This could be as high as 16, but using a smaller hash table
+ * speeds up compression due to reduced cache pressure.
  */
-#define LZX_CONSIDER_ALIGNED_COSTS	0
+#define BT_MATCHFINDER_HASH2_ORDER	12
 
 /*
- * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
- * faster algorithm.
+ * The number of lz_match structures in the match cache, excluding the extra
+ * "overflow" entries.  This value should be high enough so that, nearly all of
+ * the time, all matches found in a given block can fit in the match cache.
+ * However, fallback behavior (immediately terminating the block) on cache
+ * overflow is still required.
  */
-#define LZX_MAX_FAST_LEVEL	34
+#define CACHE_LENGTH	(SOFT_MAX_BLOCK_SIZE * 5)
 
 /*
- * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
- * for finding length 2 matches.  This can be as high as 16 (in which case the
- * hash function is trivial), but using a smaller hash table speeds up
- * compression due to reduced cache pressure.
+ * An upper bound on the number of matches that can ever be saved in the match
+ * cache for a single position.  Since each match we save for a single position
+ * has a distinct length, we can use the number of possible match lengths in LZX
+ * as this bound.  This bound is guaranteed to be valid in all cases, although
+ * if 'nice_match_length < LZX_MAX_MATCH_LEN', then it will never actually be
+ * reached.
  */
-#define LZX_HASH2_ORDER		12
-#define LZX_HASH2_LENGTH	(1UL << LZX_HASH2_ORDER)
+#define MAX_MATCHES_PER_POS	LZX_NUM_LENS
 
-#include "wimlib/lzx_common.h"
+/*
+ * A scaling factor that makes it possible to consider fractional bit costs.  A
+ * single bit has a cost of BIT_COST.
+ *
+ * Note: this is only useful as a statistical trick for when the true costs are
+ * unknown.  Ultimately, each token in LZX requires a whole number of bits to
+ * output.
+ */
+#define BIT_COST	64
 
 /*
- * The maximum allowed window order for the matchfinder.
+ * Should the compressor take into account the costs of aligned offset symbols
+ * instead of assuming that all are equally likely?
  */
-#define MATCHFINDER_MAX_WINDOW_ORDER	LZX_MAX_WINDOW_ORDER
+#define CONSIDER_ALIGNED_COSTS	1
 
-#include <string.h>
+/*
+ * Should the "minimum" cost path search algorithm consider "gap" matches, where
+ * a normal match is followed by a literal, then by a match with the same
+ * offset?  This is one specific, somewhat common situation in which the true
+ * minimum cost path is often different from the path found by looking only one
+ * edge ahead.
+ 
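* For example, in data like
+ *
+ *	"abcde1fghijabcde2fghij"
+ *
+ * the second half can be coded as a match on "abcde", the single literal '2',
+ * and then a match on "fghij" that reuses the offset of the "abcde" match.
+ 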
+ */ +#define CONSIDER_GAP_MATCHES 1 + +/******************************************************************************/ +/* Includes */ +/*----------------------------------------------------------------------------*/ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif -#include "wimlib/bt_matchfinder.h" #include "wimlib/compress_common.h" #include "wimlib/compressor_ops.h" #include "wimlib/error.h" -#include "wimlib/hc_matchfinder.h" #include "wimlib/lz_extend.h" +#include "wimlib/lzx_common.h" #include "wimlib/unaligned.h" #include "wimlib/util.h" -struct lzx_output_bitstream; +/* Note: BT_MATCHFINDER_HASH2_ORDER must be defined before including + * bt_matchfinder.h. */ -/* Codewords for the LZX Huffman codes. */ +/* Matchfinders with 16-bit positions */ +#define mf_pos_t u16 +#define MF_SUFFIX _16 +#include "wimlib/bt_matchfinder.h" +#include "wimlib/hc_matchfinder.h" + +/* Matchfinders with 32-bit positions */ +#undef mf_pos_t +#undef MF_SUFFIX +#define mf_pos_t u32 +#define MF_SUFFIX _32 +#include "wimlib/bt_matchfinder.h" +#include "wimlib/hc_matchfinder.h" + +/******************************************************************************/ +/* Compressor structure */ +/*----------------------------------------------------------------------------*/ + +/* Codewords for the Huffman codes */ struct lzx_codewords { u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS]; u32 len[LZX_LENCODE_NUM_SYMBOLS]; u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS]; }; -/* Codeword lengths (in bits) for the LZX Huffman codes. - * A zero length means the corresponding codeword has zero frequency. */ +/* + * Codeword lengths, in bits, for the Huffman codes. + * + * A codeword length of 0 means the corresponding codeword has zero frequency. + * + * The main and length codes each have one extra entry for use as a sentinel. + * See lzx_write_compressed_code(). + */ struct lzx_lens { - u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS]; - u8 len[LZX_LENCODE_NUM_SYMBOLS]; + u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1]; + u8 len[LZX_LENCODE_NUM_SYMBOLS + 1]; u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS]; }; -/* Cost model for near-optimal parsing */ -struct lzx_costs { - - /* 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost for a - * length 'len' match that has an offset belonging to 'offset_slot'. */ - u32 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS]; - - /* Cost for each symbol in the main code */ - u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS]; - - /* Cost for each symbol in the length code */ - u32 len[LZX_LENCODE_NUM_SYMBOLS]; - -#if LZX_CONSIDER_ALIGNED_COSTS - /* Cost for each symbol in the aligned code */ - u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS]; -#endif -}; - -/* Codewords and lengths for the LZX Huffman codes. */ +/* Codewords and lengths for the Huffman codes */ struct lzx_codes { struct lzx_codewords codewords; struct lzx_lens lens; }; -/* Symbol frequency counters for the LZX Huffman codes. */ +/* Symbol frequency counters for the Huffman-encoded alphabets */ struct lzx_freqs { u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS]; u32 len[LZX_LENCODE_NUM_SYMBOLS]; u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS]; }; -/* Intermediate LZX match/literal format */ -struct lzx_item { +/* Block split statistics. See the "Block splitting algorithm" section later in + * this file for details. 
*/ +#define NUM_LITERAL_OBSERVATION_TYPES 8 +#define NUM_MATCH_OBSERVATION_TYPES 2 +#define NUM_OBSERVATION_TYPES (NUM_LITERAL_OBSERVATION_TYPES + \ + NUM_MATCH_OBSERVATION_TYPES) +struct lzx_block_split_stats { + u32 new_observations[NUM_OBSERVATION_TYPES]; + u32 observations[NUM_OBSERVATION_TYPES]; + u32 num_new_observations; + u32 num_observations; +}; - /* Bits 0 - 9: Main symbol - * Bits 10 - 17: Length symbol - * Bits 18 - 22: Number of extra offset bits - * Bits 23+ : Extra offset bits */ - u64 data; +/* + * Represents a run of literals followed by a match or end-of-block. This + * structure is needed to temporarily store items chosen by the compressor, + * since items cannot be written until all items for the block have been chosen + * and the block's Huffman codes have been computed. + */ +struct lzx_sequence { + + /* The number of literals in the run. This may be 0. The literals are + * not stored explicitly in this structure; instead, they are read + * directly from the uncompressed data. */ + u16 litrunlen; + + /* If the next field doesn't indicate end-of-block, then this is the + * match length minus LZX_MIN_MATCH_LEN. */ + u16 adjusted_length; + + /* If bit 31 is clear, then this field contains the match header in bits + * 0-8, and either the match offset plus LZX_OFFSET_ADJUSTMENT or a + * recent offset code in bits 9-30. Otherwise (if bit 31 is set), this + * sequence's literal run was the last literal run in the block, so + * there is no match that follows it. */ + u32 adjusted_offset_and_match_hdr; }; /* @@ -228,214 +297,149 @@ struct lzx_optimum_node { u32 cost; /* - * The match or literal that was taken to reach this position. This can - * change as progressively lower cost paths are found to reach this - * position. + * The best arrival to this node, i.e. the match or literal that was + * used to arrive to this position at the given 'cost'. This can change + * as progressively lower cost paths are found to reach this position. * - * This variable is divided into two bitfields. + * For non-gap matches, this variable is divided into two bitfields + * whose meanings depend on the item type: * * Literals: - * Low bits are 1, high bits are the literal. + * Low bits are 0, high bits are the literal. * * Explicit offset matches: - * Low bits are the match length, high bits are the offset plus 2. + * Low bits are the match length, high bits are the offset plus + * LZX_OFFSET_ADJUSTMENT. * * Repeat offset matches: * Low bits are the match length, high bits are the queue index. + * + * For gap matches, identified by OPTIMUM_GAP_MATCH set, special + * behavior applies --- see the code. */ u32 item; #define OPTIMUM_OFFSET_SHIFT 9 #define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1) -} _aligned_attribute(8); +#if CONSIDER_GAP_MATCHES +# define OPTIMUM_GAP_MATCH 0x80000000 +#endif -/* - * Least-recently-used queue for match offsets. - * - * This is represented as a 64-bit integer for efficiency. There are three - * offsets of 21 bits each. Bit 64 is garbage. 
- */ -struct lzx_lru_queue { - u64 R; -}; +} _aligned_attribute(8); -#define LZX_QUEUE64_OFFSET_SHIFT 21 -#define LZX_QUEUE64_OFFSET_MASK (((u64)1 << LZX_QUEUE64_OFFSET_SHIFT) - 1) +/* The cost model for near-optimal parsing */ +struct lzx_costs { -#define LZX_QUEUE64_R0_SHIFT (0 * LZX_QUEUE64_OFFSET_SHIFT) -#define LZX_QUEUE64_R1_SHIFT (1 * LZX_QUEUE64_OFFSET_SHIFT) -#define LZX_QUEUE64_R2_SHIFT (2 * LZX_QUEUE64_OFFSET_SHIFT) + /* + * 'match_cost[offset_slot][len - LZX_MIN_MATCH_LEN]' is the cost of a + * length 'len' match which has an offset belonging to 'offset_slot'. + * The cost includes the main symbol, the length symbol if required, and + * the extra offset bits if any, excluding any entropy-coded bits + * (aligned offset bits). It does *not* include the cost of the aligned + * offset symbol which may be required. + */ + u16 match_cost[LZX_MAX_OFFSET_SLOTS][LZX_NUM_LENS]; -#define LZX_QUEUE64_R0_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R0_SHIFT) -#define LZX_QUEUE64_R1_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R1_SHIFT) -#define LZX_QUEUE64_R2_MASK (LZX_QUEUE64_OFFSET_MASK << LZX_QUEUE64_R2_SHIFT) + /* Cost of each symbol in the main code */ + u32 main[LZX_MAINCODE_MAX_NUM_SYMBOLS]; -static inline void -lzx_lru_queue_init(struct lzx_lru_queue *queue) -{ - queue->R = ((u64)1 << LZX_QUEUE64_R0_SHIFT) | - ((u64)1 << LZX_QUEUE64_R1_SHIFT) | - ((u64)1 << LZX_QUEUE64_R2_SHIFT); -} + /* Cost of each symbol in the length code */ + u32 len[LZX_LENCODE_NUM_SYMBOLS]; -static inline u64 -lzx_lru_queue_R0(struct lzx_lru_queue queue) -{ - return (queue.R >> LZX_QUEUE64_R0_SHIFT) & LZX_QUEUE64_OFFSET_MASK; -} +#if CONSIDER_ALIGNED_COSTS + /* Cost of each symbol in the aligned offset code */ + u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS]; +#endif +}; -static inline u64 -lzx_lru_queue_R1(struct lzx_lru_queue queue) -{ - return (queue.R >> LZX_QUEUE64_R1_SHIFT) & LZX_QUEUE64_OFFSET_MASK; -} +struct lzx_output_bitstream; -static inline u64 -lzx_lru_queue_R2(struct lzx_lru_queue queue) -{ - return (queue.R >> LZX_QUEUE64_R2_SHIFT) & LZX_QUEUE64_OFFSET_MASK; -} +/* The main LZX compressor structure */ +struct lzx_compressor { -/* Push a match offset onto the front (most recently used) end of the queue. */ -static inline struct lzx_lru_queue -lzx_lru_queue_push(struct lzx_lru_queue queue, u32 offset) -{ - return (struct lzx_lru_queue) { - .R = (queue.R << LZX_QUEUE64_OFFSET_SHIFT) | offset, - }; -} + /* The buffer for preprocessed input data, if not using destructive + * compression */ + void *in_buffer; -/* Pop a match offset off the front (most recently used) end of the queue. */ -static inline u32 -lzx_lru_queue_pop(struct lzx_lru_queue *queue_p) -{ - u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK; - queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT; - return offset; -} + /* If true, then the compressor need not preserve the input buffer if it + * compresses the data successfully */ + bool destructive; -/* Swap a match offset to the front of the queue. 
*/ -static inline struct lzx_lru_queue -lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx) -{ - if (idx == 0) - return queue; - - if (idx == 1) - return (struct lzx_lru_queue) { - .R = (lzx_lru_queue_R1(queue) << LZX_QUEUE64_R0_SHIFT) | - (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R1_SHIFT) | - (queue.R & LZX_QUEUE64_R2_MASK), - }; + /* Pointer to the compress() implementation chosen at allocation time */ + void (*impl)(struct lzx_compressor *, const u8 *, size_t, + struct lzx_output_bitstream *); - return (struct lzx_lru_queue) { - .R = (lzx_lru_queue_R2(queue) << LZX_QUEUE64_R0_SHIFT) | - (queue.R & LZX_QUEUE64_R1_MASK) | - (lzx_lru_queue_R0(queue) << LZX_QUEUE64_R2_SHIFT), - }; -} + /* The log base 2 of the window size for match offset encoding purposes. + * This will be >= LZX_MIN_WINDOW_ORDER and <= LZX_MAX_WINDOW_ORDER. */ + unsigned window_order; -/* The main LZX compressor structure */ -struct lzx_compressor { + /* The number of symbols in the main alphabet. This depends on the + * window order, since the window order determines the maximum possible + * match offset. */ + unsigned num_main_syms; - /* The "nice" match length: if a match of this length is found, then - * choose it immediately without further consideration. */ + /* The "nice" match length: if a match of this length is found, then it + * is chosen immediately without further consideration. */ unsigned nice_match_length; - /* The maximum search depth: consider at most this many potential - * matches at each position. */ + /* The maximum search depth: at most this many potential matches are + * considered at each position. */ unsigned max_search_depth; - /* The log base 2 of the LZX window size for LZ match offset encoding - * purposes. This will be >= LZX_MIN_WINDOW_ORDER and <= - * LZX_MAX_WINDOW_ORDER. */ - unsigned window_order; - - /* The number of symbols in the main alphabet. This depends on - * @window_order, since @window_order determines the maximum possible - * offset. */ - unsigned num_main_syms; - - /* Number of optimization passes per block */ + /* The number of optimization passes per block */ unsigned num_optim_passes; - /* The preprocessed buffer of data being compressed */ - u8 *in_buffer; - - /* The number of bytes of data to be compressed, which is the number of - * bytes of data in @in_buffer that are actually valid. */ - size_t in_nbytes; - - /* Pointer to the compress() implementation chosen at allocation time */ - void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *); - - /* If true, the compressor need not preserve the input buffer if it - * compresses the data successfully. */ - bool destructive; - - /* The Huffman symbol frequency counters for the current block. */ + /* The symbol frequency counters for the current block */ struct lzx_freqs freqs; + /* Block split statistics for the current block */ + struct lzx_block_split_stats split_stats; + /* The Huffman codes for the current and previous blocks. The one with * index 'codes_index' is for the current block, and the other one is - * for the previous block. */ + * for the previous block. */ struct lzx_codes codes[2]; unsigned codes_index; - /* - * The match/literal sequence the algorithm chose for the current block. - * - * Notes on how large this array actually needs to be: - * - * - In lzx_compress_near_optimal(), the maximum block size is - * 'LZX_DIV_BLOCK_SIZE + LZX_MAX_MATCH_LEN - 1' bytes. This occurs if - * a match of the maximum length is found on the last byte. 
Although - * it is impossible for this particular case to actually result in a - * parse of all literals, we reserve this many spaces anyway. - * - * - The worst case for lzx_compress_lazy() is a block of almost all - * literals that ends with a series of matches of increasing scores, - * causing a sequence of literals to be chosen before the last match - * is finally chosen. The number of items actually chosen in this - * scenario is limited by the number of distinct match scores that - * exist for matches shorter than 'nice_match_length'. Having - * 'LZX_MAX_MATCH_LEN - 1' extra spaces is plenty for now. - */ - struct lzx_item chosen_items[LZX_DIV_BLOCK_SIZE + LZX_MAX_MATCH_LEN - 1]; + /* The matches and literals that the compressor has chosen for the + * current block. The required length of this array is limited by the + * maximum number of matches that can ever be chosen for a single block, + * plus one for the special entry at the end. */ + struct lzx_sequence chosen_sequences[ + DIV_ROUND_UP(SOFT_MAX_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1]; - /* Table mapping match offset => offset slot for small offsets */ -#define LZX_NUM_FAST_OFFSETS 32768 - u8 offset_slot_fast[LZX_NUM_FAST_OFFSETS]; + /* Tables for mapping adjusted offsets to offset slots */ + u8 offset_slot_tab_1[32768]; /* offset slots [0, 29] */ + u8 offset_slot_tab_2[128]; /* offset slots [30, 49] */ union { - /* Data for greedy or lazy parsing */ + /* Data for lzx_compress_lazy() */ struct { - /* Hash chains matchfinder (MUST BE LAST!!!) */ - struct hc_matchfinder hc_mf; + /* Hash chains matchfinder (MUST BE LAST!!!) */ + union { + struct hc_matchfinder_16 hc_mf_16; + struct hc_matchfinder_32 hc_mf_32; + }; }; - /* Data for near-optimal parsing */ + /* Data for lzx_compress_near_optimal() */ struct { /* - * The graph nodes for the current block. - * - * We need at least 'LZX_DIV_BLOCK_SIZE + - * LZX_MAX_MATCH_LEN - 1' nodes because that is the - * maximum block size that may be used. Add 1 because - * we need a node to represent end-of-block. + * Array of nodes, one per position, for running the + * minimum-cost path algorithm. * - * It is possible that nodes past end-of-block are - * accessed during match consideration, but this can - * only occur if the block was truncated at - * LZX_DIV_BLOCK_SIZE. So the same bound still applies. - * Note that since nodes past the end of the block will - * never actually have an effect on the items that are - * chosen for the block, it makes no difference what - * their costs are initialized to (if anything). + * This array must be large enough to accommodate the + * worst-case number of nodes, which occurs if the + * compressor finds a match of length LZX_MAX_MATCH_LEN + * at position 'SOFT_MAX_BLOCK_SIZE - 1', producing a + * block of size 'SOFT_MAX_BLOCK_SIZE - 1 + + * LZX_MAX_MATCH_LEN'. Add one for the end-of-block + * node. */ - struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE + - LZX_MAX_MATCH_LEN - 1 + 1]; + struct lzx_optimum_node optimum_nodes[ + SOFT_MAX_BLOCK_SIZE - 1 + + LZX_MAX_MATCH_LEN + 1]; - /* The cost model for the current block */ + /* The cost model for the current optimization pass */ struct lzx_costs costs; /* @@ -451,188 +455,226 @@ struct lzx_compressor { * Note: in rare cases, there will be a very high number * of matches in the block and this array will overflow. * If this happens, we force the end of the current - * block. LZX_CACHE_LENGTH is the length at which we + * block. CACHE_LENGTH is the length at which we * actually check for overflow. 
The extra slots beyond * this are enough to absorb the worst case overflow, - * which occurs if starting at - * &match_cache[LZX_CACHE_LENGTH - 1], we write the - * match count header, then write - * LZX_MAX_MATCHES_PER_POS matches, then skip searching - * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and + * which occurs if starting at &match_cache[CACHE_LENGTH + * - 1], we write the match count header, then write + * MAX_MATCHES_PER_POS matches, then skip searching for + * matches at 'LZX_MAX_MATCH_LEN - 1' positions and * write the match count header for each. */ - struct lz_match match_cache[LZX_CACHE_LENGTH + - LZX_MAX_MATCHES_PER_POS + + struct lz_match match_cache[CACHE_LENGTH + + MAX_MATCHES_PER_POS + LZX_MAX_MATCH_LEN - 1]; - /* Hash table for finding length 2 matches */ - pos_t hash2_tab[LZX_HASH2_LENGTH] - _aligned_attribute(MATCHFINDER_ALIGNMENT); - - /* Binary trees matchfinder (MUST BE LAST!!!) */ - struct bt_matchfinder bt_mf; + /* Binary trees matchfinder (MUST BE LAST!!!) */ + union { + struct bt_matchfinder_16 bt_mf_16; + struct bt_matchfinder_32 bt_mf_32; + }; }; }; }; +/******************************************************************************/ +/* Matchfinder utilities */ +/*----------------------------------------------------------------------------*/ + +/* + * Will a matchfinder using 16-bit positions be sufficient for compressing + * buffers of up to the specified size? The limit could be 65536 bytes, but we + * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case. + * This requires that the limit be no more than the length of offset_slot_tab_1 + * (currently 32768). + */ +static inline bool +lzx_is_16_bit(size_t max_bufsize) +{ + STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768); + return max_bufsize <= 32768; +} + +/* + * Return the offset slot for the specified adjusted match offset. + */ +static inline unsigned +lzx_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset, + bool is_16_bit) +{ + if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1)) + return c->offset_slot_tab_1[adjusted_offset]; + return c->offset_slot_tab_2[adjusted_offset >> 14]; +} + +/* + * The following macros call either the 16-bit or the 32-bit version of a + * matchfinder function based on the value of 'is_16_bit', which will be known + * at compilation time. + */ + +#define CALL_HC_MF(is_16_bit, c, funcname, ...) \ + ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \ + CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__)); + +#define CALL_BT_MF(is_16_bit, c, funcname, ...) \ + ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \ + CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__)); + +/******************************************************************************/ +/* Output bitstream */ +/*----------------------------------------------------------------------------*/ + +/* + * The LZX bitstream is encoded as a sequence of little endian 16-bit coding + * units. Bits are ordered from most significant to least significant within + * each coding unit. + */ + /* * Structure to keep track of the current state of sending bits to the * compressed output buffer. - * - * The LZX bitstream is encoded as a sequence of 16-bit coding units. */ struct lzx_output_bitstream { - /* Bits that haven't yet been written to the output buffer. 
*/ - u32 bitbuf; + /* Bits that haven't yet been written to the output buffer */ + machine_word_t bitbuf; - /* Number of bits currently held in @bitbuf. */ - u32 bitcount; + /* Number of bits currently held in @bitbuf */ + machine_word_t bitcount; - /* Pointer to the start of the output buffer. */ + /* Pointer to the start of the output buffer */ u8 *start; /* Pointer to the position in the output buffer at which the next coding - * unit should be written. */ + * unit should be written */ u8 *next; - /* Pointer just past the end of the output buffer, rounded down to a - * 2-byte boundary. */ + /* Pointer to just past the end of the output buffer, rounded down by + * one byte if needed to make 'end - start' a multiple of 2 */ u8 *end; }; -/* - * Initialize the output bitstream. - * - * @os - * The output bitstream structure to initialize. - * @buffer - * The buffer being written to. - * @size - * Size of @buffer, in bytes. - */ +/* Can the specified number of bits always be added to 'bitbuf' after all + * pending 16-bit coding units have been flushed? */ +#define CAN_BUFFER(n) ((n) <= WORDBITS - 15) + +/* Initialize the output bitstream to write to the specified buffer. */ static void lzx_init_output(struct lzx_output_bitstream *os, void *buffer, size_t size) { os->bitbuf = 0; os->bitcount = 0; os->start = buffer; - os->next = os->start; - os->end = os->start + (size & ~1); + os->next = buffer; + os->end = (u8 *)buffer + (size & ~1); } /* - * Write some bits to the output bitstream. - * - * The bits are given by the low-order @num_bits bits of @bits. Higher-order - * bits in @bits cannot be set. At most 17 bits can be written at once. - * - * @max_num_bits is a compile-time constant that specifies the maximum number of - * bits that can ever be written at the call site. It is used to optimize away - * the conditional code for writing a second 16-bit coding unit when writing - * fewer than 17 bits. - * - * If the output buffer space is exhausted, then the bits will be ignored, and - * lzx_flush_output() will return 0 when it gets called. + * Add some bits to the bitbuffer variable of the output bitstream. The caller + * must make sure there is enough room. */ static inline void -lzx_write_varbits(struct lzx_output_bitstream *os, - const u32 bits, const unsigned num_bits, - const unsigned max_num_bits) +lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits) { - /* This code is optimized for LZX, which never needs to write more than - * 17 bits at once. */ - LZX_ASSERT(num_bits <= 17); - LZX_ASSERT(num_bits <= max_num_bits); - LZX_ASSERT(os->bitcount <= 15); - - /* Add the bits to the bit buffer variable. @bitcount will be at most - * 15, so there will be just enough space for the maximum possible - * @num_bits of 17. */ - os->bitcount += num_bits; os->bitbuf = (os->bitbuf << num_bits) | bits; + os->bitcount += num_bits; +} - /* Check whether any coding units need to be written. */ - if (os->bitcount >= 16) { - - os->bitcount -= 16; - - /* Write a coding unit, unless it would overflow the buffer. */ - if (os->next != os->end) { - put_unaligned_u16_le(os->bitbuf >> os->bitcount, os->next); - os->next += 2; - } +/* + * Flush bits from the bitbuffer variable to the output buffer. 'max_num_bits' + * specifies the maximum number of bits that may have been added since the last + * flush. 
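* As an illustration, a caller may buffer several codewords before flushing
+ * once (cf. the literal-run loop in lzx_write_sequences() below):
+ *
+ *	lzx_add_bits(os, codes->codewords.main[lit0], codes->lens.main[lit0]);
+ *	lzx_add_bits(os, codes->codewords.main[lit1], codes->lens.main[lit1]);
+ *	lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
+ *
+ * which is valid whenever CAN_BUFFER(2 * MAIN_CODEWORD_LIMIT) holds.
+ 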
+ */ +static inline void +lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits) +{ + /* Masking the number of bits to shift is only needed to avoid undefined + * behavior; we don't actually care about the results of bad shifts. On + * x86, the explicit masking generates no extra code. */ + const u32 shift_mask = WORDBITS - 1; - /* If writing 17 bits, a second coding unit might need to be - * written. But because 'max_num_bits' is a compile-time - * constant, the compiler will optimize away this code at most - * call sites. */ - if (max_num_bits == 17 && os->bitcount == 16) { - if (os->next != os->end) { - put_unaligned_u16_le(os->bitbuf, os->next); - os->next += 2; - } - os->bitcount = 0; - } - } + if (os->end - os->next < 6) + return; + put_unaligned_le16(os->bitbuf >> ((os->bitcount - 16) & + shift_mask), os->next + 0); + if (max_num_bits > 16) + put_unaligned_le16(os->bitbuf >> ((os->bitcount - 32) & + shift_mask), os->next + 2); + if (max_num_bits > 32) + put_unaligned_le16(os->bitbuf >> ((os->bitcount - 48) & + shift_mask), os->next + 4); + os->next += (os->bitcount >> 4) << 1; + os->bitcount &= 15; } -/* Use when @num_bits is a compile-time constant. Otherwise use - * lzx_write_varbits(). */ +/* Add at most 16 bits to the bitbuffer and flush it. */ static inline void lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits) { - lzx_write_varbits(os, bits, num_bits, num_bits); + lzx_add_bits(os, bits, num_bits); + lzx_flush_bits(os, 16); } /* * Flush the last coding unit to the output buffer if needed. Return the total * number of bytes written to the output buffer, or 0 if an overflow occurred. */ -static u32 +static size_t lzx_flush_output(struct lzx_output_bitstream *os) { - if (os->next == os->end) + if (os->end - os->next < 6) return 0; if (os->bitcount != 0) { - put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next); + put_unaligned_le16(os->bitbuf << (16 - os->bitcount), os->next); os->next += 2; } return os->next - os->start; } -/* Build the main, length, and aligned offset Huffman codes used in LZX. - * - * This takes as input the frequency tables for each code and produces as output - * a set of tables that map symbols to codewords and codeword lengths. */ +/******************************************************************************/ +/* Preparing Huffman codes */ +/*----------------------------------------------------------------------------*/ + +/* + * Build the Huffman codes. This takes as input the frequency tables for each + * code and produces as output a set of tables that map symbols to codewords and + * codeword lengths. 
+ */ static void -lzx_make_huffman_codes(struct lzx_compressor *c) +lzx_build_huffman_codes(struct lzx_compressor *c) { const struct lzx_freqs *freqs = &c->freqs; struct lzx_codes *codes = &c->codes[c->codes_index]; + STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 && + MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN); make_canonical_huffman_code(c->num_main_syms, - LZX_MAX_MAIN_CODEWORD_LEN, + MAIN_CODEWORD_LIMIT, freqs->main, codes->lens.main, codes->codewords.main); + STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 && + LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN); make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS, - LZX_MAX_LEN_CODEWORD_LEN, + LENGTH_CODEWORD_LIMIT, freqs->len, codes->lens.len, codes->codewords.len); + STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS && + ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN); make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS, - LZX_MAX_ALIGNED_CODEWORD_LEN, + ALIGNED_CODEWORD_LIMIT, freqs->aligned, codes->lens.aligned, codes->codewords.aligned); } -/* Reset the symbol frequencies for the LZX Huffman codes. */ +/* Reset the symbol frequencies for the current block. */ static void lzx_reset_symbol_frequencies(struct lzx_compressor *c) { @@ -642,7 +684,6 @@ lzx_reset_symbol_frequencies(struct lzx_compressor *c) static unsigned lzx_compute_precode_items(const u8 lens[restrict], const u8 prev_lens[restrict], - const unsigned num_lens, u32 precode_freqs[restrict], unsigned precode_items[restrict]) { @@ -655,16 +696,17 @@ lzx_compute_precode_items(const u8 lens[restrict], itemptr = precode_items; run_start = 0; - do { - /* Find the next run of codeword lengths. */ + + while (!((len = lens[run_start]) & 0x80)) { /* len = the length being repeated */ - len = lens[run_start]; + + /* Find the next run of codeword lengths. */ run_end = run_start + 1; /* Fast case for a single length. */ - if (likely(run_end == num_lens || len != lens[run_end])) { + if (likely(len != lens[run_end])) { delta = prev_lens[run_start] - len; if (delta < 0) delta += 17; @@ -677,14 +719,14 @@ lzx_compute_precode_items(const u8 lens[restrict], /* Extend the run. */ do { run_end++; - } while (run_end != num_lens && len == lens[run_end]); + } while (len == lens[run_end]); if (len == 0) { /* Run of zeroes. */ /* Symbol 18: RLE 20 to 51 zeroes at a time. */ while ((run_end - run_start) >= 20) { - extra_bits = min((run_end - run_start) - 20, 0x1f); + extra_bits = min((run_end - run_start) - 20, 0x1F); precode_freqs[18]++; *itemptr++ = 18 | (extra_bits << 5); run_start += 20 + extra_bits; @@ -692,7 +734,7 @@ lzx_compute_precode_items(const u8 lens[restrict], /* Symbol 17: RLE 4 to 19 zeroes at a time. */ if ((run_end - run_start) >= 4) { - extra_bits = min((run_end - run_start) - 4, 0xf); + extra_bits = min((run_end - run_start) - 4, 0xF); precode_freqs[17]++; *itemptr++ = 17 | (extra_bits << 5); run_start += 4 + extra_bits; @@ -723,11 +765,15 @@ lzx_compute_precode_items(const u8 lens[restrict], *itemptr++ = delta; run_start++; } - } while (run_start != num_lens); + } return itemptr - precode_items; } +/******************************************************************************/ +/* Outputting compressed data */ +/*----------------------------------------------------------------------------*/ + /* * Output a Huffman code in the compressed form used in LZX. 
* @@ -771,6 +817,8 @@ lzx_write_compressed_code(struct lzx_output_bitstream *os, unsigned precode_item; unsigned precode_sym; unsigned i; + u8 saved = lens[num_lens]; + *(u8 *)(lens + num_lens) = 0x80; for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++) precode_freqs[i] = 0; @@ -779,13 +827,13 @@ lzx_write_compressed_code(struct lzx_output_bitstream *os, * the codeword lengths in the larger code will be output. */ num_precode_items = lzx_compute_precode_items(lens, prev_lens, - num_lens, precode_freqs, precode_items); /* Build the precode. */ - make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS, - LZX_MAX_PRE_CODEWORD_LEN, + STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 && + PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN); + make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS, PRE_CODEWORD_LIMIT, precode_freqs, precode_lens, precode_codewords); @@ -797,78 +845,25 @@ lzx_write_compressed_code(struct lzx_output_bitstream *os, for (i = 0; i < num_precode_items; i++) { precode_item = precode_items[i]; precode_sym = precode_item & 0x1F; - lzx_write_varbits(os, precode_codewords[precode_sym], - precode_lens[precode_sym], - LZX_MAX_PRE_CODEWORD_LEN); + lzx_add_bits(os, precode_codewords[precode_sym], + precode_lens[precode_sym]); if (precode_sym >= 17) { if (precode_sym == 17) { - lzx_write_bits(os, precode_item >> 5, 4); + lzx_add_bits(os, precode_item >> 5, 4); } else if (precode_sym == 18) { - lzx_write_bits(os, precode_item >> 5, 5); + lzx_add_bits(os, precode_item >> 5, 5); } else { - lzx_write_bits(os, (precode_item >> 5) & 1, 1); + lzx_add_bits(os, (precode_item >> 5) & 1, 1); precode_sym = precode_item >> 6; - lzx_write_varbits(os, precode_codewords[precode_sym], - precode_lens[precode_sym], - LZX_MAX_PRE_CODEWORD_LEN); + lzx_add_bits(os, precode_codewords[precode_sym], + precode_lens[precode_sym]); } } + STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1)); + lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1); } -} - -/* Output a match or literal. */ -static inline void -lzx_write_item(struct lzx_output_bitstream *os, struct lzx_item item, - unsigned ones_if_aligned, const struct lzx_codes *codes) -{ - u64 data = item.data; - unsigned main_symbol; - unsigned len_symbol; - unsigned num_extra_bits; - u32 extra_bits; - - main_symbol = data & 0x3FF; - lzx_write_varbits(os, codes->codewords.main[main_symbol], - codes->lens.main[main_symbol], - LZX_MAX_MAIN_CODEWORD_LEN); - - if (main_symbol < LZX_NUM_CHARS) /* Literal? */ - return; - - len_symbol = (data >> 10) & 0xFF; - - if (len_symbol != LZX_LENCODE_NUM_SYMBOLS) { - lzx_write_varbits(os, codes->codewords.len[len_symbol], - codes->lens.len[len_symbol], - LZX_MAX_LEN_CODEWORD_LEN); - } - - num_extra_bits = (data >> 18) & 0x1F; - if (num_extra_bits == 0) /* Small offset or repeat offset match? */ - return; - - extra_bits = data >> 23; - - if ((num_extra_bits & ones_if_aligned) >= LZX_NUM_ALIGNED_OFFSET_BITS) { - - /* Aligned offset blocks: The low 3 bits of the extra offset - * bits are Huffman-encoded using the aligned offset code. The - * remaining bits are output literally. */ - - lzx_write_varbits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS, - num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS, - 17 - LZX_NUM_ALIGNED_OFFSET_BITS); - - lzx_write_varbits(os, - codes->codewords.aligned[extra_bits & LZX_ALIGNED_OFFSET_BITMASK], - codes->lens.aligned[extra_bits & LZX_ALIGNED_OFFSET_BITMASK], - LZX_MAX_ALIGNED_CODEWORD_LEN); - } else { - /* Verbatim blocks, or fewer than 3 extra bits: All extra - * offset bits are output literally. 
*/ - lzx_write_varbits(os, extra_bits, num_extra_bits, 17); - } + *(u8 *)(lens + num_lens) = saved; } /* @@ -881,58 +876,193 @@ lzx_write_item(struct lzx_output_bitstream *os, struct lzx_item item, * @block_type * The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or * LZX_BLOCKTYPE_VERBATIM). - * @items - * The array of matches/literals to output. - * @num_items - * Number of matches/literals to output (length of @items). + * @block_data + * The uncompressed data of the block. + * @sequences + * The matches and literals to output, given as a series of sequences. * @codes - * The main, length, and aligned offset Huffman codes for the current - * LZX compressed block. + * The main, length, and aligned offset Huffman codes for the block. */ static void -lzx_write_items(struct lzx_output_bitstream *os, int block_type, - const struct lzx_item items[], u32 num_items, - const struct lzx_codes *codes) +lzx_write_sequences(struct lzx_output_bitstream *os, int block_type, + const u8 *block_data, const struct lzx_sequence sequences[], + const struct lzx_codes *codes) { - unsigned ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED); + const struct lzx_sequence *seq = sequences; + u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED); + + for (;;) { + /* Output the next sequence. */ + + unsigned litrunlen = seq->litrunlen; + unsigned match_hdr; + unsigned main_symbol; + unsigned adjusted_length; + u32 adjusted_offset; + unsigned offset_slot; + unsigned num_extra_bits; + u32 extra_bits; + + /* Output the literal run of the sequence. */ + + if (litrunlen) { /* Is the literal run nonempty? */ + + /* Verify optimization is enabled on 64-bit */ + STATIC_ASSERT(WORDBITS < 64 || + CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT)); + + if (CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT)) { + + /* 64-bit: write 3 literals at a time. */ + while (litrunlen >= 3) { + unsigned lit0 = block_data[0]; + unsigned lit1 = block_data[1]; + unsigned lit2 = block_data[2]; + lzx_add_bits(os, codes->codewords.main[lit0], + codes->lens.main[lit0]); + lzx_add_bits(os, codes->codewords.main[lit1], + codes->lens.main[lit1]); + lzx_add_bits(os, codes->codewords.main[lit2], + codes->lens.main[lit2]); + lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT); + block_data += 3; + litrunlen -= 3; + } + if (litrunlen--) { + unsigned lit = *block_data++; + lzx_add_bits(os, codes->codewords.main[lit], + codes->lens.main[lit]); + if (litrunlen--) { + unsigned lit = *block_data++; + lzx_add_bits(os, codes->codewords.main[lit], + codes->lens.main[lit]); + lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT); + } else { + lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT); + } + } + } else { + /* 32-bit: write 1 literal at a time. */ + do { + unsigned lit = *block_data++; + lzx_add_bits(os, codes->codewords.main[lit], + codes->lens.main[lit]); + lzx_flush_bits(os, MAIN_CODEWORD_LIMIT); + } while (--litrunlen); + } + } + + /* Was this the last literal run? */ + if (seq->adjusted_offset_and_match_hdr & 0x80000000) + return; + + /* Nope; output the match. 
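A match is encoded as its
+		 * main symbol, then a length symbol if the length is too
+		 * large to be implied by the main symbol alone, then the
+		 * extra offset bits, the low 3 of which may go through the
+		 * aligned offset code.  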
*/
+
+		match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
+		main_symbol = LZX_NUM_CHARS + match_hdr;
+		adjusted_length = seq->adjusted_length;
 
-	for (u32 i = 0; i < num_items; i++)
-		lzx_write_item(os, items[i], ones_if_aligned, codes);
+		block_data += adjusted_length + LZX_MIN_MATCH_LEN;
+
+		offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
+		adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
+
+		num_extra_bits = lzx_extra_offset_bits[offset_slot];
+		extra_bits = adjusted_offset - lzx_offset_slot_base[offset_slot];
+
+	#define MAX_MATCH_BITS	(MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
+				 14 + ALIGNED_CODEWORD_LIMIT)
+
+		/* Verify optimization is enabled on 64-bit  */
+		STATIC_ASSERT(WORDBITS < 64 || CAN_BUFFER(MAX_MATCH_BITS));
+
+		/* Output the main symbol for the match.  */
+
+		lzx_add_bits(os, codes->codewords.main[main_symbol],
+			     codes->lens.main[main_symbol]);
+		if (!CAN_BUFFER(MAX_MATCH_BITS))
+			lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
+
+		/* If needed, output the length symbol for the match.  */
+
+		if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
+			lzx_add_bits(os, codes->codewords.len[adjusted_length -
+							      LZX_NUM_PRIMARY_LENS],
+				     codes->lens.len[adjusted_length -
+						     LZX_NUM_PRIMARY_LENS]);
+			if (!CAN_BUFFER(MAX_MATCH_BITS))
+				lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
+		}
+
+		/* Output the extra offset bits for the match.  In aligned
+		 * offset blocks, the lowest 3 bits of the adjusted offset are
+		 * Huffman-encoded using the aligned offset code, provided that
+		 * at least 3 extra offset bits are required.  All other extra
+		 * offset bits are output verbatim.  */
+
+		if ((adjusted_offset & ones_if_aligned) >= 16) {
+
+			lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
+				     num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
+			if (!CAN_BUFFER(MAX_MATCH_BITS))
+				lzx_flush_bits(os, 14);
+
+			lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
+								  LZX_ALIGNED_OFFSET_BITMASK],
+				     codes->lens.aligned[adjusted_offset &
+							 LZX_ALIGNED_OFFSET_BITMASK]);
+			if (!CAN_BUFFER(MAX_MATCH_BITS))
+				lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
+		} else {
+			STATIC_ASSERT(CAN_BUFFER(17));
+
+			lzx_add_bits(os, extra_bits, num_extra_bits);
+			if (!CAN_BUFFER(MAX_MATCH_BITS))
+				lzx_flush_bits(os, 17);
+		}
+
+		if (CAN_BUFFER(MAX_MATCH_BITS))
+			lzx_flush_bits(os, MAX_MATCH_BITS);
+
+		/* Advance to the next sequence.  */
+		seq++;
+	}
 }
 
 static void
-lzx_write_compressed_block(int block_type,
+lzx_write_compressed_block(const u8 *block_begin,
+			   int block_type,
 			   u32 block_size,
 			   unsigned window_order,
 			   unsigned num_main_syms,
-			   const struct lzx_item chosen_items[],
-			   u32 num_chosen_items,
+			   const struct lzx_sequence sequences[],
 			   const struct lzx_codes * codes,
 			   const struct lzx_lens * prev_lens,
 			   struct lzx_output_bitstream * os)
 {
-	LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
-		   block_type == LZX_BLOCKTYPE_VERBATIM);
-
 	/* The first three bits indicate the type of block and are one of the
 	 * LZX_BLOCKTYPE_* constants.  */
 	lzx_write_bits(os, block_type, 3);
 
-	/* Output the block size.
+	/*
+	 * Output the block size.
 	 *
-	 * The original LZX format seemed to always encode the block size in 3
-	 * bytes.  However, the implementation in WIMGAPI, as used in WIM files,
-	 * uses the first bit to indicate whether the block is the default size
-	 * (32768) or a different size given explicitly by the next 16 bits.
+	 * The original LZX format encoded the block size in 24 bits.  
However, + * the LZX format used in WIM archives uses 1 bit to specify whether the + * block has the default size of 32768 bytes, then optionally 16 bits to + * specify a non-default size. This works fine for Microsoft's WIM + * software (WIMGAPI), which never compresses more than 32768 bytes at a + * time with LZX. However, as an extension, our LZX compressor supports + * compressing up to 2097152 bytes, with a corresponding increase in + * window size. It is possible for blocks in these larger buffers to + * exceed 65535 bytes; such blocks cannot have their size represented in + * 16 bits. * - * By default, this compressor uses a window size of 32768 and therefore - * follows the WIMGAPI behavior. However, this compressor also supports - * window sizes greater than 32768 bytes, which do not appear to be - * supported by WIMGAPI. In such cases, we retain the default size bit - * to mean a size of 32768 bytes but output non-default block size in 24 - * bits rather than 16. The compatibility of this behavior is unknown - * because WIMs created with chunk size greater than 32768 can seemingly - * only be opened by wimlib anyway. */ + * The chosen solution was to use 24 bits for the block size when + * possibly required --- specifically, when the compressor has been + * allocated to be capable of compressing more than 32768 bytes at once + * (which also causes the number of main symbols to be increased). + */ if (block_size == LZX_DEFAULT_BLOCK_SIZE) { lzx_write_bits(os, 1, 1); } else { @@ -966,29 +1096,33 @@ lzx_write_compressed_block(int block_type, LZX_LENCODE_NUM_SYMBOLS); /* Output the compressed matches and literals. */ - lzx_write_items(os, block_type, chosen_items, num_chosen_items, codes); + lzx_write_sequences(os, block_type, block_begin, sequences, codes); } -/* Given the frequencies of symbols in an LZX-compressed block and the +/* + * Given the frequencies of symbols in an LZX-compressed block and the * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively, - * will take fewer bits to output. */ + * will take fewer bits to output. + */ static int lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs, const struct lzx_codes * codes) { - u32 aligned_cost = 0; u32 verbatim_cost = 0; + u32 aligned_cost = 0; - /* A verbatim block requires 3 bits in each place that an aligned symbol - * would be used in an aligned offset block. */ + /* A verbatim block requires 3 bits in each place that an aligned offset + * symbol would be used in an aligned offset block. */ for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) { verbatim_cost += LZX_NUM_ALIGNED_OFFSET_BITS * freqs->aligned[i]; aligned_cost += codes->lens.aligned[i] * freqs->aligned[i]; } - /* Account for output of the aligned offset code. */ - aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * LZX_ALIGNEDCODE_NUM_SYMBOLS; + /* Account for the cost of sending the codeword lengths of the aligned + * offset code. */ + aligned_cost += LZX_ALIGNEDCODE_ELEMENT_SIZE * + LZX_ALIGNEDCODE_NUM_SYMBOLS; if (aligned_cost < verbatim_cost) return LZX_BLOCKTYPE_ALIGNED; @@ -997,198 +1131,359 @@ lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs, } /* - * Finish an LZX block: + * Flush an LZX block: + * + * 1. Build the Huffman codes. + * 2. Decide whether to output the block as VERBATIM or ALIGNED. + * 3. Write the block. + * 4. Swap the indices of the current and previous Huffman codes. 
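*
+ *    (Step 4 keeps the old lengths around because the next block's lengths
+ *    are delta-encoded against them in lzx_write_compressed_code().)
+ 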
*
- * - build the Huffman codes
- * - decide whether to output the block as VERBATIM or ALIGNED
- * - output the block
- * - swap the indices of the current and previous Huffman codes
+ * Note: we never output UNCOMPRESSED blocks.  This probably should be
+ * implemented sometime, but it doesn't make much difference.
  */
 static void
-lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
-		 u32 block_size, u32 num_chosen_items)
+lzx_flush_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
+		const u8 *block_begin, u32 block_size, u32 seq_idx)
 {
 	int block_type;
 
-	lzx_make_huffman_codes(c);
+	lzx_build_huffman_codes(c);
 
 	block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
 						    &c->codes[c->codes_index]);
-	lzx_write_compressed_block(block_type,
+	lzx_write_compressed_block(block_begin,
+				   block_type,
 				   block_size,
 				   c->window_order,
 				   c->num_main_syms,
-				   c->chosen_items,
-				   num_chosen_items,
+				   &c->chosen_sequences[seq_idx],
 				   &c->codes[c->codes_index],
 				   &c->codes[c->codes_index ^ 1].lens,
 				   os);
 	c->codes_index ^= 1;
 }
 
-/* Return the offset slot for the specified offset, which must be
- * less than LZX_NUM_FAST_OFFSETS. */
-static inline unsigned
-lzx_get_offset_slot_fast(struct lzx_compressor *c, u32 offset)
+/******************************************************************************/
+/*                         Block splitting algorithm                          */
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The problem of block splitting is to decide when it is worthwhile to start a
+ * new block with new entropy codes.  There is a theoretically optimal solution:
+ * recursively consider every possible block split, considering the exact cost
+ * of each block, and choose the minimum cost approach.  But this is far too
+ * slow.  Instead, as an approximation, we can count symbols and after every N
+ * symbols, compare the expected distribution of symbols based on the previous
+ * data with the actual distribution.  If they differ "by enough", then start a
+ * new block.
+ *
+ * As an optimization and heuristic, we don't distinguish between every symbol
+ * but rather we combine many symbols into a single "observation type".  For
+ * literals we only look at the high bits and low bits, and for matches we only
+ * look at whether the match is long or not.  The assumption is that for typical
+ * "real" data, places that are good block boundaries will tend to be noticeable
+ * based only on changes in these aggregate frequencies, without looking for
+ * subtle differences in individual symbols.  For example, a change from ASCII
+ * bytes to non-ASCII bytes, or from few matches (generally less compressible)
+ * to many matches (generally more compressible), would be easily noticed based
+ * on the aggregates.
+ *
+ * For determining whether the frequency distributions are "different enough" to
+ * start a new block, the simple heuristic of splitting when the sum of absolute
+ * differences exceeds a constant seems to be good enough.
+ *
+ * Finally, since this is only an approximation, it is not strictly necessary to
+ * consider the exact symbols that will be used.  With "near-optimal parsing",
+ * for example, the actual symbols that will be used are unknown until after the
+ * block boundary is chosen and the block has been optimized.  Since the final
+ * choices cannot be used, we can use preliminary "greedy" choices instead.
+ */
+
+/* Initialize the block split statistics when starting a new block.  
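All counters start out
+ * zeroed; they are updated by lzx_observe_literal() and lzx_observe_match()
+ * and consumed by lzx_should_end_block().  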
*/ +static void +lzx_init_block_split_stats(struct lzx_block_split_stats *stats) +{ + memset(stats, 0, sizeof(*stats)); +} + +/* Literal observation. Heuristic: use the top 2 bits and low 1 bits of the + * literal, for 8 possible literal observation types. */ +static inline void +lzx_observe_literal(struct lzx_block_split_stats *stats, u8 lit) +{ + stats->new_observations[((lit >> 5) & 0x6) | (lit & 1)]++; + stats->num_new_observations++; +} + +/* Match observation. Heuristic: use one observation type for "short match" and + * one observation type for "long match". */ +static inline void +lzx_observe_match(struct lzx_block_split_stats *stats, unsigned length) +{ + stats->new_observations[NUM_LITERAL_OBSERVATION_TYPES + (length >= 5)]++; + stats->num_new_observations++; +} + +static bool +lzx_should_end_block(struct lzx_block_split_stats *stats) +{ + if (stats->num_observations > 0) { + + /* Note: to avoid slow divisions, we do not divide by + * 'num_observations', but rather do all math with the numbers + * multiplied by 'num_observations'. */ + u32 total_delta = 0; + for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) { + u32 expected = stats->observations[i] * + stats->num_new_observations; + u32 actual = stats->new_observations[i] * + stats->num_observations; + u32 delta = (actual > expected) ? actual - expected : + expected - actual; + total_delta += delta; + } + + /* Ready to end the block? */ + if (total_delta >= + stats->num_new_observations * 7 / 8 * stats->num_observations) + return true; + } + + for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) { + stats->num_observations += stats->new_observations[i]; + stats->observations[i] += stats->new_observations[i]; + stats->new_observations[i] = 0; + } + stats->num_new_observations = 0; + return false; +} + +/******************************************************************************/ +/* Slower ("near-optimal") compression algorithm */ +/*----------------------------------------------------------------------------*/ + +/* + * Least-recently-used queue for match offsets. + * + * This is represented as a 64-bit integer for efficiency. There are three + * offsets of 21 bits each. Bit 64 is garbage. + */ +struct lzx_lru_queue { + u64 R; +} _aligned_attribute(8); + +#define LZX_QUEUE_OFFSET_SHIFT 21 +#define LZX_QUEUE_OFFSET_MASK (((u64)1 << LZX_QUEUE_OFFSET_SHIFT) - 1) + +#define LZX_QUEUE_R0_SHIFT (0 * LZX_QUEUE_OFFSET_SHIFT) +#define LZX_QUEUE_R1_SHIFT (1 * LZX_QUEUE_OFFSET_SHIFT) +#define LZX_QUEUE_R2_SHIFT (2 * LZX_QUEUE_OFFSET_SHIFT) + +#define LZX_QUEUE_R0_MASK (LZX_QUEUE_OFFSET_MASK << LZX_QUEUE_R0_SHIFT) +#define LZX_QUEUE_R1_MASK (LZX_QUEUE_OFFSET_MASK << LZX_QUEUE_R1_SHIFT) +#define LZX_QUEUE_R2_MASK (LZX_QUEUE_OFFSET_MASK << LZX_QUEUE_R2_SHIFT) + +#define LZX_QUEUE_INITIALIZER { \ + ((u64)1 << LZX_QUEUE_R0_SHIFT) | \ + ((u64)1 << LZX_QUEUE_R1_SHIFT) | \ + ((u64)1 << LZX_QUEUE_R2_SHIFT) } + +static inline u64 +lzx_lru_queue_R0(struct lzx_lru_queue queue) +{ + return (queue.R >> LZX_QUEUE_R0_SHIFT) & LZX_QUEUE_OFFSET_MASK; +} + +static inline u64 +lzx_lru_queue_R1(struct lzx_lru_queue queue) +{ + return (queue.R >> LZX_QUEUE_R1_SHIFT) & LZX_QUEUE_OFFSET_MASK; +} + +static inline u64 +lzx_lru_queue_R2(struct lzx_lru_queue queue) +{ + return (queue.R >> LZX_QUEUE_R2_SHIFT) & LZX_QUEUE_OFFSET_MASK; +} + +/* Push a match offset onto the front (most recently used) end of the queue. 
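For
+ * example, pushing offset 100 onto the freshly initialized queue (1, 1, 1)
+ * yields (100, 1, 1): R0 and R1 move back to R1 and R2, and the old R2 is
+ * shifted out into the garbage bits.  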
+ */
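/*
 * Editorial example with illustrative values: if the queue holds R0 = 100,
 * R1 = 50, R2 = 25, then pushing offset 200 shifts the packed word left by
 * 21 bits and ORs in the new offset, giving R0 = 200, R1 = 100, R2 = 50.
 * The old R2 is shifted up past the three 21-bit fields into the garbage
 * bits and is simply never read back.
 */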
- lzx_get_offset_slot_fast(c, offset) : - lzx_get_offset_slot(offset); + /* Record the literal run length for the next sequence + * (the "previous sequence" when walking backwards). */ + len = item & OPTIMUM_LEN_MASK; + if (record) { + c->chosen_sequences[seq_idx--].litrunlen = + lit_start_node - node_idx; + lit_start_node = node_idx - len; + } - main_symbol = lzx_main_symbol_for_match(offset_slot, len_header); + /* Tally the rep0 match after the gap. */ + v = len - LZX_MIN_MATCH_LEN; + if (record) + c->chosen_sequences[seq_idx].adjusted_length = v; + if (v >= LZX_NUM_PRIMARY_LENS) { + c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++; + v = LZX_NUM_PRIMARY_LENS; + } + c->freqs.main[LZX_NUM_CHARS + v]++; + if (record) + c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = v; + + /* Tally the literal in the gap. */ + c->freqs.main[(u8)(item >> OPTIMUM_OFFSET_SHIFT)]++; + + /* Fall through and tally the match before the gap. + * (It was temporarily saved in the 'cost' field of the + * previous node, which was free to reuse.) */ + item = c->optimum_nodes[--node_idx].cost; + node_idx -= len; + } + #else /* CONSIDER_GAP_MATCHES */ + if (node_idx == 0) + break; + #endif /* !CONSIDER_GAP_MATCHES */ + + len = item & OPTIMUM_LEN_MASK; + adjusted_offset = item >> OPTIMUM_OFFSET_SHIFT; + + /* Record the literal run length for the next sequence (the + * "previous sequence" when walking backwards). */ + if (record) { + c->chosen_sequences[seq_idx--].litrunlen = + lit_start_node - node_idx; + node_idx -= len; + lit_start_node = node_idx; + } else { + node_idx -= len; + } - c->freqs.main[main_symbol]++; + /* Record a match. */ - num_extra_bits = lzx_extra_offset_bits[offset_slot]; + /* Tally the aligned offset symbol if needed. */ + if (adjusted_offset >= 16) + c->freqs.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]++; - if (num_extra_bits >= LZX_NUM_ALIGNED_OFFSET_BITS) - c->freqs.aligned[(offset + LZX_OFFSET_ADJUSTMENT) & - LZX_ALIGNED_OFFSET_BITMASK]++; + /* Record the adjusted length. */ + v = len - LZX_MIN_MATCH_LEN; + if (record) + c->chosen_sequences[seq_idx].adjusted_length = v; - if (next_chosen_item) { + /* Tally the length symbol if needed. */ + if (v >= LZX_NUM_PRIMARY_LENS) { + c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++; + v = LZX_NUM_PRIMARY_LENS; + } - extra_bits = (offset + LZX_OFFSET_ADJUSTMENT) - - lzx_offset_slot_base[offset_slot]; + /* Tally the main symbol. */ + offset_slot = lzx_get_offset_slot(c, adjusted_offset, is_16_bit); + v += offset_slot * LZX_NUM_LEN_HEADERS; + c->freqs.main[LZX_NUM_CHARS + v]++; - BUILD_BUG_ON(LZX_MAINCODE_MAX_NUM_SYMBOLS > (1 << 10)); - BUILD_BUG_ON(LZX_LENCODE_NUM_SYMBOLS > (1 << 8)); - *(*next_chosen_item)++ = (struct lzx_item) { - .data = (u64)main_symbol | - ((u64)len_symbol << 10) | - ((u64)num_extra_bits << 18) | - ((u64)extra_bits << 23), - }; + /* Record the adjusted offset and match header. */ + if (record) { + c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = + (adjusted_offset << 9) | v; + } } -} + /* Record the literal run length for the first sequence. */ + if (record) + c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx; -/* Tally, and optionally record, the specified match or literal. 
- */
* * The algorithm works forwards, starting at 'c->optimum_nodes[0]' and * proceeding forwards one node at a time. At each node, a selection of matches * (len >= 2), as well as the literal byte (len = 1), is considered. An item of * length 'len' provides a new path to reach the node 'len' bytes later. If * such a path is the lowest cost found so far to reach that later node, then - * that later node is updated with the new path. + * that later node is updated with the new cost and the "arrival" which provided + * that cost. * * Note that although this algorithm is based on minimum cost path search, due * to various simplifying assumptions the result is not guaranteed to be the @@ -1219,40 +1516,69 @@ lzx_tally_item_list(struct lzx_compressor *c, struct lzx_optimum_node *cur_node) * path to contain a prefix, ending at a position, where that path prefix is * *not* the minimum cost path to that position. This can happen if such a path * prefix results in a different adaptive state which results in lower costs - * later. The algorithm does not solve this problem; it only considers the - * lowest cost to reach each individual position. + * later. The algorithm does not solve this problem in general; it only looks + * one step ahead, with the exception of special consideration for "gap + * matches". */ -static struct lzx_lru_queue +static inline struct lzx_lru_queue lzx_find_min_cost_path(struct lzx_compressor * const restrict c, const u8 * const restrict block_begin, const u32 block_size, - const struct lzx_lru_queue initial_queue) + const struct lzx_lru_queue initial_queue, + bool is_16_bit) { struct lzx_optimum_node *cur_node = c->optimum_nodes; - struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size]; + struct lzx_optimum_node * const end_node = cur_node + block_size; struct lz_match *cache_ptr = c->match_cache; const u8 *in_next = block_begin; const u8 * const block_end = block_begin + block_size; - /* Instead of storing the match offset LRU queues in the + /* + * Instead of storing the match offset LRU queues in the * 'lzx_optimum_node' structures, we save memory (and cache lines) by * storing them in a smaller array. This works because the algorithm * only requires a limited history of the adaptive state. Once a given - * state is more than LZX_MAX_MATCH_LEN bytes behind the current node, - * it is no longer needed. */ + * state is more than LZX_MAX_MATCH_LEN bytes behind the current node + * (more if gap match consideration is enabled; we just round up to 512 + * so it's a power of 2), it is no longer needed. + * + * The QUEUE() macro finds the queue for the given node. This macro has + * been optimized by taking advantage of 'struct lzx_lru_queue' and + * 'struct lzx_optimum_node' both being 8 bytes in size and alignment. 
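 * In outline (editorial summary), each step applies the usual shortest-path
 * relaxation rule:
 *
 *	for each item of length 'len' available at node i:
 *		if (cost[i] + item_cost < cost[i + len]) {
 *			cost[i + len] = cost[i] + item_cost;
 *			item[i + len] = (offset or literal, len);
 *		}
 *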
+ */ struct lzx_lru_queue queues[512]; + STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1); + STATIC_ASSERT(sizeof(c->optimum_nodes[0]) == sizeof(queues[0])); +#define QUEUE(node) \ + (*(struct lzx_lru_queue *)((char *)queues + \ + ((uintptr_t)(node) % (ARRAY_LEN(queues) * sizeof(queues[0]))))) + /*(queues[(uintptr_t)(node) / sizeof(*(node)) % ARRAY_LEN(queues)])*/ + +#if CONSIDER_GAP_MATCHES + u32 matches_before_gap[ARRAY_LEN(queues)]; +#define MATCH_BEFORE_GAP(node) \ + (matches_before_gap[(uintptr_t)(node) / sizeof(*(node)) % \ + ARRAY_LEN(matches_before_gap)]) +#endif - BUILD_BUG_ON(ARRAY_LEN(queues) < LZX_MAX_MATCH_LEN + 1); -#define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)]) - - /* Initially, the cost to reach each node is "infinity". */ + /* + * Initially, the cost to reach each node is "infinity". + * + * The first node actually should have cost 0, but "infinity" + * (0xFFFFFFFF) works just as well because it immediately overflows. + * + * The following statement also intentionally sets the 'item' of the + * first node, which would otherwise have no meaning, to 0xFFFFFFFF for + * use as a sentinel. See lzx_walk_item_list(). + */ memset(c->optimum_nodes, 0xFF, (block_size + 1) * sizeof(c->optimum_nodes[0])); - QUEUE(block_begin) = initial_queue; + /* Initialize the recent offsets queue for the first node. */ + QUEUE(cur_node) = initial_queue; + + do { /* For each node in the block in position order... */ - /* The following loop runs 'block_size' iterations, one per node. */ - do { unsigned num_matches; unsigned literal; u32 cost; @@ -1290,11 +1616,11 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c, unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN); const u8 *matchptr; - /* Consider R0 match */ - matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next)); + /* Consider rep0 matches. */ + matchptr = in_next - lzx_lru_queue_R0(QUEUE(cur_node)); if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next)) - goto R0_done; - BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2); + goto rep0_done; + STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2); do { u32 cost = cur_node->cost + c->costs.match_cost[0][ @@ -1310,17 +1636,17 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c, } } while (in_next[next_len - 1] == matchptr[next_len - 1]); - R0_done: + rep0_done: - /* Consider R1 match */ - matchptr = in_next - lzx_lru_queue_R1(QUEUE(in_next)); + /* Consider rep1 matches. */ + matchptr = in_next - lzx_lru_queue_R1(QUEUE(cur_node)); if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next)) - goto R1_done; + goto rep1_done; if (matchptr[next_len - 1] != in_next[next_len - 1]) - goto R1_done; + goto rep1_done; for (unsigned len = 2; len < next_len - 1; len++) if (matchptr[len] != in_next[len]) - goto R1_done; + goto rep1_done; do { u32 cost = cur_node->cost + c->costs.match_cost[1][ @@ -1336,17 +1662,17 @@ lzx_find_min_cost_path(struct lzx_compressor * const restrict c, } } while (in_next[next_len - 1] == matchptr[next_len - 1]); - R1_done: + rep1_done: - /* Consider R2 match */ - matchptr = in_next - lzx_lru_queue_R2(QUEUE(in_next)); + /* Consider rep2 matches. 
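	/*
	 * Editorial note: the modular indexing means node i and node i + 512
	 * share a queue slot. This is safe because by the time node i + 512
	 * is reached, the state saved for node i is more than
	 * LZX_MAX_MATCH_LEN bytes behind and will never be consulted again.
	 */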
+ */
*/ literal = *in_next++; - cost = cur_node->cost + - c->costs.main[lzx_main_symbol_for_literal(literal)]; + cost = cur_node->cost + c->costs.main[literal]; - /* Advance to the next position. */ + /* Advance to the next position. */ cur_node++; /* The lowest-cost path to the current position is now known. * Finalize the recent offsets queue that results from taking - * this lowest-cost path. */ + * this lowest-cost path. */ if (cost <= cur_node->cost) { - /* Literal: queue remains unchanged. */ + /* Literal: queue remains unchanged. */ cur_node->cost = cost; - cur_node->item = (literal << OPTIMUM_OFFSET_SHIFT) | 1; - QUEUE(in_next) = QUEUE(in_next - 1); + cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT; + QUEUE(cur_node) = QUEUE(cur_node - 1); } else { - /* Match: queue update is needed. */ + /* Match: queue update is needed. */ unsigned len = cur_node->item & OPTIMUM_LEN_MASK; - u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT; - if (offset_data >= LZX_NUM_RECENT_OFFSETS) { - /* Explicit offset match: insert offset at front */ - QUEUE(in_next) = - lzx_lru_queue_push(QUEUE(in_next - len), - offset_data - LZX_OFFSET_ADJUSTMENT); - } else { - /* Repeat offset match: swap offset to front */ - QUEUE(in_next) = - lzx_lru_queue_swap(QUEUE(in_next - len), - offset_data); + #if CONSIDER_GAP_MATCHES + s32 adjusted_offset = (s32)cur_node->item >> OPTIMUM_OFFSET_SHIFT; + STATIC_ASSERT(OPTIMUM_GAP_MATCH == 0x80000000); /* assuming sign extension */ + #else + u32 adjusted_offset = cur_node->item >> OPTIMUM_OFFSET_SHIFT; + #endif + + if (adjusted_offset >= LZX_NUM_RECENT_OFFSETS) { + /* Explicit offset match: insert offset at front. */ + QUEUE(cur_node) = + lzx_lru_queue_push(QUEUE(cur_node - len), + adjusted_offset - LZX_OFFSET_ADJUSTMENT); + } + #if CONSIDER_GAP_MATCHES + else if (adjusted_offset < 0) { + /* "Gap match": Explicit offset match, then a + * literal, then rep0 match. Save the explicit + * offset match information in the cost field of + * the previous node, which isn't needed + * anymore. Then insert the offset at the front + * of the queue. */ + u32 match_before_gap = MATCH_BEFORE_GAP(cur_node); + (cur_node - 1)->cost = match_before_gap; + QUEUE(cur_node) = + lzx_lru_queue_push(QUEUE(cur_node - len - 1 - + (match_before_gap & OPTIMUM_LEN_MASK)), + (match_before_gap >> OPTIMUM_OFFSET_SHIFT) - + LZX_OFFSET_ADJUSTMENT); + } + #endif + else { + /* Repeat offset match: swap offset to front. */ + QUEUE(cur_node) = + lzx_lru_queue_swap(QUEUE(cur_node - len), + adjusted_offset); } } } while (cur_node != end_node); - /* Return the match offset queue at the end of the minimum cost path. */ - return QUEUE(block_end); + /* Return the recent offsets queue at the end of the path. */ + return QUEUE(cur_node); } -/* Given the costs for the main and length codewords, compute 'match_costs'. */ +/* + * Given the costs for the main and length codewords (c->costs.main and + * c->costs.len), initialize the match cost array (c->costs.match_cost) which + * directly provides the cost of every possible (length, offset slot) pair. 
+ */ static void lzx_compute_match_costs(struct lzx_compressor *c) { - unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order); + unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) / + LZX_NUM_LEN_HEADERS; struct lzx_costs *costs = &c->costs; + unsigned main_symbol = LZX_NUM_CHARS; - for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) { - - u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST; - unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0); + for (unsigned offset_slot = 0; offset_slot < num_offset_slots; + offset_slot++) + { + u32 extra_cost = lzx_extra_offset_bits[offset_slot] * BIT_COST; unsigned i; - #if LZX_CONSIDER_ALIGNED_COSTS - if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS) - extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST; + #if CONSIDER_ALIGNED_COSTS + if (offset_slot >= 8) + extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * BIT_COST; #endif - for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++) + for (i = 0; i < LZX_NUM_PRIMARY_LENS; i++) { costs->match_cost[offset_slot][i] = costs->main[main_symbol++] + extra_cost; + } - extra_cost += costs->main[main_symbol]; + extra_cost += costs->main[main_symbol++]; - for (; i < LZX_NUM_LENS; i++) + for (; i < LZX_NUM_LENS; i++) { costs->match_cost[offset_slot][i] = - costs->len[i - LZX_NUM_PRIMARY_LENS] + extra_cost; + costs->len[i - LZX_NUM_PRIMARY_LENS] + + extra_cost; + } } } -/* Set default LZX Huffman symbol costs to bootstrap the iterative optimization - * algorithm. */ -static void -lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size) +/* + * Fast approximation for log2f(x). This is not as accurate as the standard C + * version. It does not need to be perfectly accurate because it is only used + * for estimating symbol costs, which is very approximate anyway. + */ +static float +log2f_fast(float x) { - u32 i; - bool have_byte[256]; - unsigned num_used_bytes; + union { + float f; + s32 i; + } u = { .f = x }; - /* The costs below are hard coded to use a scaling factor of 16. */ - BUILD_BUG_ON(LZX_BIT_COST != 16); + /* Extract the exponent and subtract 127 to remove the bias. This gives + * the integer part of the result. */ + float res = ((u.i >> 23) & 0xFF) - 127; + /* Set the exponent to 0 (plus bias of 127). This transforms the number + * to the range [1, 2) while retaining the same mantissa. */ + u.i = (u.i & ~(0xFF << 23)) | (127 << 23); + + /* + * Approximate the log2 of the transformed number using a degree 2 + * interpolating polynomial for log2(x) over the interval [1, 2). Then + * add this to the extracted exponent to produce the final approximation + * of log2(x). + * + * The coefficients of the interpolating polynomial used here were found + * using the script tools/log2_interpolation.r. + */ + return res - 1.653124006f + u.f * (1.9941812f - u.f * 0.3347490189f); + +} + +/* + * Return the estimated cost of a symbol which has been estimated to have the + * given probability. + */ +static u32 +lzx_cost_for_probability(float prob) +{ /* - * Heuristics: + * The basic formula is: * - * - Use smaller initial costs for literal symbols when the input buffer - * contains fewer distinct bytes. + * entropy = -log2(probability) * - * - Assume that match symbols are more costly than literal symbols. + * Use this to get the cost in fractional bits. Then multiply by our + * scaling factor of BIT_COST and truncate to a u32. 
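/*
 * Editorial sketch of the decomposition being tabulated, using the LZX format
 * constants LZX_NUM_CHARS == 256, LZX_NUM_LEN_HEADERS == 8 and
 * LZX_NUM_PRIMARY_LENS == 7:
 *
 *	cost(slot, len) = main[256 + 8*slot + min(len - 2, 7)]
 *			+ (len - 2 >= 7 ? len[len - 9] : 0)
 *			+ extra_offset_bits[slot] * BIT_COST
 *
 * (minus the three aligned-offset bits for slots >= 8 when
 * CONSIDER_ALIGNED_COSTS is set, since those are charged via the aligned
 * code). A single lookup in match_cost[slot][len - 2] then replaces all of
 * these terms in the inner loop of the path search.
 */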
* - * - Assume that length symbols for shorter lengths are less costly than - * length symbols for longer lengths. + * In addition, the minimum cost is BIT_COST (one bit) because the + * entropy coding method will be Huffman codes. */ + u32 cost = -log2f_fast(prob) * BIT_COST; + return max(cost, BIT_COST); +} - for (i = 0; i < 256; i++) - have_byte[i] = false; +/* + * Mapping: number of used literals => heuristic probability of a literal times + * 6870. Generated by running this R command: + * + * cat(paste(round(6870*2^-((304+(0:256))/64)), collapse=", ")) + */ +static const u8 literal_scaled_probs[257] = { + 255, 253, 250, 247, 244, 242, 239, 237, 234, 232, 229, 227, 224, 222, + 219, 217, 215, 212, 210, 208, 206, 203, 201, 199, 197, 195, 193, 191, + 189, 186, 184, 182, 181, 179, 177, 175, 173, 171, 169, 167, 166, 164, + 162, 160, 159, 157, 155, 153, 152, 150, 149, 147, 145, 144, 142, 141, + 139, 138, 136, 135, 133, 132, 130, 129, 128, 126, 125, 124, 122, 121, + 120, 118, 117, 116, 115, 113, 112, 111, 110, 109, 107, 106, 105, 104, + 103, 102, 101, 100, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, + 86, 85, 84, 83, 82, 81, 80, 79, 78, 78, 77, 76, 75, 74, 73, 73, 72, 71, + 70, 70, 69, 68, 67, 67, 66, 65, 65, 64, 63, 62, 62, 61, 60, 60, 59, 59, + 58, 57, 57, 56, 55, 55, 54, 54, 53, 53, 52, 51, 51, 50, 50, 49, 49, 48, + 48, 47, 47, 46, 46, 45, 45, 44, 44, 43, 43, 42, 42, 41, 41, 40, 40, 40, + 39, 39, 38, 38, 38, 37, 37, 36, 36, 36, 35, 35, 34, 34, 34, 33, 33, 33, + 32, 32, 32, 31, 31, 31, 30, 30, 30, 29, 29, 29, 28, 28, 28, 27, 27, 27, + 27, 26, 26, 26, 25, 25, 25, 25, 24, 24, 24, 24, 23, 23, 23, 23, 22, 22, + 22, 22, 21, 21, 21, 21, 20, 20, 20, 20, 20, 19, 19, 19, 19, 19, 18, 18, + 18, 18, 18, 17, 17, 17, 17, 17, 16, 16, 16, 16 +}; - for (i = 0; i < block_size; i++) - have_byte[block[i]] = true; +/* + * Mapping: length symbol => default cost of that symbol. This is derived from + * sample data but has been slightly edited to add more bias towards the + * shortest lengths, which are the most common. + */ +static const u16 lzx_default_len_costs[LZX_LENCODE_NUM_SYMBOLS] = { + 300, 310, 320, 330, 360, 396, 399, 416, 451, 448, 463, 466, 505, 492, + 503, 514, 547, 531, 566, 561, 589, 563, 592, 586, 623, 602, 639, 627, + 659, 643, 657, 650, 685, 662, 661, 672, 685, 686, 696, 680, 657, 682, + 666, 699, 674, 699, 679, 709, 688, 712, 692, 714, 694, 716, 698, 712, + 706, 727, 714, 727, 713, 723, 712, 718, 719, 719, 720, 735, 725, 735, + 728, 740, 727, 739, 727, 742, 716, 733, 733, 740, 738, 746, 737, 747, + 738, 745, 736, 748, 742, 749, 745, 749, 743, 748, 741, 752, 745, 752, + 747, 750, 747, 752, 748, 753, 750, 752, 753, 753, 749, 744, 752, 755, + 753, 756, 745, 748, 746, 745, 723, 757, 755, 758, 755, 758, 752, 757, + 754, 757, 755, 759, 755, 758, 753, 755, 755, 758, 757, 761, 755, 750, + 758, 759, 759, 760, 758, 751, 757, 757, 759, 759, 758, 759, 758, 761, + 750, 761, 758, 760, 759, 761, 758, 761, 760, 752, 759, 760, 759, 759, + 757, 762, 760, 761, 761, 748, 761, 760, 762, 763, 752, 762, 762, 763, + 762, 762, 763, 763, 762, 763, 762, 763, 762, 763, 763, 764, 763, 762, + 763, 762, 762, 762, 764, 764, 763, 764, 763, 763, 763, 762, 763, 763, + 762, 764, 764, 763, 762, 763, 763, 763, 763, 762, 764, 763, 762, 764, + 764, 763, 763, 765, 764, 764, 762, 763, 764, 765, 763, 764, 763, 764, + 762, 764, 764, 754, 763, 764, 763, 763, 762, 763, 584, +}; - num_used_bytes = 0; - for (i = 0; i < 256; i++) - num_used_bytes += have_byte[i]; +/* Set default costs to bootstrap the iterative optimization algorithm. 
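 * For example (editorial): with BIT_COST == 64, a symbol with estimated
 * probability 1/4 has entropy -log2(1/4) = 2 bits, so its cost is 128,
 * while a probability of 1/2 yields the minimum cost of 64.
 *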
+ */
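/*
 * Editorial example of the mix used below: a literal occurring 1000 times out
 * of 20000 items, in a block that uses 100 distinct literals, gets probability
 * 0.5 * (1000/20000 + literal_scaled_probs[100]/6870) ~= 0.5 * (0.050 + 0.0125)
 * ~= 0.031, i.e. about a 5-bit (320-unit) cost.
 */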
- lens->main[i] : 15) * LZX_BIT_COST;
* * For each block, it performs a relatively thorough graph search to find an - * inexpensive (in terms of compressed size) way to output that block. + * inexpensive (in terms of compressed size) way to output the block. * * Note: there are actually many things this algorithm leaves on the table in * terms of compression ratio. So although it may be "near-optimal", it is @@ -1587,193 +2134,354 @@ lzx_optimize_and_write_block(struct lzx_compressor *c, * time, but rather to produce a compression ratio significantly better than a * simpler "greedy" or "lazy" parse while still being relatively fast. */ -static void -lzx_compress_near_optimal(struct lzx_compressor *c, - struct lzx_output_bitstream *os) +static inline void +lzx_compress_near_optimal(struct lzx_compressor * restrict c, + const u8 * const restrict in_begin, size_t in_nbytes, + struct lzx_output_bitstream * restrict os, + bool is_16_bit) { - const u8 * const in_begin = c->in_buffer; const u8 * in_next = in_begin; - const u8 * const in_end = in_begin + c->in_nbytes; - unsigned max_len = LZX_MAX_MATCH_LEN; - unsigned nice_len = min(c->nice_match_length, max_len); - u32 next_hash; - struct lzx_lru_queue queue; + const u8 * const in_end = in_begin + in_nbytes; + u32 max_len = LZX_MAX_MATCH_LEN; + u32 nice_len = min(c->nice_match_length, max_len); + u32 next_hashes[2] = {0, 0}; + struct lzx_lru_queue queue = LZX_QUEUE_INITIALIZER; - bt_matchfinder_init(&c->bt_mf); - matchfinder_init(c->hash2_tab, LZX_HASH2_LENGTH); - next_hash = bt_matchfinder_hash_3_bytes(in_next); - lzx_lru_queue_init(&queue); + /* Initialize the matchfinder. */ + CALL_BT_MF(is_16_bit, c, bt_matchfinder_init); do { - /* Starting a new block */ - const u8 * const in_block_begin = in_next; - const u8 * const in_block_end = - in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next); + /* Starting a new block */ - /* Run the block through the matchfinder and cache the matches. */ + const u8 * const in_block_begin = in_next; + const u8 * const in_max_block_end = + in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next); struct lz_match *cache_ptr = c->match_cache; - do { - struct lz_match *lz_matchptr; - u32 hash2; - pos_t cur_match; - unsigned best_len; + const u8 *next_search_pos = in_next; + const u8 *next_observation = in_next; + const u8 *next_pause_point = + min(in_next + min(MIN_BLOCK_SIZE, + in_max_block_end - in_next), + in_max_block_end - min(LZX_MAX_MATCH_LEN - 1, + in_max_block_end - in_next)); + + lzx_init_block_split_stats(&c->split_stats); + lzx_reset_symbol_frequencies(c); - /* If approaching the end of the input buffer, adjust - * 'max_len' and 'nice_len' accordingly. */ - if (unlikely(max_len > in_end - in_next)) { - max_len = in_end - in_next; - nice_len = min(max_len, nice_len); + if (in_next >= next_pause_point) + goto pause; - /* This extra check is needed to ensure that we - * never output a length 2 match of the very - * last two bytes with the very first two bytes, - * since such a match has an offset too large to - * be represented. */ - if (unlikely(max_len < 3)) { - in_next++; + /* + * Run the input buffer through the matchfinder, caching the + * matches, until we decide to end the block. + * + * For a tighter matchfinding loop, we compute a "pause point", + * which is the next position at which we may need to check + * whether to end the block or to decrease max_len. We then + * only do these extra checks upon reaching the pause point. + */ + resume_matchfinding: + do { + if (in_next >= next_search_pos) { + /* Search for matches at this position. 
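 * Per-block pipeline, in outline (editorial summary):
 *
 *	1. Run the matchfinder over the block, caching all matches found.
 *	2. Starting from default costs, find a minimum-cost path through the
 *	   item graph; between passes, tally the chosen symbols, rebuild the
 *	   Huffman codes, and derive new costs from the codeword lengths.
 *	3. Record the final path as lzx_sequences and flush the block.
 *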
+ */
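				/*
				 * Editorial note on the cache layout: each
				 * position gets a header entry whose 'length'
				 * field holds the number of matches found,
				 * followed by that many lz_match entries
				 * (written starting at cache_ptr + 1).
				 */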
+ */
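			/*
			 * Editorial note: the pause point below is the nearer
			 * of (a) the position by which the number of new
			 * observations could reach twice
			 * NUM_OBSERVATIONS_PER_BLOCK_CHECK, and (b) the point
			 * LZX_MAX_MATCH_LEN - 1 bytes before the soft block
			 * end, so that max_len never needs adjusting inside
			 * the tight matchfinding loop.
			 */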
- */
+ */ static unsigned lzx_find_longest_repeat_offset_match(const u8 * const in_next, - const u32 bytes_remaining, - struct lzx_lru_queue queue, - unsigned *rep_max_idx_ret) + const u32 recent_offsets[], + const unsigned max_len, + unsigned *best_rep_idx_ret) { - BUILD_BUG_ON(LZX_NUM_RECENT_OFFSETS != 3); - LZX_ASSERT(bytes_remaining >= 2); + STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3); /* loop is unrolled */ - const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN); - const u16 next_2_bytes = load_u16_unaligned(in_next); + const u32 seq3 = load_u24_unaligned(in_next); const u8 *matchptr; - unsigned rep_max_len; - unsigned rep_max_idx; + unsigned best_rep_len = 0; + unsigned best_rep_idx = 0; unsigned rep_len; - matchptr = in_next - lzx_lru_queue_pop(&queue); - if (load_u16_unaligned(matchptr) == next_2_bytes) - rep_max_len = lz_extend(in_next, matchptr, 2, max_len); - else - rep_max_len = 0; - rep_max_idx = 0; - - matchptr = in_next - lzx_lru_queue_pop(&queue); - if (load_u16_unaligned(matchptr) == next_2_bytes) { - rep_len = lz_extend(in_next, matchptr, 2, max_len); - if (rep_len > rep_max_len) { - rep_max_len = rep_len; - rep_max_idx = 1; + /* Check for rep0 match (most recent offset) */ + matchptr = in_next - recent_offsets[0]; + if (load_u24_unaligned(matchptr) == seq3) + best_rep_len = lz_extend(in_next, matchptr, 3, max_len); + + /* Check for rep1 match (second most recent offset) */ + matchptr = in_next - recent_offsets[1]; + if (load_u24_unaligned(matchptr) == seq3) { + rep_len = lz_extend(in_next, matchptr, 3, max_len); + if (rep_len > best_rep_len) { + best_rep_len = rep_len; + best_rep_idx = 1; } } - matchptr = in_next - lzx_lru_queue_pop(&queue); - if (load_u16_unaligned(matchptr) == next_2_bytes) { - rep_len = lz_extend(in_next, matchptr, 2, max_len); - if (rep_len > rep_max_len) { - rep_max_len = rep_len; - rep_max_idx = 2; + /* Check for rep2 match (third most recent offset) */ + matchptr = in_next - recent_offsets[2]; + if (load_u24_unaligned(matchptr) == seq3) { + rep_len = lz_extend(in_next, matchptr, 3, max_len); + if (rep_len > best_rep_len) { + best_rep_len = rep_len; + best_rep_idx = 2; } } - *rep_max_idx_ret = rep_max_idx; - return rep_max_len; + *best_rep_idx_ret = best_rep_idx; + return best_rep_len; } -/* Fast heuristic scoring for lazy parsing: how "good" is this match? */ +/* + * Fast heuristic scoring for lazy parsing: how "good" is this match? + * This is mainly determined by the length: longer matches are better. + * However, we also give a bonus to close (small offset) matches and to repeat + * offset matches, since those require fewer bits to encode. + */ + static inline unsigned lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset) { @@ -1781,7 +2489,6 @@ lzx_explicit_offset_match_score(unsigned len, u32 adjusted_offset) if (adjusted_offset < 4096) score++; - if (adjusted_offset < 256) score++; @@ -1794,223 +2501,303 @@ lzx_repeat_offset_match_score(unsigned rep_len, unsigned rep_idx) return rep_len + 3; } -/* This is the "lazy" LZX compressor. */ -static void -lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os) +/* + * This is the "lazy" LZX compressor. The basic idea is that before it chooses + * a match, it checks to see if there's a longer match at the next position. If + * yes, it chooses a literal and continues to the next position. If no, it + * chooses the match. + * + * Some additional heuristics are used as well. Repeat offset matches are + * considered favorably and sometimes are chosen immediately. 
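/*
 * Editorial note: each candidate below is screened with a single 3-byte
 * comparison (load_u24_unaligned) before lz_extend() is called, so an offset
 * whose first three bytes don't match costs only one load and one branch.
 */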
+ * In addition, long
 
 static void
-lzx_init_offset_slot_fast(struct lzx_compressor *c)
+lzx_compress_lazy_16(struct lzx_compressor *c, const u8 *in, size_t in_nbytes,
+		     struct lzx_output_bitstream *os)
+{
+	lzx_compress_lazy(c, in, in_nbytes, os, true);
+}
+
+static void
+lzx_compress_lazy_32(struct lzx_compressor *c, const u8 *in, size_t in_nbytes,
+		     struct lzx_output_bitstream *os)
 {
-	u8 slot = 0;
+	lzx_compress_lazy(c, in, in_nbytes, os, false);
+}
 
-	for (u32 offset = 0; offset < LZX_NUM_FAST_OFFSETS; offset++) {
+/******************************************************************************/
+/*                            Compressor operations                          */
+/*----------------------------------------------------------------------------*/
 
-		while (offset + LZX_OFFSET_ADJUSTMENT >= lzx_offset_slot_base[slot + 1])
+/*
+ * Generate tables for mapping match offsets (actually, "adjusted" match
+ * offsets) to offset slots.
+ */
+static void
+lzx_init_offset_slot_tabs(struct lzx_compressor *c)
+{
+	u32 adjusted_offset = 0;
+	unsigned slot = 0;
+
+	/* slots [0, 29] */
+	for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
+	     adjusted_offset++)
+	{
+		if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
 			slot++;
+		c->offset_slot_tab_1[adjusted_offset] = slot;
+	}
 
-		c->offset_slot_fast[offset] = slot;
+	/* slots [30, 49] */
+	for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
+	     adjusted_offset += (u32)1 << 14)
+	{
+		if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
+			slot++;
+		c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
 	}
 }
 
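/*
 * [Editor's illustration -- not part of this patch.]  Given the two tables
 * initialized above, the intended lookup is presumably direct indexing for
 * small adjusted offsets and a coarse, 2^14-granular index for large ones;
 * the coarse table suffices because the offset slot bases beyond the range of
 * offset_slot_tab_1 fall on multiples of 2^14.  A sketch (the helper name is
 * hypothetical):
 */
static inline unsigned
lookup_offset_slot(const struct lzx_compressor *c, u32 adjusted_offset)
{
	if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
		return c->offset_slot_tab_1[adjusted_offset];
	return c->offset_slot_tab_2[adjusted_offset >> 14];
}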
 
 static size_t
 lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
 {
-	if (compression_level <= LZX_MAX_FAST_LEVEL) {
-		return offsetof(struct lzx_compressor, hc_mf) +
-			hc_matchfinder_size(max_bufsize);
+	if (compression_level <= MAX_FAST_LEVEL) {
+		if (lzx_is_16_bit(max_bufsize))
+			return offsetof(struct lzx_compressor, hc_mf_16) +
+			       hc_matchfinder_size_16(max_bufsize);
+		else
+			return offsetof(struct lzx_compressor, hc_mf_32) +
+			       hc_matchfinder_size_32(max_bufsize);
 	} else {
-		return offsetof(struct lzx_compressor, bt_mf) +
-			bt_matchfinder_size(max_bufsize);
+		if (lzx_is_16_bit(max_bufsize))
+			return offsetof(struct lzx_compressor, bt_mf_16) +
+			       bt_matchfinder_size_16(max_bufsize);
+		else
+			return offsetof(struct lzx_compressor, bt_mf_32) +
+			       bt_matchfinder_size_32(max_bufsize);
 	}
 }
 
+/* Compute the amount of memory needed to allocate an LZX compressor. */
 static u64
 lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
 		      bool destructive)
@@ -2022,10 +2809,11 @@ lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
 	size += lzx_get_compressor_size(max_bufsize, compression_level);
 	if (!destructive)
-		size += max_bufsize; /* in_buffer */
+		size += max_bufsize; /* account for in_buffer */
 	return size;
 }
 
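/*
 * [Editor's note -- not part of this patch.]  lzx_is_16_bit() is defined
 * earlier in this file.  Presumably it just tests whether every buffer
 * position (and hence every matchfinder entry) fits in a 16-bit integer,
 * which is what lets the _16 matchfinder variants halve their memory use.
 * A hypothetical reconstruction:
 */
static inline bool
lzx_is_16_bit_sketch(size_t max_bufsize)
{
	return max_bufsize <= 65536;	/* positions fit in a u16 */
}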
+/* Allocate an LZX compressor. */
 static int
 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
 		      bool destructive, void **c_ret)
@@ -2033,92 +2821,85 @@ lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
 	unsigned window_order;
 	struct lzx_compressor *c;
 
+	/* Validate the maximum buffer size and get the window order from it. */
 	window_order = lzx_get_window_order(max_bufsize);
 	if (window_order == 0)
 		return WIMLIB_ERR_INVALID_PARAM;
 
-	c = ALIGNED_MALLOC(lzx_get_compressor_size(max_bufsize,
-						   compression_level),
-			   MATCHFINDER_ALIGNMENT);
+	/* Allocate the compressor. */
+	c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
 	if (!c)
 		goto oom0;
 
-	c->destructive = destructive;
-
-	c->num_main_syms = lzx_get_num_main_syms(window_order);
 	c->window_order = window_order;
+	c->num_main_syms = lzx_get_num_main_syms(window_order);
+	c->destructive = destructive;
 
+	/* Allocate the buffer for preprocessed data if needed. */
 	if (!c->destructive) {
 		c->in_buffer = MALLOC(max_bufsize);
 		if (!c->in_buffer)
 			goto oom1;
 	}
 
-	if (compression_level <= LZX_MAX_FAST_LEVEL) {
+	if (compression_level <= MAX_FAST_LEVEL) {
 
-		/* Fast compression: Use lazy parsing. */
+		/* Fast compression: Use lazy parsing. */
+		if (lzx_is_16_bit(max_bufsize))
+			c->impl = lzx_compress_lazy_16;
+		else
+			c->impl = lzx_compress_lazy_32;
 
-		c->impl = lzx_compress_lazy;
-		c->max_search_depth = (36 * compression_level) / 20;
-		c->nice_match_length = (72 * compression_level) / 20;
+		/* Scale max_search_depth and nice_match_length with the
+		 * compression level. */
+		c->max_search_depth = (60 * compression_level) / 20;
+		c->nice_match_length = (80 * compression_level) / 20;
 
 		/* lzx_compress_lazy() needs max_search_depth >= 2 because it
 		 * halves the max_search_depth when attempting a lazy match, and
-		 * max_search_depth cannot be 0. */
-		if (c->max_search_depth < 2)
-			c->max_search_depth = 2;
+		 * max_search_depth must be at least 1. */
+		c->max_search_depth = max(c->max_search_depth, 2);
 	} else {
 
-		/* Normal / high compression: Use near-optimal parsing. */
-
-		c->impl = lzx_compress_near_optimal;
+		/* Normal / high compression: Use near-optimal parsing. */
+		if (lzx_is_16_bit(max_bufsize))
+			c->impl = lzx_compress_near_optimal_16;
+		else
+			c->impl = lzx_compress_near_optimal_32;
 
-		/* Scale nice_match_length and max_search_depth with the
-		 * compression level. */
+		/* Scale max_search_depth and nice_match_length with the
+		 * compression level. */
 		c->max_search_depth = (24 * compression_level) / 50;
-		c->nice_match_length = (32 * compression_level) / 50;
-
-		/* Set a number of optimization passes appropriate for the
-		 * compression level. */
+		c->nice_match_length = (48 * compression_level) / 50;
 
+		/* Also scale num_optim_passes with the compression level.  But
+		 * the more passes there are, the less they help --- so don't
+		 * add them linearly. */
 		c->num_optim_passes = 1;
-
-		if (compression_level >= 45)
-			c->num_optim_passes++;
-
-		/* Use more optimization passes for higher compression levels.
-		 * But the more passes there are, the less they help --- so
-		 * don't add them linearly. */
-		if (compression_level >= 70) {
-			c->num_optim_passes++;
-			if (compression_level >= 100)
-				c->num_optim_passes++;
-			if (compression_level >= 150)
-				c->num_optim_passes++;
-			if (compression_level >= 200)
-				c->num_optim_passes++;
-			if (compression_level >= 300)
-				c->num_optim_passes++;
-		}
+		c->num_optim_passes += (compression_level >= 45);
+		c->num_optim_passes += (compression_level >= 70);
+		c->num_optim_passes += (compression_level >= 100);
+		c->num_optim_passes += (compression_level >= 150);
+		c->num_optim_passes += (compression_level >= 200);
+		c->num_optim_passes += (compression_level >= 300);
+
+		/* max_search_depth must be at least 1. */
+		c->max_search_depth = max(c->max_search_depth, 1);
 	}
 
-	/* max_search_depth == 0 is invalid. */
-	if (c->max_search_depth < 1)
-		c->max_search_depth = 1;
-
-	if (c->nice_match_length > LZX_MAX_MATCH_LEN)
-		c->nice_match_length = LZX_MAX_MATCH_LEN;
+	/* Prepare the offset => offset slot mapping. */
+	lzx_init_offset_slot_tabs(c);
 
-	lzx_init_offset_slot_fast(c);
 	*c_ret = c;
 	return 0;
 
 oom1:
-	ALIGNED_FREE(c);
+	FREE(c);
 oom0:
 	return WIMLIB_ERR_NOMEM;
 }
 
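/*
 * [Editor's worked example -- not part of this patch.]  At wimlib's default
 * compression level of 50 (above MAX_FAST_LEVEL, so near-optimal parsing is
 * selected), the formulas above give:
 *
 *	max_search_depth  = (24 * 50) / 50  = 24
 *	nice_match_length = (48 * 50) / 50  = 48
 *	num_optim_passes  = 1 + (50 >= 45)  = 2
 */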
+/* Compress a buffer of data. */
 static size_t
 lzx_compress(const void *restrict in, size_t in_nbytes,
 	     void *restrict out, size_t out_nbytes_avail, void *restrict _c)
@@ -2127,35 +2908,46 @@ lzx_compress(const void *restrict in, size_t in_nbytes,
 	struct lzx_output_bitstream os;
 	size_t result;
 
-	/* Don't bother trying to compress very small inputs. */
-	if (in_nbytes < 100)
+	/* Don't bother trying to compress very small inputs. */
+	if (in_nbytes < 64)
 		return 0;
 
-	/* Copy the input data into the internal buffer and preprocess it. */
-	if (c->destructive)
-		c->in_buffer = (void *)in;
-	else
+	/* If the compressor is in "destructive" mode, then we can directly
+	 * preprocess the input data.  Otherwise, we need to copy it into an
+	 * internal buffer first. */
+	if (!c->destructive) {
 		memcpy(c->in_buffer, in, in_nbytes);
-	c->in_nbytes = in_nbytes;
-	lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
+		in = c->in_buffer;
+	}
+
+	/* Preprocess the input data. */
+	lzx_preprocess((void *)in, in_nbytes);
 
-	/* Initially, the previous Huffman codeword lengths are all zeroes. */
+	/* Initially, the previous Huffman codeword lengths are all zeroes. */
 	c->codes_index = 0;
 	memset(&c->codes[1].lens, 0, sizeof(struct lzx_lens));
 
-	/* Initialize the output bitstream. */
+	/* Initialize the output bitstream. */
 	lzx_init_output(&os, out, out_nbytes_avail);
 
-	/* Call the compression level-specific compress() function. */
-	(*c->impl)(c, &os);
+	/* Call the compression level-specific compress() function. */
+	(*c->impl)(c, in, in_nbytes, &os);
 
-	/* Flush the output bitstream and return the compressed size or 0. */
+	/* Flush the output bitstream. */
 	result = lzx_flush_output(&os);
-	if (!result && c->destructive)
-		lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes);
+
+	/* If the data did not compress to less than its original size and we
+	 * preprocessed the original buffer, then postprocess it to restore it
+	 * to its original state. */
+	if (result == 0 && c->destructive)
+		lzx_postprocess((void *)in, in_nbytes);
+
+	/* Return the number of compressed bytes, or 0 if the input did not
+	 * compress to less than its original size. */
 	return result;
 }
 
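/*
 * [Editor's usage sketch -- not part of this patch.]  Callers normally reach
 * these functions through 'lzx_compressor_ops' below and wimlib's generic
 * compressor API, but the internal flow is approximately the following (the
 * function name is hypothetical):
 */
static size_t
example_compress_once(const void *in, size_t in_nbytes,
		      void *out, size_t out_nbytes_avail)
{
	void *c;
	size_t csize;

	/* destructive=false: 'in' is copied internally rather than being
	 * preprocessed in place. */
	if (lzx_create_compressor(in_nbytes, 50, false, &c) != 0)
		return 0;
	csize = lzx_compress(in, in_nbytes, out, out_nbytes_avail, c);
	lzx_free_compressor(c);
	return csize;	/* 0 means the data was not compressible */
}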
+/* Free an LZX compressor. */
 static void
 lzx_free_compressor(void *_c)
 {
@@ -2163,7 +2955,7 @@ lzx_free_compressor(void *_c)
 	if (!c->destructive)
 		FREE(c->in_buffer);
-	ALIGNED_FREE(c);
+	FREE(c);
 }
 
 const struct compressor_ops lzx_compressor_ops = {