*/
/*
- * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
+ * Copyright (C) 2012-2016 Eric Biggers
*
* This file is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the Free
#endif
/*
- * Start a new LZX block (with new Huffman codes) after this many bytes.
+ * The compressor always chooses a block of at least MIN_BLOCK_LENGTH bytes,
+ * unless the last block of the input must be shorter.
+ */
+#define MIN_BLOCK_LENGTH 6500
+
+/*
+ * The compressor attempts to end blocks after SOFT_MAX_BLOCK_LENGTH bytes, but
+ * the final length might be larger due to matches extending beyond the end of the
+ * block. Specifically:
*
- * Note: actual block sizes may slightly exceed this value.
+ * - The greedy parser may choose an arbitrarily long match starting at the
+ * SOFT_MAX_BLOCK_LENGTH'th byte.
*
- * TODO: recursive splitting and cost evaluation might be good for an extremely
- * high compression mode, but otherwise it is almost always far too slow for how
- * much it helps. Perhaps some sort of heuristic would be useful?
+ * - The lazy parser may choose a sequence of literals starting at the
+ * SOFT_MAX_BLOCK_LENGTH'th byte when it sees a sequence of increasingly good
+ * matches. The final match may be of arbitrary length. The length of the
+ * literal sequence is approximately limited by the "nice match length"
+ * parameter.
*/
-#define LZX_DIV_BLOCK_SIZE 32768
+#define SOFT_MAX_BLOCK_LENGTH 100000
/*
- * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the
- * match cache for each byte position. This value should be high enough so that
- * nearly the time, all matches found in a given block can fit in the match
- * cache. However, fallback behavior on cache overflow is still required.
+ * The number of observed matches or literals that represents sufficient data to
+ * decide whether the current block should be terminated or not.
*/
-#define LZX_CACHE_PER_POS 6
+#define NUM_OBSERVATIONS_PER_BLOCK_CHECK 500
-#define LZX_CACHE_LEN (LZX_DIV_BLOCK_SIZE * (LZX_CACHE_PER_POS + 1))
+/*
+ * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
+ * excluding the extra "overflow" entries. This value should be high enough so
+ * that, nearly all the time, all matches found in a given block can fit in the match
+ * cache. However, fallback behavior (immediately terminating the block) on
+ * cache overflow is still required.
+ */
+#define LZX_CACHE_LENGTH (SOFT_MAX_BLOCK_LENGTH * 5)
+/*
+ * LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
+ * ever be saved in the match cache for a single position. Since each match we
+ * save for a single position has a distinct length, we can use the number of
+ * possible match lengths in LZX as this bound. This bound is guaranteed to be
+ * valid in all cases, although if 'nice_match_length < LZX_MAX_MATCH_LEN', then
+ * it will never actually be reached.
+ */
#define LZX_MAX_MATCHES_PER_POS LZX_NUM_LENS
/*
* LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
- * THis makes it possible to consider fractional bit costs.
+ * This makes it possible to consider fractional bit costs.
*
* Note: this is only useful as a statistical trick for when the true costs are
* unknown. In reality, each token in LZX requires a whole number of bits to
* output.
*/
-#define LZX_BIT_COST 16
+#define LZX_BIT_COST 64
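+
+/*
+ * Illustrative example (added note): with LZX_BIT_COST = 64, a symbol whose
+ * estimated cost is 5.25 bits can be represented as the integer
+ * 5.25 * 64 = 336, so fractional estimates survive the integer arithmetic
+ * used by the cost model.
+ */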
/*
- * Consideration of aligned offset costs is disabled for now, due to
- * insufficient benefit gained from the time spent.
+ * Should the compressor take into account the costs of aligned offset symbols?
*/
-#define LZX_CONSIDER_ALIGNED_COSTS 0
+#define LZX_CONSIDER_ALIGNED_COSTS 1
/*
- * The maximum compression level at which we use the faster algorithm.
+ * LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
+ * faster algorithm.
*/
#define LZX_MAX_FAST_LEVEL 34
/*
- * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
- * for finding length 2 matches. This can be as high as 16 (in which case the
- * hash function is trivial), but using a smaller hash table actually speeds up
- * compression due to reduced cache pressure.
+ * BT_MATCHFINDER_HASH2_ORDER is the log base 2 of the number of entries in the
+ * hash table for finding length 2 matches. This could be as high as 16, but
+ * using a smaller hash table speeds up compression due to reduced cache
+ * pressure.
*/
-#define LZX_HASH2_ORDER 12
-#define LZX_HASH2_LENGTH (1UL << LZX_HASH2_ORDER)
-
-#include "wimlib/lzx_common.h"
+#define BT_MATCHFINDER_HASH2_ORDER 12
/*
- * The maximum allowed window order for the matchfinder.
+ * These are the compressor-side limits on the codeword lengths for each Huffman
+ * code. To make outputting bits slightly faster, some of these limits are
+ * lower than the limits defined by the LZX format. This does not significantly
+ * affect the compression ratio, at least for the block lengths we use.
*/
-#define MATCHFINDER_MAX_WINDOW_ORDER LZX_MAX_WINDOW_ORDER
-
-#include <string.h>
+#define MAIN_CODEWORD_LIMIT 16
+#define LENGTH_CODEWORD_LIMIT 12
+#define ALIGNED_CODEWORD_LIMIT 7
+#define PRE_CODEWORD_LIMIT 7
-#include "wimlib/bt_matchfinder.h"
#include "wimlib/compress_common.h"
#include "wimlib/compressor_ops.h"
-#include "wimlib/endianness.h"
#include "wimlib/error.h"
-#include "wimlib/hc_matchfinder.h"
#include "wimlib/lz_extend.h"
+#include "wimlib/lzx_common.h"
#include "wimlib/unaligned.h"
#include "wimlib/util.h"
+/* Matchfinders with 16-bit positions */
+#define mf_pos_t u16
+#define MF_SUFFIX _16
+#include "wimlib/lcpit_matchfinder.h"
+#include "wimlib/hc_matchfinder.h"
+
+/* Matchfinders with 32-bit positions */
+#undef mf_pos_t
+#undef MF_SUFFIX
+#define mf_pos_t u32
+#define MF_SUFFIX _32
+#include "wimlib/lcpit_matchfinder.h"
+#include "wimlib/hc_matchfinder.h"
+
struct lzx_output_bitstream;
/* Codewords for the LZX Huffman codes. */
/* Codeword lengths (in bits) for the LZX Huffman codes.
* A zero length means the corresponding codeword has zero frequency. */
struct lzx_lens {
- u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS];
- u8 len[LZX_LENCODE_NUM_SYMBOLS];
+ u8 main[LZX_MAINCODE_MAX_NUM_SYMBOLS + 1];
+ u8 len[LZX_LENCODE_NUM_SYMBOLS + 1];
u8 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
-/* Intermediate LZX match/literal format */
-struct lzx_item {
+/* Block split statistics. See "Block splitting algorithm" below. */
+#define NUM_LITERAL_OBSERVATION_TYPES 8
+#define NUM_MATCH_OBSERVATION_TYPES 2
+#define NUM_OBSERVATION_TYPES (NUM_LITERAL_OBSERVATION_TYPES + NUM_MATCH_OBSERVATION_TYPES)
+struct block_split_stats {
+ u32 new_observations[NUM_OBSERVATION_TYPES];
+ u32 observations[NUM_OBSERVATION_TYPES];
+ u32 num_new_observations;
+ u32 num_observations;
+};
- /* Bits 0 - 9: Main symbol
- * Bits 10 - 17: Length symbol
- * Bits 18 - 22: Number of extra offset bits
- * Bits 23+ : Extra offset bits */
- u64 data;
+/*
+ * Represents a run of literals followed by a match or end-of-block. This
+ * struct is needed to temporarily store items chosen by the parser, since items
+ * cannot be written until all items for the block have been chosen and the
+ * block's Huffman codes have been computed.
+ */
+struct lzx_sequence {
+
+ /* The number of literals in the run. This may be 0. The literals are
+ * not stored explicitly in this structure; instead, they are read
+ * directly from the uncompressed data. */
+ u16 litrunlen;
+
+ /* If the next field doesn't indicate end-of-block, then this is the
+ * match length minus LZX_MIN_MATCH_LEN. */
+ u16 adjusted_length;
+
+ /* If bit 31 is clear, then this field contains the match header in bits
+ * 0-8, and either the match offset plus LZX_OFFSET_ADJUSTMENT or a
+ * recent offset code in bits 9-30. Otherwise (if bit 31 is set), this
+ * sequence's literal run was the last literal run in the block, so
+ * there is no match that follows it. */
+ u32 adjusted_offset_and_match_hdr;
};
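+
+/*
+ * A worked example (added for illustration): a run of 4 literals followed by a
+ * length-5 match against the most recent offset (rep0) would be stored as
+ * litrunlen = 4, adjusted_length = 5 - LZX_MIN_MATCH_LEN = 3, and
+ * adjusted_offset_and_match_hdr = (0 << 9) | (0 * LZX_NUM_LEN_HEADERS + 3) = 3,
+ * since a repeat offset match stores its recent offset code (here 0) in place
+ * of an adjusted offset, and bit 31 is clear because a match follows the run.
+ */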
/*
* This variable is divided into two bitfields.
*
* Literals:
- * Low bits are 1, high bits are the literal.
+ * Low bits are 0, high bits are the literal.
*
* Explicit offset matches:
* Low bits are the match length, high bits are the offset plus 2.
u32 item;
#define OPTIMUM_OFFSET_SHIFT 9
#define OPTIMUM_LEN_MASK ((1 << OPTIMUM_OFFSET_SHIFT) - 1)
+#define OPTIMUM_EXTRA_FLAG 0x80000000
+ u32 extra_match;
+ u32 extra_literal;
} _aligned_attribute(8);
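+
+/*
+ * Example encodings of 'item' (added for illustration): the literal 'A' (0x41)
+ * is stored as 0x41 << OPTIMUM_OFFSET_SHIFT with a zero length field, while a
+ * length-4 match with adjusted offset 100 is stored as
+ * (100 << OPTIMUM_OFFSET_SHIFT) | 4. When OPTIMUM_EXTRA_FLAG is set (the
+ * "match + literal + rep0 match" case), the low bits instead hold the rep0
+ * match length, and 'extra_match' and 'extra_literal' describe the rest.
+ */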
/*
};
}
-/* Pop a match offset off the front (most recently used) end of the queue. */
-static inline u32
-lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
-{
- u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
- queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
- return offset;
-}
-
/* Swap a match offset to the front of the queue. */
static inline struct lzx_lru_queue
lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
/* Pointer to the compress() implementation chosen at allocation time */
void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
+ /* If true, the compressor need not preserve the input buffer if it
+ * compresses the data successfully. */
+ bool destructive;
+
/* The Huffman symbol frequency counters for the current block. */
struct lzx_freqs freqs;
+ /* Block split statistics. */
+ struct block_split_stats split_stats;
+
/* The Huffman codes for the current and previous blocks. The one with
* index 'codes_index' is for the current block, and the other one is
* for the previous block. */
struct lzx_codes codes[2];
unsigned codes_index;
- /* The match/literal sequence the algorithm chose for the current block.
- */
- struct lzx_item chosen_items[LZX_DIV_BLOCK_SIZE + LZX_MAX_MATCH_LEN + 1];
+ /* The matches and literals that the parser has chosen for the current
+ * block. The required length of this array is limited by the maximum
+ * number of matches that can ever be chosen for a single block, plus
+ * one for the special entry at the end. */
+ struct lzx_sequence chosen_sequences[
+ DIV_ROUND_UP(SOFT_MAX_BLOCK_LENGTH, LZX_MIN_MATCH_LEN) + 1];
+
+ /* Tables for mapping adjusted offsets to offset slots */
+
+ /* offset slots [0, 29] */
+ u8 offset_slot_tab_1[32768];
- /* Table mapping match offset => offset slot for small offsets */
-#define LZX_NUM_FAST_OFFSETS 32768
- u8 offset_slot_fast[LZX_NUM_FAST_OFFSETS];
+ /* offset slots [30, 49] */
+ u8 offset_slot_tab_2[128];
union {
/* Data for greedy or lazy parsing */
struct {
/* Hash chains matchfinder (MUST BE LAST!!!) */
- struct hc_matchfinder hc_mf;
+ union {
+ struct hc_matchfinder_16 hc_mf_16;
+ struct hc_matchfinder_32 hc_mf_32;
+ };
};
/* Data for near-optimal parsing */
struct {
- /* The graph nodes for the current block */
- struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE +
+ /*
+ * Array of nodes, one per position, for running the
+ * minimum-cost path algorithm.
+ *
+ * This array must be large enough to accommodate the
+ * worst-case number of nodes, which occurs if we find a
+ * match of length LZX_MAX_MATCH_LEN at position
+ * SOFT_MAX_BLOCK_LENGTH - 1, producing a block of length
+ * SOFT_MAX_BLOCK_LENGTH - 1 + LZX_MAX_MATCH_LEN. Add one
+ * for the end-of-block node.
+ */
+ struct lzx_optimum_node optimum_nodes[SOFT_MAX_BLOCK_LENGTH - 1 +
LZX_MAX_MATCH_LEN + 1];
/* The cost model for the current block */
struct lzx_costs costs;
- /* Cached matches for the current block */
- struct lz_match match_cache[LZX_CACHE_LEN + 1 +
- LZX_MAX_MATCHES_PER_POS];
- struct lz_match *cache_overflow_mark;
-
- /* Hash table for finding length 2 matches */
- pos_t hash2_tab[LZX_HASH2_LENGTH]
- _aligned_attribute(MATCHFINDER_ALIGNMENT);
+ /*
+ * Cached matches for the current block. This array
+ * contains the matches that were found at each position
+ * in the block. Specifically, for each position, there
+ * is a special 'struct lz_match' whose 'length' field
+ * contains the number of matches that were found at
+ * that position; this is followed by the matches
+ * themselves, if any, sorted by strictly increasing
+ * length.
+ *
+ * Note: in rare cases, there will be a very high number
+ * of matches in the block and this array will overflow.
+ * If this happens, we force the end of the current
+ * block. LZX_CACHE_LENGTH is the length at which we
+ * actually check for overflow. The extra slots beyond
+ * this are enough to absorb the worst case overflow,
+ * which occurs if starting at
+ * &match_cache[LZX_CACHE_LENGTH - 1], we write the
+ * match count header, then write
+ * LZX_MAX_MATCHES_PER_POS matches, then skip searching
+ * for matches at 'LZX_MAX_MATCH_LEN - 1' positions and
+ * write the match count header for each.
+ */
+ struct lz_match match_cache[LZX_CACHE_LENGTH +
+ LZX_MAX_MATCHES_PER_POS +
+ LZX_MAX_MATCH_LEN - 1];
- /* Binary trees matchfinder (MUST BE LAST!!!) */
- struct bt_matchfinder bt_mf;
+ struct lcpit_matchfinder lcpit_mf;
};
};
};
-/* Compute a hash value for the next 2 bytes of uncompressed data. */
-static inline u32
-lz_hash_2_bytes(const u8 *in_next)
+/*
+ * Will a matchfinder using 16-bit positions be sufficient for compressing
+ * buffers of up to the specified size? The limit could be 65536 bytes, but we
+ * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
+ * This requires that the limit be no more than the length of offset_slot_tab_1
+ * (currently 32768).
+ */
+static inline bool
+lzx_is_16_bit(size_t max_bufsize)
{
- u16 next_2_bytes = load_u16_unaligned(in_next);
- if (LZX_HASH2_ORDER == 16)
- return next_2_bytes;
- else
- return lz_hash(next_2_bytes, LZX_HASH2_ORDER);
+ STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
+ return max_bufsize <= 32768;
}
+/*
+ * The following macros call either the 16-bit or the 32-bit version of a
+ * matchfinder function based on the value of 'is_16_bit', which will be known
+ * at compilation time.
+ */
+
+#define CALL_HC_MF(is_16_bit, c, funcname, ...) \
+ ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
+ CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__));
+
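+/*
+ * For example, the choice made at allocation time can then be applied as:
+ *
+ *	CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
+ *
+ * which expands to hc_matchfinder_init_16(&c->hc_mf_16) or
+ * hc_matchfinder_init_32(&c->hc_mf_32), with the dead branch removed by
+ * constant folding when 'is_16_bit' is a compile-time constant.
+ */
+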
/*
* Structure to keep track of the current state of sending bits to the
* compressed output buffer.
struct lzx_output_bitstream {
/* Bits that haven't yet been written to the output buffer. */
- u32 bitbuf;
+ machine_word_t bitbuf;
/* Number of bits currently held in @bitbuf. */
u32 bitcount;
/* Pointer to the start of the output buffer. */
- le16 *start;
+ u8 *start;
/* Pointer to the position in the output buffer at which the next coding
* unit should be written. */
- le16 *next;
+ u8 *next;
- /* Pointer past the end of the output buffer. */
- le16 *end;
+ /* Pointer just past the end of the output buffer, rounded down to a
+ * 2-byte boundary. */
+ u8 *end;
};
+/* Can the specified number of bits always be added to 'bitbuf' after any
+ * pending 16-bit coding units have been flushed? */
+#define CAN_BUFFER(n) ((n) <= (8 * sizeof(machine_word_t)) - 15)
+
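+/* Concrete values (added note): with a 64-bit machine_word_t, CAN_BUFFER(n)
+ * holds for n <= 49; with a 32-bit word, for n <= 17. lzx_flush_bits() below
+ * writes at most three 16-bit coding units per call, hence the
+ * 'os->end - os->next < 6' output-space checks. */
+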
/*
* Initialize the output bitstream.
*
os->bitcount = 0;
os->start = buffer;
os->next = os->start;
- os->end = os->start + size / sizeof(le16);
+ os->end = os->start + (size & ~1);
}
-/*
- * Write some bits to the output bitstream.
- *
- * The bits are given by the low-order @num_bits bits of @bits. Higher-order
- * bits in @bits cannot be set. At most 17 bits can be written at once.
- *
- * @max_num_bits is a compile-time constant that specifies the maximum number of
- * bits that can ever be written at the call site. Currently, it is used to
- * optimize away the conditional code for writing a second 16-bit coding unit
- * when writing fewer than 17 bits.
- *
- * If the output buffer space is exhausted, then the bits will be ignored, and
- * lzx_flush_output() will return 0 when it gets called.
- */
+/* Add some bits to the bitbuffer variable of the output bitstream. The caller
+ * must make sure there is enough room. */
static inline void
-lzx_write_varbits(struct lzx_output_bitstream *os,
- const u32 bits, const unsigned num_bits,
- const unsigned max_num_bits)
+lzx_add_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
{
- /* This code is optimized for LZX, which never needs to write more than
- * 17 bits at once. */
- LZX_ASSERT(num_bits <= 17);
- LZX_ASSERT(num_bits <= max_num_bits);
- LZX_ASSERT(os->bitcount <= 15);
-
- /* Add the bits to the bit buffer variable. @bitcount will be at most
- * 15, so there will be just enough space for the maximum possible
- * @num_bits of 17. */
- os->bitcount += num_bits;
os->bitbuf = (os->bitbuf << num_bits) | bits;
+ os->bitcount += num_bits;
+}
- /* Check whether any coding units need to be written. */
- if (os->bitcount >= 16) {
-
- os->bitcount -= 16;
-
- /* Write a coding unit, unless it would overflow the buffer. */
- if (os->next != os->end)
- put_unaligned_u16_le(os->bitbuf >> os->bitcount, os->next++);
+/* Flush bits from the bitbuffer variable to the output buffer. 'max_num_bits'
+ * specifies the maximum number of bits that may have been added since the last
+ * flush. */
+static inline void
+lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
+{
+ /* Masking the number of bits to shift is only needed to avoid undefined
+ * behavior; we don't actually care about the results of bad shifts. On
+ * x86, the explicit masking generates no extra code. */
+ const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1;
- /* If writing 17 bits, a second coding unit might need to be
- * written. But because 'max_num_bits' is a compile-time
- * constant, the compiler will optimize away this code at most
- * call sites. */
- if (max_num_bits == 17 && os->bitcount == 16) {
- if (os->next != os->end)
- put_unaligned_u16_le(os->bitbuf, os->next++);
- os->bitcount = 0;
- }
- }
+ if (os->end - os->next < 6)
+ return;
+ put_unaligned_le16(os->bitbuf >> ((os->bitcount - 16) &
+ shift_mask), os->next + 0);
+ if (max_num_bits > 16)
+ put_unaligned_le16(os->bitbuf >> ((os->bitcount - 32) &
+ shift_mask), os->next + 2);
+ if (max_num_bits > 32)
+ put_unaligned_le16(os->bitbuf >> ((os->bitcount - 48) &
+ shift_mask), os->next + 4);
+ os->next += (os->bitcount >> 4) << 1;
+ os->bitcount &= 15;
}
-/* Use when @num_bits is a compile-time constant. Otherwise use
- * lzx_write_varbits(). */
+/* Add at most 16 bits to the bitbuffer and flush it. */
static inline void
-lzx_write_bits(struct lzx_output_bitstream *os,
- const u32 bits, const unsigned num_bits)
+lzx_write_bits(struct lzx_output_bitstream *os, u32 bits, unsigned num_bits)
{
- lzx_write_varbits(os, bits, num_bits, num_bits);
+ lzx_add_bits(os, bits, num_bits);
+ lzx_flush_bits(os, 16);
}
/*
static u32
lzx_flush_output(struct lzx_output_bitstream *os)
{
- if (os->next == os->end)
+ if (os->end - os->next < 6)
return 0;
- if (os->bitcount != 0)
- put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next++);
+ if (os->bitcount != 0) {
+ put_unaligned_le16(os->bitbuf << (16 - os->bitcount), os->next);
+ os->next += 2;
+ }
- return (const u8 *)os->next - (const u8 *)os->start;
+ return os->next - os->start;
}
-/* Build the main, length, and aligned offset Huffman codes used in LZX.
+/*
+ * Build the main, length, and aligned offset Huffman codes used in LZX.
*
* This takes as input the frequency tables for each code and produces as output
- * a set of tables that map symbols to codewords and codeword lengths. */
+ * a set of tables that map symbols to codewords and codeword lengths.
+ */
static void
lzx_make_huffman_codes(struct lzx_compressor *c)
{
const struct lzx_freqs *freqs = &c->freqs;
struct lzx_codes *codes = &c->codes[c->codes_index];
+ STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
+ MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
+ STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
+ LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
+ STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
+ ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
+
make_canonical_huffman_code(c->num_main_syms,
- LZX_MAX_MAIN_CODEWORD_LEN,
+ MAIN_CODEWORD_LIMIT,
freqs->main,
codes->lens.main,
codes->codewords.main);
make_canonical_huffman_code(LZX_LENCODE_NUM_SYMBOLS,
- LZX_MAX_LEN_CODEWORD_LEN,
+ LENGTH_CODEWORD_LIMIT,
freqs->len,
codes->lens.len,
codes->codewords.len);
make_canonical_huffman_code(LZX_ALIGNEDCODE_NUM_SYMBOLS,
- LZX_MAX_ALIGNED_CODEWORD_LEN,
+ ALIGNED_CODEWORD_LIMIT,
freqs->aligned,
codes->lens.aligned,
codes->codewords.aligned);
static unsigned
lzx_compute_precode_items(const u8 lens[restrict],
const u8 prev_lens[restrict],
- const unsigned num_lens,
u32 precode_freqs[restrict],
unsigned precode_items[restrict])
{
itemptr = precode_items;
run_start = 0;
- do {
- /* Find the next run of codeword lengths. */
+
+ while (!((len = lens[run_start]) & 0x80)) {
/* len = the length being repeated */
- len = lens[run_start];
+
+ /* Find the next run of codeword lengths. */
run_end = run_start + 1;
/* Fast case for a single length. */
- if (likely(run_end == num_lens || len != lens[run_end])) {
+ if (likely(len != lens[run_end])) {
delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
/* Extend the run. */
do {
run_end++;
- } while (run_end != num_lens && len == lens[run_end]);
+ } while (len == lens[run_end]);
if (len == 0) {
/* Run of zeroes. */
*itemptr++ = delta;
run_start++;
}
- } while (run_start != num_lens);
+ }
return itemptr - precode_items;
}
unsigned precode_item;
unsigned precode_sym;
unsigned i;
+ u8 saved = lens[num_lens];
+ *(u8 *)(lens + num_lens) = 0x80;
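+ /* Added note: 0x80 can never be a valid codeword length, so it serves as
+ * a sentinel that lets lzx_compute_precode_items() find the end of 'lens'
+ * without a separate bounds check; the saved byte is restored before this
+ * function returns. */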
for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
precode_freqs[i] = 0;
* the codeword lengths in the larger code will be output. */
num_precode_items = lzx_compute_precode_items(lens,
prev_lens,
- num_lens,
precode_freqs,
precode_items);
/* Build the precode. */
+ STATIC_ASSERT(PRE_CODEWORD_LIMIT >= 5 &&
+ PRE_CODEWORD_LIMIT <= LZX_MAX_PRE_CODEWORD_LEN);
make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
- LZX_MAX_PRE_CODEWORD_LEN,
+ PRE_CODEWORD_LIMIT,
precode_freqs, precode_lens,
precode_codewords);
for (i = 0; i < num_precode_items; i++) {
precode_item = precode_items[i];
precode_sym = precode_item & 0x1F;
- lzx_write_varbits(os, precode_codewords[precode_sym],
- precode_lens[precode_sym],
- LZX_MAX_PRE_CODEWORD_LEN);
+ lzx_add_bits(os, precode_codewords[precode_sym],
+ precode_lens[precode_sym]);
if (precode_sym >= 17) {
if (precode_sym == 17) {
- lzx_write_bits(os, precode_item >> 5, 4);
+ lzx_add_bits(os, precode_item >> 5, 4);
} else if (precode_sym == 18) {
- lzx_write_bits(os, precode_item >> 5, 5);
+ lzx_add_bits(os, precode_item >> 5, 5);
} else {
- lzx_write_bits(os, (precode_item >> 5) & 1, 1);
+ lzx_add_bits(os, (precode_item >> 5) & 1, 1);
precode_sym = precode_item >> 6;
- lzx_write_varbits(os, precode_codewords[precode_sym],
- precode_lens[precode_sym],
- LZX_MAX_PRE_CODEWORD_LEN);
+ lzx_add_bits(os, precode_codewords[precode_sym],
+ precode_lens[precode_sym]);
}
}
+ STATIC_ASSERT(CAN_BUFFER(2 * PRE_CODEWORD_LIMIT + 1));
+ lzx_flush_bits(os, 2 * PRE_CODEWORD_LIMIT + 1);
}
-}
-
-/* Output a match or literal. */
-static inline void
-lzx_write_item(struct lzx_output_bitstream *os, struct lzx_item item,
- unsigned ones_if_aligned, const struct lzx_codes *codes)
-{
- u64 data = item.data;
- unsigned main_symbol;
- unsigned len_symbol;
- unsigned num_extra_bits;
- u32 extra_bits;
-
- main_symbol = data & 0x3FF;
-
- lzx_write_varbits(os, codes->codewords.main[main_symbol],
- codes->lens.main[main_symbol],
- LZX_MAX_MAIN_CODEWORD_LEN);
-
- if (main_symbol < LZX_NUM_CHARS) /* Literal? */
- return;
-
- len_symbol = (data >> 10) & 0xFF;
-
- if (len_symbol != LZX_LENCODE_NUM_SYMBOLS) {
- lzx_write_varbits(os, codes->codewords.len[len_symbol],
- codes->lens.len[len_symbol],
- LZX_MAX_LEN_CODEWORD_LEN);
- }
-
- num_extra_bits = (data >> 18) & 0x1F;
- if (num_extra_bits == 0) /* Small offset or repeat offset match? */
- return;
- extra_bits = data >> 23;
-
- if ((num_extra_bits & ones_if_aligned) >= LZX_NUM_ALIGNED_OFFSET_BITS) {
-
- /* Aligned offset blocks: The low 3 bits of the extra offset
- * bits are Huffman-encoded using the aligned offset code. The
- * remaining bits are output literally. */
-
- lzx_write_varbits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
- num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS,
- 17 - LZX_NUM_ALIGNED_OFFSET_BITS);
-
- lzx_write_varbits(os,
- codes->codewords.aligned[extra_bits & LZX_ALIGNED_OFFSET_BITMASK],
- codes->lens.aligned[extra_bits & LZX_ALIGNED_OFFSET_BITMASK],
- LZX_MAX_ALIGNED_CODEWORD_LEN);
- } else {
- /* Verbatim blocks, or fewer than 3 extra bits: All extra
- * offset bits are output literally. */
- lzx_write_varbits(os, extra_bits, num_extra_bits, 17);
- }
+ *(u8 *)(lens + num_lens) = saved;
}
/*
* @block_type
* The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
* LZX_BLOCKTYPE_VERBATIM).
- * @items
- * The array of matches/literals to output.
- * @num_items
- * Number of matches/literals to output (length of @items).
+ * @block_data
+ * The uncompressed data of the block.
+ * @sequences
+ * The matches and literals to output, given as a series of sequences.
* @codes
* The main, length, and aligned offset Huffman codes for the current
* LZX compressed block.
*/
static void
-lzx_write_items(struct lzx_output_bitstream *os, int block_type,
- const struct lzx_item items[], u32 num_items,
- const struct lzx_codes *codes)
+lzx_write_sequences(struct lzx_output_bitstream *os, int block_type,
+ const u8 *block_data, const struct lzx_sequence sequences[],
+ const struct lzx_codes *codes)
{
- unsigned ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
+ const struct lzx_sequence *seq = sequences;
+ u32 ones_if_aligned = 0 - (block_type == LZX_BLOCKTYPE_ALIGNED);
+
+ for (;;) {
+ /* Output the next sequence. */
+
+ unsigned litrunlen = seq->litrunlen;
+ unsigned match_hdr;
+ unsigned main_symbol;
+ unsigned adjusted_length;
+ u32 adjusted_offset;
+ unsigned offset_slot;
+ unsigned num_extra_bits;
+ u32 extra_bits;
+
+ /* Output the literal run of the sequence. */
+
+ if (litrunlen) { /* Is the literal run nonempty? */
+
+ /* Verify optimization is enabled on 64-bit */
+ STATIC_ASSERT(sizeof(machine_word_t) < 8 ||
+ CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT));
+
+ if (CAN_BUFFER(3 * MAIN_CODEWORD_LIMIT)) {
+
+ /* 64-bit: write 3 literals at a time. */
+ while (litrunlen >= 3) {
+ unsigned lit0 = block_data[0];
+ unsigned lit1 = block_data[1];
+ unsigned lit2 = block_data[2];
+ lzx_add_bits(os, codes->codewords.main[lit0],
+ codes->lens.main[lit0]);
+ lzx_add_bits(os, codes->codewords.main[lit1],
+ codes->lens.main[lit1]);
+ lzx_add_bits(os, codes->codewords.main[lit2],
+ codes->lens.main[lit2]);
+ lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
+ block_data += 3;
+ litrunlen -= 3;
+ }
+ if (litrunlen--) {
+ unsigned lit = *block_data++;
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
+ if (litrunlen--) {
+ unsigned lit = *block_data++;
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
+ lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
+ } else {
+ lzx_flush_bits(os, 1 * MAIN_CODEWORD_LIMIT);
+ }
+ }
+ } else {
+ /* 32-bit: write 1 literal at a time. */
+ do {
+ unsigned lit = *block_data++;
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
+ lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
+ } while (--litrunlen);
+ }
+ }
+
+ /* Was this the last literal run? */
+ if (seq->adjusted_offset_and_match_hdr & 0x80000000)
+ return;
+
+ /* Nope; output the match. */
- for (u32 i = 0; i < num_items; i++)
- lzx_write_item(os, items[i], ones_if_aligned, codes);
+ match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
+ main_symbol = LZX_NUM_CHARS + match_hdr;
+ adjusted_length = seq->adjusted_length;
+
+ block_data += adjusted_length + LZX_MIN_MATCH_LEN;
+
+ offset_slot = match_hdr / LZX_NUM_LEN_HEADERS;
+ adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
+
+ num_extra_bits = lzx_extra_offset_bits[offset_slot];
+ extra_bits = adjusted_offset - lzx_offset_slot_base[offset_slot];
+
+ #define MAX_MATCH_BITS (MAIN_CODEWORD_LIMIT + LENGTH_CODEWORD_LIMIT + \
+ 14 + ALIGNED_CODEWORD_LIMIT)
+
+ /* Verify optimization is enabled on 64-bit */
+ STATIC_ASSERT(sizeof(machine_word_t) < 8 || CAN_BUFFER(MAX_MATCH_BITS));
+
+ /* Output the main symbol for the match. */
+
+ lzx_add_bits(os, codes->codewords.main[main_symbol],
+ codes->lens.main[main_symbol]);
+ if (!CAN_BUFFER(MAX_MATCH_BITS))
+ lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
+
+ /* If needed, output the length symbol for the match. */
+
+ if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
+ lzx_add_bits(os, codes->codewords.len[adjusted_length -
+ LZX_NUM_PRIMARY_LENS],
+ codes->lens.len[adjusted_length -
+ LZX_NUM_PRIMARY_LENS]);
+ if (!CAN_BUFFER(MAX_MATCH_BITS))
+ lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
+ }
+
+ /* Output the extra offset bits for the match. In aligned
+ * offset blocks, the lowest 3 bits of the adjusted offset are
+ * Huffman-encoded using the aligned offset code, provided that
+ * at least 3 extra offset bits are required. All other
+ * extra offset bits are output verbatim. */
+
+ if ((adjusted_offset & ones_if_aligned) >= 16) {
+
+ lzx_add_bits(os, extra_bits >> LZX_NUM_ALIGNED_OFFSET_BITS,
+ num_extra_bits - LZX_NUM_ALIGNED_OFFSET_BITS);
+ if (!CAN_BUFFER(MAX_MATCH_BITS))
+ lzx_flush_bits(os, 14);
+
+ lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
+ LZX_ALIGNED_OFFSET_BITMASK],
+ codes->lens.aligned[adjusted_offset &
+ LZX_ALIGNED_OFFSET_BITMASK]);
+ if (!CAN_BUFFER(MAX_MATCH_BITS))
+ lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
+ } else {
+ STATIC_ASSERT(CAN_BUFFER(17));
+
+ lzx_add_bits(os, extra_bits, num_extra_bits);
+ if (!CAN_BUFFER(MAX_MATCH_BITS))
+ lzx_flush_bits(os, 17);
+ }
+
+ if (CAN_BUFFER(MAX_MATCH_BITS))
+ lzx_flush_bits(os, MAX_MATCH_BITS);
+
+ /* Advance to the next sequence. */
+ seq++;
+ }
}
static void
-lzx_write_compressed_block(int block_type,
- u32 block_size,
+lzx_write_compressed_block(const u8 *block_begin,
+ int block_type,
+ u32 block_length,
unsigned window_order,
unsigned num_main_syms,
- const struct lzx_item chosen_items[],
- u32 num_chosen_items,
+ const struct lzx_sequence sequences[],
const struct lzx_codes * codes,
const struct lzx_lens * prev_lens,
struct lzx_output_bitstream * os)
{
- LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
- block_type == LZX_BLOCKTYPE_VERBATIM);
-
/* The first three bits indicate the type of block and are one of the
* LZX_BLOCKTYPE_* constants. */
lzx_write_bits(os, block_type, 3);
- /* Output the block size.
+ /*
+ * Output the block length.
*
- * The original LZX format seemed to always encode the block size in 3
+ * The original LZX format seemed to always encode the block length in 3
* bytes. However, the implementation in WIMGAPI, as used in WIM files,
- * uses the first bit to indicate whether the block is the default size
- * (32768) or a different size given explicitly by the next 16 bits.
+ * uses the first bit to indicate whether the block is the default
+ * length (32768) or a different length given explicitly by the next 16
+ * bits.
*
* By default, this compressor uses a window size of 32768 and therefore
* follows the WIMGAPI behavior. However, this compressor also supports
* window sizes greater than 32768 bytes, which do not appear to be
* supported by WIMGAPI. In such cases, we retain the default size bit
- * to mean a size of 32768 bytes but output non-default block size in 24
- * bits rather than 16. The compatibility of this behavior is unknown
- * because WIMs created with chunk size greater than 32768 can seemingly
- * only be opened by wimlib anyway. */
- if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
+ * to mean a size of 32768 bytes but output non-default block length in
+ * 24 bits rather than 16. The compatibility of this behavior is
+ * unknown because WIMs created with chunk size greater than 32768 can
+ * seemingly only be opened by wimlib anyway.
+ */
+ if (block_length == LZX_DEFAULT_BLOCK_SIZE) {
lzx_write_bits(os, 1, 1);
} else {
lzx_write_bits(os, 0, 1);
if (window_order >= 16)
- lzx_write_bits(os, block_size >> 16, 8);
+ lzx_write_bits(os, block_length >> 16, 8);
- lzx_write_bits(os, block_size & 0xFFFF, 16);
+ lzx_write_bits(os, block_length & 0xFFFF, 16);
}
/* If it's an aligned offset block, output the aligned offset code. */
LZX_LENCODE_NUM_SYMBOLS);
/* Output the compressed matches and literals. */
- lzx_write_items(os, block_type, chosen_items, num_chosen_items, codes);
+ lzx_write_sequences(os, block_type, block_begin, sequences, codes);
}
/* Given the frequencies of symbols in an LZX-compressed block and the
}
/*
- * Finish an LZX block:
+ * Return the offset slot for the specified adjusted match offset, using the
+ * compressor's acceleration tables to speed up the mapping.
+ */
+static inline unsigned
+lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
+ bool is_16_bit)
+{
+ if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
+ return c->offset_slot_tab_1[adjusted_offset];
+ return c->offset_slot_tab_2[adjusted_offset >> 14];
+}
+
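+/*
+ * Added note: this two-table scheme works because every offset slot boundary
+ * at or above 32768 (the start of slot 30) is a multiple of 2^14, so for
+ * larger adjusted offsets the slot is fully determined by
+ * 'adjusted_offset >> 14'. Slots [30, 49] cover adjusted offsets up to just
+ * under 2^21, LZX's maximum window size, so 128 entries suffice.
+ */
+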
+/*
+ * Flush an LZX block:
*
- * - build the Huffman codes
- * - decide whether to output the block as VERBATIM or ALIGNED
- * - output the block
- * - swap the indices of the current and previous Huffman codes
+ * 1. Build the Huffman codes.
+ * 2. Decide whether to output the block as VERBATIM or ALIGNED.
+ * 3. Write the block.
+ * 4. Swap the indices of the current and previous Huffman codes.
*/
static void
-lzx_finish_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
- u32 block_size, u32 num_chosen_items)
+lzx_flush_block(struct lzx_compressor *c, struct lzx_output_bitstream *os,
+ const u8 *block_begin, u32 block_length, u32 seq_idx)
{
int block_type;
block_type = lzx_choose_verbatim_or_aligned(&c->freqs,
&c->codes[c->codes_index]);
- lzx_write_compressed_block(block_type,
- block_size,
+ lzx_write_compressed_block(block_begin,
+ block_type,
+ block_length,
c->window_order,
c->num_main_syms,
- c->chosen_items,
- num_chosen_items,
+ &c->chosen_sequences[seq_idx],
&c->codes[c->codes_index],
&c->codes[c->codes_index ^ 1].lens,
os);
c->codes_index ^= 1;
}
-/* Return the offset slot for the specified offset, which must be
- * less than LZX_NUM_FAST_OFFSETS. */
-static inline unsigned
-lzx_get_offset_slot_fast(struct lzx_compressor *c, u32 offset)
+/* Tally the Huffman symbol for a literal and increment the literal run length.
+ */
+static inline void
+lzx_record_literal(struct lzx_compressor *c, unsigned literal, u32 *litrunlen_p)
{
- LZX_ASSERT(offset < LZX_NUM_FAST_OFFSETS);
- return c->offset_slot_fast[offset];
+ c->freqs.main[literal]++;
+ ++*litrunlen_p;
}
-/* Tally, and optionally record, the specified literal byte. */
+/* Tally the Huffman symbol for a match, save the match data and the length of
+ * the preceding literal run in the next lzx_sequence, and update the recent
+ * offsets queue. */
static inline void
-lzx_declare_literal(struct lzx_compressor *c, unsigned literal,
- struct lzx_item **next_chosen_item)
+lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
+ u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
+ u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
{
- unsigned main_symbol = lzx_main_symbol_for_literal(literal);
+ u32 litrunlen = *litrunlen_p;
+ struct lzx_sequence *next_seq = *next_seq_p;
+ unsigned offset_slot;
+ unsigned v;
- c->freqs.main[main_symbol]++;
+ v = length - LZX_MIN_MATCH_LEN;
- if (next_chosen_item) {
- *(*next_chosen_item)++ = (struct lzx_item) {
- .data = main_symbol,
- };
+ /* Save the literal run length and adjusted length. */
+ next_seq->litrunlen = litrunlen;
+ next_seq->adjusted_length = v;
+
+ /* Compute the length header and tally the length symbol if needed */
+ if (v >= LZX_NUM_PRIMARY_LENS) {
+ c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
+ v = LZX_NUM_PRIMARY_LENS;
}
-}
-/* Tally, and optionally record, the specified repeat offset match. */
-static inline void
-lzx_declare_repeat_offset_match(struct lzx_compressor *c,
- unsigned len, unsigned rep_index,
- struct lzx_item **next_chosen_item)
-{
- unsigned len_header;
- unsigned len_symbol;
- unsigned main_symbol;
+ /* Compute the offset slot */
+ offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
- if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
- len_header = len - LZX_MIN_MATCH_LEN;
- len_symbol = LZX_LENCODE_NUM_SYMBOLS;
- } else {
- len_header = LZX_NUM_PRIMARY_LENS;
- len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
- c->freqs.len[len_symbol]++;
- }
+ /* Compute the match header. */
+ v += offset_slot * LZX_NUM_LEN_HEADERS;
- main_symbol = lzx_main_symbol_for_match(rep_index, len_header);
+ /* Save the adjusted offset and match header. */
+ next_seq->adjusted_offset_and_match_hdr = (offset_data << 9) | v;
- c->freqs.main[main_symbol]++;
+ /* Tally the main symbol. */
+ c->freqs.main[LZX_NUM_CHARS + v]++;
- if (next_chosen_item) {
- *(*next_chosen_item)++ = (struct lzx_item) {
- .data = (u64)main_symbol | ((u64)len_symbol << 10),
- };
+ /* Update the recent offsets queue. */
+ if (offset_data < LZX_NUM_RECENT_OFFSETS) {
+ /* Repeat offset match */
+ swap(recent_offsets[0], recent_offsets[offset_data]);
+ } else {
+ /* Explicit offset match */
+
+ /* Tally the aligned offset symbol if needed */
+ if (offset_data >= 16)
+ c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
+
+ recent_offsets[2] = recent_offsets[1];
+ recent_offsets[1] = recent_offsets[0];
+ recent_offsets[0] = offset_data - LZX_OFFSET_ADJUSTMENT;
}
+
+ /* Reset the literal run length and advance to the next sequence. */
+ *next_seq_p = next_seq + 1;
+ *litrunlen_p = 0;
}
-/* Tally, and optionally record, the specified explicit offset match. */
+/* Finish the last lzx_sequence. The last lzx_sequence is just a literal run;
+ * there is no match. This literal run may be empty. */
static inline void
-lzx_declare_explicit_offset_match(struct lzx_compressor *c, unsigned len, u32 offset,
- struct lzx_item **next_chosen_item)
+lzx_finish_sequence(struct lzx_sequence *last_seq, u32 litrunlen)
{
- unsigned len_header;
- unsigned len_symbol;
- unsigned main_symbol;
- unsigned offset_slot;
- unsigned num_extra_bits;
- u32 extra_bits;
+ last_seq->litrunlen = litrunlen;
- if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
- len_header = len - LZX_MIN_MATCH_LEN;
- len_symbol = LZX_LENCODE_NUM_SYMBOLS;
- } else {
- len_header = LZX_NUM_PRIMARY_LENS;
- len_symbol = len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS;
- c->freqs.len[len_symbol]++;
- }
+ /* Special value to mark last sequence */
+ last_seq->adjusted_offset_and_match_hdr = 0x80000000;
+}
- offset_slot = (offset < LZX_NUM_FAST_OFFSETS) ?
- lzx_get_offset_slot_fast(c, offset) :
- lzx_get_offset_slot(offset);
+/******************************************************************************/
- main_symbol = lzx_main_symbol_for_match(offset_slot, len_header);
+/*
+ * Block splitting algorithm. The problem is to decide when it is worthwhile to
+ * start a new block with new entropy codes. There is a theoretically optimal
+ * solution: recursively consider every possible block split, considering the
+ * exact cost of each block, and choose the minimum cost approach. But this is
+ * far too slow. Instead, as an approximation, we can count symbols and after
+ * every N symbols, compare the expected distribution of symbols based on the
+ * previous data with the actual distribution. If they differ "by enough", then
+ * start a new block.
+ *
+ * As an optimization and heuristic, we don't distinguish between every symbol
+ * but rather we combine many symbols into a single "observation type". For
+ * literals we only look at the high bits and low bits, and for matches we only
+ * look at whether the match is long or not. The assumption is that for typical
+ * "real" data, places that are good block boundaries will tend to be noticeable
+ * based only on changes in these aggregate frequencies, without looking for
+ * subtle differences in individual symbols. For example, a change from ASCII
+ * bytes to non-ASCII bytes, or from few matches (generally less compressible)
+ * to many matches (generally more compressible), would be easily noticed based
+ * on the aggregates.
+ *
+ * For determining whether the frequency distributions are "different enough" to
+ * start a new block, the simple heuristic of splitting when the sum of absolute
+ * differences exceeds a constant seems to be good enough. We also add a number
+ * proportional to the block length so that the algorithm is more likely to end
+ * long blocks than short blocks. This reflects the general expectation that it
+ * will become increasingly beneficial to start a new block as the current
+ * block grows larger.
+ *
+ * Finally, for an approximation, it is not strictly necessary that the exact
+ * symbols being used are considered. With "near-optimal parsing", for example,
+ * the actual symbols that will be used are unknown until after the block
+ * boundary is chosen and the block has been optimized. Since the final choices
+ * cannot be used, we can use preliminary "greedy" choices instead.
+ */
- c->freqs.main[main_symbol]++;
+/* Initialize the block split statistics when starting a new block. */
+static void
+init_block_split_stats(struct block_split_stats *stats)
+{
+ for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
+ stats->new_observations[i] = 0;
+ stats->observations[i] = 0;
+ }
+ stats->num_new_observations = 0;
+ stats->num_observations = 0;
+}
- num_extra_bits = lzx_extra_offset_bits[offset_slot];
+/* Literal observation. Heuristic: use the top 2 bits and the low bit of the
+ * literal, for 8 possible literal observation types. */
+static inline void
+observe_literal(struct block_split_stats *stats, u8 lit)
+{
+ stats->new_observations[((lit >> 5) & 0x6) | (lit & 1)]++;
+ stats->num_new_observations++;
+}
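+
+/* For instance (added note): the ASCII literal 'e' (0x65 = 0b01100101) has
+ * top bits 01 and low bit 1, giving observation type
+ * ((0x65 >> 5) & 0x6) | 1 = 3. */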
- if (num_extra_bits >= LZX_NUM_ALIGNED_OFFSET_BITS)
- c->freqs.aligned[(offset + LZX_OFFSET_ADJUSTMENT) &
- LZX_ALIGNED_OFFSET_BITMASK]++;
+/* Match observation. Heuristic: use one observation type for "short match" and
+ * one observation type for "long match". */
+static inline void
+observe_match(struct block_split_stats *stats, unsigned length)
+{
+ stats->new_observations[NUM_LITERAL_OBSERVATION_TYPES + (length >= 5)]++;
+ stats->num_new_observations++;
+}
- if (next_chosen_item) {
+static bool
+do_end_block_check(struct block_split_stats *stats, u32 block_length)
+{
+ if (stats->num_observations > 0) {
+
+ /* Note: to avoid slow divisions, we do not divide by
+ * 'num_observations', but rather do all math with the numbers
+ * multiplied by 'num_observations'. */
+ u32 total_delta = 0;
+ for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
+ u32 expected = stats->observations[i] * stats->num_new_observations;
+ u32 actual = stats->new_observations[i] * stats->num_observations;
+ u32 delta = (actual > expected) ? actual - expected :
+ expected - actual;
+ total_delta += delta;
+ }
- extra_bits = (offset + LZX_OFFSET_ADJUSTMENT) -
- lzx_offset_slot_base[offset_slot];
+ /* Ready to end the block? */
+ if (total_delta + (block_length / 1024) * stats->num_observations >=
+ stats->num_new_observations * 51 / 64 * stats->num_observations)
+ return true;
+ }
- BUILD_BUG_ON(LZX_MAINCODE_MAX_NUM_SYMBOLS > (1 << 10));
- BUILD_BUG_ON(LZX_LENCODE_NUM_SYMBOLS > (1 << 8));
- *(*next_chosen_item)++ = (struct lzx_item) {
- .data = (u64)main_symbol |
- ((u64)len_symbol << 10) |
- ((u64)num_extra_bits << 18) |
- ((u64)extra_bits << 23),
- };
+ for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
+ stats->num_observations += stats->new_observations[i];
+ stats->observations[i] += stats->new_observations[i];
+ stats->new_observations[i] = 0;
}
+ stats->num_new_observations = 0;
+ return false;
}
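+
+/*
+ * An illustrative reading of the check above (added commentary): dividing both
+ * sides by num_observations * num_new_observations shows that a block is
+ * ended roughly when the L1 distance between the old and new frequency
+ * distributions reaches 51/64, less an allowance of
+ * (block_length / 1024) / num_new_observations that makes long blocks
+ * progressively easier to end than short ones.
+ */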
-
-/* Tally, and optionally record, the specified match or literal. */
-static inline void
-lzx_declare_item(struct lzx_compressor *c, u32 item,
- struct lzx_item **next_chosen_item)
+static inline bool
+should_end_block(struct block_split_stats *stats,
+ const u8 *in_block_begin, const u8 *in_next, const u8 *in_end)
{
- u32 len = item & OPTIMUM_LEN_MASK;
- u32 offset_data = item >> OPTIMUM_OFFSET_SHIFT;
-
- if (len == 1)
- lzx_declare_literal(c, offset_data, next_chosen_item);
- else if (offset_data < LZX_NUM_RECENT_OFFSETS)
- lzx_declare_repeat_offset_match(c, len, offset_data,
- next_chosen_item);
- else
- lzx_declare_explicit_offset_match(c, len,
- offset_data - LZX_OFFSET_ADJUSTMENT,
- next_chosen_item);
+ /* Ready to check block split statistics? */
+ if (stats->num_new_observations < NUM_OBSERVATIONS_PER_BLOCK_CHECK ||
+ in_next - in_block_begin < MIN_BLOCK_LENGTH ||
+ in_end - in_next < MIN_BLOCK_LENGTH)
+ return false;
+
+ return do_end_block_check(stats, in_next - in_block_begin);
}
+/******************************************************************************/
+
+/*
+ * Given the minimum-cost path computed through the item graph for the current
+ * block, walk the path and count how many of each symbol in each Huffman-coded
+ * alphabet would be required to output the items (matches and literals) along
+ * the path.
+ *
+ * Note that the path will be walked backwards (from the end of the block to the
+ * beginning of the block), but this doesn't matter because this function only
+ * computes frequencies.
+ */
static inline void
-lzx_record_item_list(struct lzx_compressor *c,
- struct lzx_optimum_node *cur_node,
- struct lzx_item **next_chosen_item)
+lzx_tally_item_list(struct lzx_compressor *c, u32 block_length, bool is_16_bit)
{
- struct lzx_optimum_node *end_node;
- u32 saved_item;
- u32 item;
+ u32 node_idx = block_length;
+
+ for (;;) {
+ u32 item;
+ u32 len;
+ u32 offset_data;
+ unsigned v;
+ unsigned offset_slot;
+
+ /* Tally literals until either a match or the beginning of the
+ * block is reached. */
+ for (;;) {
+ item = c->optimum_nodes[node_idx].item;
+ if (item & OPTIMUM_LEN_MASK)
+ break;
+ c->freqs.main[item >> OPTIMUM_OFFSET_SHIFT]++;
+ node_idx--;
+ }
- /* The list is currently in reverse order (last item to first item).
- * Reverse it. */
- end_node = cur_node;
- saved_item = cur_node->item;
- do {
- item = saved_item;
- cur_node -= item & OPTIMUM_LEN_MASK;
- saved_item = cur_node->item;
- cur_node->item = item;
- } while (cur_node != c->optimum_nodes);
-
- /* Walk the list of items from beginning to end, tallying and recording
- * each item. */
- do {
- lzx_declare_item(c, cur_node->item, next_chosen_item);
- cur_node += (cur_node->item) & OPTIMUM_LEN_MASK;
- } while (cur_node != end_node);
+ if (item & OPTIMUM_EXTRA_FLAG) {
+
+ if (node_idx == 0)
+ break;
+
+ /* Tally a rep0 match. */
+ len = item & OPTIMUM_LEN_MASK;
+ v = len - LZX_MIN_MATCH_LEN;
+ if (v >= LZX_NUM_PRIMARY_LENS) {
+ c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
+ v = LZX_NUM_PRIMARY_LENS;
+ }
+ c->freqs.main[LZX_NUM_CHARS + v]++;
+
+ /* Tally a literal. */
+ c->freqs.main[c->optimum_nodes[node_idx].extra_literal]++;
+
+ item = c->optimum_nodes[node_idx].extra_match;
+ node_idx -= len + 1;
+ }
+
+ len = item & OPTIMUM_LEN_MASK;
+ offset_data = item >> OPTIMUM_OFFSET_SHIFT;
+
+ node_idx -= len;
+
+ /* Tally a match. */
+
+ /* Tally the aligned offset symbol if needed. */
+ if (offset_data >= 16)
+ c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
+
+ /* Tally the length symbol if needed. */
+ v = len - LZX_MIN_MATCH_LEN;
+ if (v >= LZX_NUM_PRIMARY_LENS) {
+ c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
+ v = LZX_NUM_PRIMARY_LENS;
+ }
+
+ /* Tally the main symbol. */
+ offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
+ v += offset_slot * LZX_NUM_LEN_HEADERS;
+ c->freqs.main[LZX_NUM_CHARS + v]++;
+ }
}
-static inline void
-lzx_tally_item_list(struct lzx_compressor *c, struct lzx_optimum_node *cur_node)
+/*
+ * Like lzx_tally_item_list(), but this function also generates the list of
+ * lzx_sequences for the minimum-cost path and writes it to c->chosen_sequences,
+ * ready to be output to the bitstream after the Huffman codes are computed.
+ * The lzx_sequences will be written to decreasing memory addresses as the path
+ * is walked backwards, which means they will end up in the expected
+ * first-to-last order. The return value is the index in c->chosen_sequences at
+ * which the lzx_sequences begin.
+ */
+static inline u32
+lzx_record_item_list(struct lzx_compressor *c, u32 block_length, bool is_16_bit)
{
- /* Since we're just tallying the items, we don't need to reverse the
- * list. Processing the items in reverse order is fine. */
- do {
- lzx_declare_item(c, cur_node->item, NULL);
- cur_node -= (cur_node->item & OPTIMUM_LEN_MASK);
- } while (cur_node != c->optimum_nodes);
+ u32 node_idx = block_length;
+ u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
+ u32 lit_start_node;
+
+ /* Special value to mark last sequence */
+ c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = 0x80000000;
+
+ lit_start_node = node_idx;
+ for (;;) {
+ u32 item;
+ u32 len;
+ u32 offset_data;
+ unsigned v;
+ unsigned offset_slot;
+
+ /* Tally literals until either a match or the beginning of the
+ * block is reached. */
+ for (;;) {
+ item = c->optimum_nodes[node_idx].item;
+ if (item & OPTIMUM_LEN_MASK)
+ break;
+ c->freqs.main[item >> OPTIMUM_OFFSET_SHIFT]++;
+ node_idx--;
+ }
+
+ if (item & OPTIMUM_EXTRA_FLAG) {
+
+ if (node_idx == 0)
+ break;
+
+ /* Save the literal run length for the next sequence
+ * (the "previous sequence" when walking backwards). */
+ len = item & OPTIMUM_LEN_MASK;
+ c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
+ seq_idx--;
+ lit_start_node = node_idx - len;
+
+ /* Tally a rep0 match. */
+ v = len - LZX_MIN_MATCH_LEN;
+ c->chosen_sequences[seq_idx].adjusted_length = v;
+ if (v >= LZX_NUM_PRIMARY_LENS) {
+ c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
+ v = LZX_NUM_PRIMARY_LENS;
+ }
+ c->freqs.main[LZX_NUM_CHARS + v]++;
+ c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr = v;
+
+ /* Tally a literal. */
+ c->freqs.main[c->optimum_nodes[node_idx].extra_literal]++;
+
+ item = c->optimum_nodes[node_idx].extra_match;
+ node_idx -= len + 1;
+ }
+
+ len = item & OPTIMUM_LEN_MASK;
+ offset_data = item >> OPTIMUM_OFFSET_SHIFT;
+
+ /* Save the literal run length for the next sequence (the
+ * "previous sequence" when walking backwards). */
+ c->chosen_sequences[seq_idx--].litrunlen = lit_start_node - node_idx;
+ node_idx -= len;
+ lit_start_node = node_idx;
+
+ /* Record a match. */
+
+ /* Tally the aligned offset symbol if needed. */
+ if (offset_data >= 16)
+ c->freqs.aligned[offset_data & LZX_ALIGNED_OFFSET_BITMASK]++;
+
+ /* Save the adjusted length. */
+ v = len - LZX_MIN_MATCH_LEN;
+ c->chosen_sequences[seq_idx].adjusted_length = v;
+
+ /* Tally the length symbol if needed. */
+ if (v >= LZX_NUM_PRIMARY_LENS) {
+ c->freqs.len[v - LZX_NUM_PRIMARY_LENS]++;
+ v = LZX_NUM_PRIMARY_LENS;
+ }
+
+ /* Tally the main symbol. */
+ offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
+ v += offset_slot * LZX_NUM_LEN_HEADERS;
+ c->freqs.main[LZX_NUM_CHARS + v]++;
+
+ /* Save the adjusted offset and match header. */
+ c->chosen_sequences[seq_idx].adjusted_offset_and_match_hdr =
+ (offset_data << 9) | v;
+ }
+
+ /* Save the literal run length for the first sequence. */
+ c->chosen_sequences[seq_idx].litrunlen = lit_start_node - node_idx;
+
+ /* Return the index in c->chosen_sequences at which the lzx_sequences
+ * begin. */
+ return seq_idx;
}
/*
* Find an inexpensive path through the graph of possible match/literal choices
* for the current block. The nodes of the graph are
- * c->optimum_nodes[0...block_size]. They correspond directly to the bytes in
+ * c->optimum_nodes[0...block_length]. They correspond directly to the bytes in
* the current block, plus one extra node for end-of-block. The edges of the
* graph are matches and literals. The goal is to find the minimum cost path
- * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_size]'.
+ * from 'c->optimum_nodes[0]' to 'c->optimum_nodes[block_length]', given the cost
+ * model 'c->costs'.
*
* The algorithm works forwards, starting at 'c->optimum_nodes[0]' and
* proceeding forwards one node at a time. At each node, a selection of matches
* Also, note that because of the presence of the recent offsets queue (which is
* a type of adaptive state), the algorithm cannot work backwards and compute
* "cost to end" instead of "cost to beginning". Furthermore, the way the
- * algorithm handles this adaptive state in the "minimum-cost" parse is actually
+ * algorithm handles this adaptive state in the "minimum cost" parse is actually
* only an approximation. It's possible for the globally optimal, minimum cost
* path to contain a prefix, ending at a position, where that path prefix is
* *not* the minimum cost path to that position. This can happen if such a path
* later. The algorithm does not solve this problem; it only considers the
* lowest cost to reach each individual position.
*/
-static struct lzx_lru_queue
+static inline struct lzx_lru_queue
lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
const u8 * const restrict block_begin,
- const u32 block_size,
- const struct lzx_lru_queue initial_queue)
+ const u32 block_length,
+ const struct lzx_lru_queue initial_queue,
+ bool is_16_bit)
{
struct lzx_optimum_node *cur_node = c->optimum_nodes;
- struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
struct lz_match *cache_ptr = c->match_cache;
const u8 *in_next = block_begin;
- const u8 * const block_end = block_begin + block_size;
+ const u8 * const block_end = block_begin + block_length;
/* Instead of storing the match offset LRU queues in the
* 'lzx_optimum_node' structures, we save memory (and cache lines) by
* it is no longer needed. */
struct lzx_lru_queue queues[512];
- BUILD_BUG_ON(ARRAY_LEN(queues) < LZX_MAX_MATCH_LEN + 1);
+ STATIC_ASSERT(ARRAY_LEN(queues) >= LZX_MAX_MATCH_LEN + 1);
#define QUEUE(in) (queues[(uintptr_t)(in) % ARRAY_LEN(queues)])
/* Initially, the cost to reach each node is "infinity". */
memset(c->optimum_nodes, 0xFF,
- (block_size + 1) * sizeof(c->optimum_nodes[0]));
+ (block_length + 1) * sizeof(c->optimum_nodes[0]));
QUEUE(block_begin) = initial_queue;
- /* The following loop runs 'block_size' iterations, one per node. */
+ /* The following loop runs 'block_length' iterations, one per node. */
do {
unsigned num_matches;
unsigned literal;
u32 cost;
+ struct lz_match *matches;
/*
* A selection of matches for the block was already saved in
num_matches = cache_ptr->length;
cache_ptr++;
+ matches = cache_ptr;
if (num_matches) {
- struct lz_match *end_matches = cache_ptr + num_matches;
unsigned next_len = LZX_MIN_MATCH_LEN;
unsigned max_len = min(block_end - in_next, LZX_MAX_MATCH_LEN);
const u8 *matchptr;
matchptr = in_next - lzx_lru_queue_R0(QUEUE(in_next));
if (load_u16_unaligned(matchptr) != load_u16_unaligned(in_next))
goto R0_done;
- BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2);
+ STATIC_ASSERT(LZX_MIN_MATCH_LEN == 2);
do {
u32 cost = cur_node->cost +
c->costs.match_cost[0][
(cur_node + next_len)->item =
(0 << OPTIMUM_OFFSET_SHIFT) | next_len;
}
- if (unlikely(++next_len > max_len)) {
- cache_ptr = end_matches;
+ if (unlikely(++next_len > max_len))
goto done_matches;
- }
} while (in_next[next_len - 1] == matchptr[next_len - 1]);
R0_done:
(cur_node + next_len)->item =
(1 << OPTIMUM_OFFSET_SHIFT) | next_len;
}
- if (unlikely(++next_len > max_len)) {
- cache_ptr = end_matches;
+ if (unlikely(++next_len > max_len))
goto done_matches;
- }
} while (in_next[next_len - 1] == matchptr[next_len - 1]);
R1_done:
(cur_node + next_len)->item =
(2 << OPTIMUM_OFFSET_SHIFT) | next_len;
}
- if (unlikely(++next_len > max_len)) {
- cache_ptr = end_matches;
+ if (unlikely(++next_len > max_len))
goto done_matches;
- }
} while (in_next[next_len - 1] == matchptr[next_len - 1]);
R2_done:
-
- while (next_len > cache_ptr->length)
- if (++cache_ptr == end_matches)
+ matches = cache_ptr;
+ cache_ptr += num_matches - 1;
+ while (next_len > cache_ptr->length) {
+ if (cache_ptr == matches)
goto done_matches;
+ cache_ptr--;
+ }
/* Consider explicit offset matches */
- do {
+ for (;;) {
u32 offset = cache_ptr->offset;
u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
- unsigned offset_slot = (offset < LZX_NUM_FAST_OFFSETS) ?
- lzx_get_offset_slot_fast(c, offset) :
- lzx_get_offset_slot(offset);
+ unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
+ is_16_bit);
+ u32 base_cost = cur_node->cost;
+ u32 cost;
+
+ #if LZX_CONSIDER_ALIGNED_COSTS
+ if (offset_data >= 16)
+ base_cost += c->costs.aligned[offset_data &
+ LZX_ALIGNED_OFFSET_BITMASK];
+ #endif
do {
- u32 cost = cur_node->cost +
- c->costs.match_cost[offset_slot][
+ cost = base_cost +
+ c->costs.match_cost[offset_slot][
next_len - LZX_MIN_MATCH_LEN];
- #if LZX_CONSIDER_ALIGNED_COSTS
- if (lzx_extra_offset_bits[offset_slot] >=
- LZX_NUM_ALIGNED_OFFSET_BITS)
- cost += c->costs.aligned[offset_data &
- LZX_ALIGNED_OFFSET_BITMASK];
- #endif
if (cost < (cur_node + next_len)->cost) {
(cur_node + next_len)->cost = cost;
(cur_node + next_len)->item =
(offset_data << OPTIMUM_OFFSET_SHIFT) | next_len;
}
} while (++next_len <= cache_ptr->length);
- } while (++cache_ptr != end_matches);
+
+ if (cache_ptr == matches) {
+ /* Consider match + lit + rep0 */
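+ /* That is: the longest explicit offset match, then one literal, then a
+  * rep0 match reusing the same offset.  Costing this three-item gadget as
+  * a unit lets the parser find it even when the pieces look unprofitable
+  * on their own. */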
+ u32 remaining = block_end - (in_next + next_len);
+ if (likely(remaining >= 2)) {
+ const u8 *strptr = in_next + next_len;
+ const u8 *matchptr = strptr - offset;
+ if (unlikely(load_u16_unaligned(strptr) == load_u16_unaligned(matchptr))) {
+ u32 rep0_len = lz_extend(strptr, matchptr, 2,
+ min(remaining, LZX_MAX_MATCH_LEN));
+ u8 lit = strptr[-1];
+ cost += c->costs.main[lit] +
+ c->costs.match_cost[0][rep0_len - LZX_MIN_MATCH_LEN];
+ u32 total_len = next_len + rep0_len;
+ if (cost < (cur_node + total_len)->cost) {
+ (cur_node + total_len)->cost = cost;
+ (cur_node + total_len)->item =
+ OPTIMUM_EXTRA_FLAG | rep0_len;
+ (cur_node + total_len)->extra_literal = lit;
+ (cur_node + total_len)->extra_match =
+ (offset_data << OPTIMUM_OFFSET_SHIFT) | (next_len - 1);
+ }
+ }
+ }
+ break;
+ }
+ cache_ptr--;
+ }
}
done_matches:
+ cache_ptr = matches + num_matches;
/* Consider coding a literal.
*
* To avoid an extra branch, actually checking the preferability
* of coding the literal is integrated into the queue update
* code below. */
literal = *in_next++;
- cost = cur_node->cost +
- c->costs.main[lzx_main_symbol_for_literal(literal)];
+ cost = cur_node->cost + c->costs.main[literal];
/* Advance to the next position. */
cur_node++;
if (cost <= cur_node->cost) {
/* Literal: queue remains unchanged. */
cur_node->cost = cost;
- cur_node->item = (literal << OPTIMUM_OFFSET_SHIFT) | 1;
+ cur_node->item = (u32)literal << OPTIMUM_OFFSET_SHIFT;
QUEUE(in_next) = QUEUE(in_next - 1);
} else {
/* Match: queue update is needed. */
unsigned len = cur_node->item & OPTIMUM_LEN_MASK;
- u32 offset_data = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
+ u32 offset_data = (cur_node->item &
+ ~OPTIMUM_EXTRA_FLAG) >> OPTIMUM_OFFSET_SHIFT;
if (offset_data >= LZX_NUM_RECENT_OFFSETS) {
/* Explicit offset match: insert offset at front */
QUEUE(in_next) =
lzx_lru_queue_push(QUEUE(in_next - len),
offset_data - LZX_OFFSET_ADJUSTMENT);
+ } else if (cur_node->item & OPTIMUM_EXTRA_FLAG) {
+ /* Explicit offset match, then literal, then
+ * rep0 match: insert offset at front */
+ len += 1 + (cur_node->extra_match & OPTIMUM_LEN_MASK);
+ QUEUE(in_next) =
+ lzx_lru_queue_push(QUEUE(in_next - len),
+ (cur_node->extra_match >> OPTIMUM_OFFSET_SHIFT) -
+ LZX_OFFSET_ADJUSTMENT);
} else {
/* Repeat offset match: swap offset to front */
QUEUE(in_next) =
offset_data);
}
}
- } while (cur_node != end_node);
+ } while (in_next != block_end);
- /* Return the match offset queue at the end of the minimum-cost path. */
+ /* Return the match offset queue at the end of the minimum cost path. */
return QUEUE(block_end);
}
static void
lzx_compute_match_costs(struct lzx_compressor *c)
{
- unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
+ unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) /
+ LZX_NUM_LEN_HEADERS;
struct lzx_costs *costs = &c->costs;
for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
- unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
+ unsigned main_symbol = LZX_NUM_CHARS + (offset_slot *
+ LZX_NUM_LEN_HEADERS);
unsigned i;
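+ /* (Offset slots >= 8 are exactly those with at least
+  * LZX_NUM_ALIGNED_OFFSET_BITS == 3 extra offset bits, i.e. those whose
+  * low 3 offset bits are coded using the aligned offset code rather than
+  * verbatim.) */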
#if LZX_CONSIDER_ALIGNED_COSTS
- if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
+ if (offset_slot >= 8)
extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
#endif
/* Set default LZX Huffman symbol costs to bootstrap the iterative optimization
* algorithm. */
static void
-lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_size)
+lzx_set_default_costs(struct lzx_compressor *c, const u8 *block, u32 block_length)
{
u32 i;
bool have_byte[256];
unsigned num_used_bytes;
- /* The costs below are hard coded to use a scaling factor of 16. */
- BUILD_BUG_ON(LZX_BIT_COST != 16);
+ /* The costs below are hard coded to use a scaling factor of 64. */
+ STATIC_ASSERT(LZX_BIT_COST == 64);
/*
* Heuristics:
for (i = 0; i < 256; i++)
have_byte[i] = false;
- for (i = 0; i < block_size; i++)
+ for (i = 0; i < block_length; i++)
have_byte[block[i]] = true;
num_used_bytes = 0;
num_used_bytes += have_byte[i];
for (i = 0; i < 256; i++)
- c->costs.main[i] = 140 - (256 - num_used_bytes) / 4;
+ c->costs.main[i] = 560 - (256 - num_used_bytes);
for (; i < c->num_main_syms; i++)
- c->costs.main[i] = 170;
+ c->costs.main[i] = 680;
for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
- c->costs.len[i] = 103 + (i / 4);
+ c->costs.len[i] = 412 + i;
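+ /* For example, with LZX_BIT_COST == 64 these defaults amount to
+  * 560/64 = 8.75 bits per literal when all 256 byte values occur,
+  * 680/64 = 10.625 bits per match header symbol, and 412/64 ~= 6.44 bits
+  * for the shortest length symbol, increasing by 1/64 bit per length. */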
#if LZX_CONSIDER_ALIGNED_COSTS
for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
/* Update the current cost model to reflect the computed Huffman codes. */
static void
-lzx_update_costs(struct lzx_compressor *c)
+lzx_set_costs_from_codes(struct lzx_compressor *c)
{
unsigned i;
const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
- for (i = 0; i < c->num_main_syms; i++)
- c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
+ for (i = 0; i < c->num_main_syms; i++) {
+ c->costs.main[i] = (lens->main[i] ? lens->main[i] :
+ MAIN_CODEWORD_LIMIT) * LZX_BIT_COST;
+ }
- for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
- c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
+ for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
+ c->costs.len[i] = (lens->len[i] ? lens->len[i] :
+ LENGTH_CODEWORD_LIMIT) * LZX_BIT_COST;
+ }
#if LZX_CONSIDER_ALIGNED_COSTS
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
+ for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+ c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] :
+ ALIGNED_CODEWORD_LIMIT) * LZX_BIT_COST;
+ }
#endif
lzx_compute_match_costs(c);
}
-static struct lzx_lru_queue
-lzx_optimize_and_write_block(struct lzx_compressor *c,
- struct lzx_output_bitstream *os,
- const u8 *block_begin, const u32 block_size,
- const struct lzx_lru_queue initial_queue)
+/*
+ * Choose a "near-optimal" literal/match sequence to use for the current block.
+ * Because the cost of each Huffman symbol is unknown until the Huffman codes
+ * have been built and the Huffman codes themselves depend on the symbol
+ * frequencies, this uses an iterative optimization algorithm to approximate an
+ * optimal solution. The first optimization pass for the block uses default
+ * costs. Additional passes use costs taken from the Huffman codes computed in
+ * the previous pass.
+ */
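+/*
+ * For example, with c->num_optim_passes == 2: pass 1 parses the block with
+ * the default costs, the Huffman codes implied by pass 1's symbol
+ * frequencies are then built and turned back into costs, and pass 2
+ * re-parses with those refined costs before the block is output.
+ */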
+static inline struct lzx_lru_queue
+lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
+ struct lzx_output_bitstream * const restrict os,
+ const u8 * const restrict block_begin,
+ const u32 block_length,
+ const struct lzx_lru_queue initial_queue,
+ bool is_16_bit)
{
unsigned num_passes_remaining = c->num_optim_passes;
- struct lzx_item *next_chosen_item;
struct lzx_lru_queue new_queue;
+ u32 seq_idx;
- /* The first optimization pass uses a default cost model. Each
- * additional optimization pass uses a cost model derived from the
- * Huffman code computed in the previous pass. */
+ lzx_set_default_costs(c, block_begin, block_length);
- lzx_set_default_costs(c, block_begin, block_size);
- lzx_reset_symbol_frequencies(c);
- do {
- new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
- initial_queue);
- if (num_passes_remaining > 1) {
- lzx_tally_item_list(c, c->optimum_nodes + block_size);
- lzx_make_huffman_codes(c);
- lzx_update_costs(c);
- lzx_reset_symbol_frequencies(c);
- }
- } while (--num_passes_remaining);
+ for (;;) {
+ new_queue = lzx_find_min_cost_path(c, block_begin, block_length,
+ initial_queue, is_16_bit);
+
+ if (--num_passes_remaining == 0)
+ break;
+
+ /* At least one iteration remains; update the costs. */
+ lzx_reset_symbol_frequencies(c);
+ lzx_tally_item_list(c, block_length, is_16_bit);
+ lzx_make_huffman_codes(c);
+ lzx_set_costs_from_codes(c);
+ }
- next_chosen_item = c->chosen_items;
- lzx_record_item_list(c, c->optimum_nodes + block_size, &next_chosen_item);
- lzx_finish_block(c, os, block_size, next_chosen_item - c->chosen_items);
+ /* Done optimizing. Generate the sequence list and flush the block. */
+ lzx_reset_symbol_frequencies(c);
+ seq_idx = lzx_record_item_list(c, block_length, is_16_bit);
+ lzx_flush_block(c, os, block_begin, block_length, seq_idx);
return new_queue;
}
/*
* This is the "near-optimal" LZX compressor.  Its goal is not to find the
* truly optimal parse, which would take an impractical amount of
* time, but rather to produce a compression ratio significantly better than a
* simpler "greedy" or "lazy" parse while still being relatively fast.
*/
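+/*
+ * Note: this function and lzx_optimize_and_write_block() are 'inline' and
+ * take a constant 'is_16_bit' argument so that the _16 and _32 wrappers
+ * below each compile into a version specialized for the window size.
+ */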
-static void
-lzx_compress_near_optimal(struct lzx_compressor *c,
- struct lzx_output_bitstream *os)
+static inline void
+lzx_compress_near_optimal(struct lzx_compressor * restrict c,
+ const u8 * const restrict in_begin,
+ struct lzx_output_bitstream * restrict os,
+ bool is_16_bit)
{
- const u8 * const in_begin = c->in_buffer;
const u8 * in_next = in_begin;
const u8 * const in_end = in_begin + c->in_nbytes;
- unsigned max_len = LZX_MAX_MATCH_LEN;
- unsigned nice_len = min(c->nice_match_length, max_len);
- u32 next_hash;
struct lzx_lru_queue queue;
- bt_matchfinder_init(&c->bt_mf);
- matchfinder_init(c->hash2_tab, LZX_HASH2_LENGTH);
- next_hash = bt_matchfinder_hash_3_bytes(in_next);
+ lcpit_matchfinder_load_buffer(&c->lcpit_mf, in_begin, c->in_nbytes);
lzx_lru_queue_init(&queue);
do {
/* Starting a new block */
const u8 * const in_block_begin = in_next;
- const u8 * const in_block_end =
- in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
-
- /* Run the block through the matchfinder and cache the matches. */
+ const u8 * const in_max_block_end =
+ in_next + min(SOFT_MAX_BLOCK_LENGTH, in_end - in_next);
struct lz_match *cache_ptr = c->match_cache;
- do {
- struct lz_match *lz_matchptr;
- u32 hash2;
- pos_t cur_match;
- unsigned best_len;
+ const u8 *next_observation = in_next;
+ const u8 *next_pause_point = min(in_next + MIN_BLOCK_LENGTH,
+ in_max_block_end - LZX_MAX_MATCH_LEN - 1);
- /* If approaching the end of the input buffer, adjust
- * 'max_len' and 'nice_len' accordingly. */
- if (unlikely(max_len > in_end - in_next)) {
- max_len = in_end - in_next;
- nice_len = min(max_len, nice_len);
+ init_block_split_stats(&c->split_stats);
- /* This extra check is needed to ensure that
- * reading the next 3 bytes when looking for a
- * length 2 match is valid. In addition, we
- * cannot allow ourselves to find a length 2
- * match of the very last two bytes with the
- * very first two bytes, since such a match has
- * an offset too large to be represented. */
- if (unlikely(max_len <
- max(LZ_HASH_REQUIRED_NBYTES, 3)))
- {
- in_next++;
- cache_ptr->length = 0;
- cache_ptr++;
- continue;
+ /* Run the block through the matchfinder and cache the matches. */
+ enter_mf_loop:
+ do {
+ u32 num_matches;
+ u32 best_len = 0;
+
+ num_matches = lcpit_matchfinder_get_matches(&c->lcpit_mf, cache_ptr + 1);
+ cache_ptr->length = num_matches;
+ if (num_matches)
+ best_len = cache_ptr[1].length;
+
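+ /* Feed the block splitter at most one observation per position;
+  * positions covered by an already-observed match are skipped via
+  * 'next_observation' so that long matches do not flood the
+  * statistics. */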
+ if (in_next >= next_observation) {
+ if (best_len) {
+ observe_match(&c->split_stats, best_len);
+ next_observation = in_next + best_len;
+ } else {
+ observe_literal(&c->split_stats, *in_next);
+ next_observation = in_next + 1;
}
}
- lz_matchptr = cache_ptr + 1;
-
- /* Check for a length 2 match. */
- hash2 = lz_hash_2_bytes(in_next);
- cur_match = c->hash2_tab[hash2];
- c->hash2_tab[hash2] = in_next - in_begin;
- if (matchfinder_node_valid(cur_match) &&
- (LZX_HASH2_ORDER == 16 ||
- load_u16_unaligned(&in_begin[cur_match]) ==
- load_u16_unaligned(in_next)) &&
- in_begin[cur_match + 2] != in_next[2])
- {
- lz_matchptr->length = 2;
- lz_matchptr->offset = in_next - &in_begin[cur_match];
- lz_matchptr++;
- }
-
- /* Check for matches of length >= 3. */
- lz_matchptr = bt_matchfinder_get_matches(&c->bt_mf,
- in_begin,
- in_next,
- 3,
- max_len,
- nice_len,
- c->max_search_depth,
- &next_hash,
- &best_len,
- lz_matchptr);
- in_next++;
- cache_ptr->length = lz_matchptr - (cache_ptr + 1);
- cache_ptr = lz_matchptr;
-
/*
* If there was a very long match found, then don't
* cache any matches for the bytes covered by that
* match.  This avoids degenerate behavior when compressing
* highly redundant data, where the number of matches can
* become very large.  And it loses little: if there was a
* very long match, then the
* data must be highly compressible, so it doesn't
* matter as much what we do.
*/
- if (best_len >= nice_len) {
- --best_len;
- do {
- if (unlikely(max_len > in_end - in_next)) {
- max_len = in_end - in_next;
- nice_len = min(max_len, nice_len);
- if (unlikely(max_len <
- max(LZ_HASH_REQUIRED_NBYTES, 3)))
- {
- in_next++;
- cache_ptr->length = 0;
- cache_ptr++;
- continue;
- }
- }
- c->hash2_tab[lz_hash_2_bytes(in_next)] =
- in_next - in_begin;
- bt_matchfinder_skip_position(&c->bt_mf,
- in_begin,
- in_next,
- in_end,
- nice_len,
- c->max_search_depth,
- &next_hash);
- in_next++;
+ if (best_len >= c->nice_match_length) {
+ best_len = lz_extend(in_next, in_next - cache_ptr[1].offset,
+ best_len,
+ min(LZX_MAX_MATCH_LEN,
+ in_end - in_next));
+ cache_ptr[1].length = best_len;
+ lcpit_matchfinder_skip_bytes(&c->lcpit_mf, best_len - 1);
+ cache_ptr += 1 + num_matches;
+ for (u32 i = 0; i < best_len - 1; i++) {
cache_ptr->length = 0;
cache_ptr++;
- } while (--best_len);
+ }
+ in_next += best_len;
+ next_observation = in_next;
+ } else {
+ cache_ptr += 1 + num_matches;
+ in_next++;
}
- } while (in_next < in_block_end &&
- likely(cache_ptr < c->cache_overflow_mark));
+ } while (in_next < next_pause_point &&
+ likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
+ if (unlikely(cache_ptr >= &c->match_cache[LZX_CACHE_LENGTH]))
+ goto flush_block;
+
+ if (in_next >= in_max_block_end)
+ goto flush_block;
+
+ if (c->split_stats.num_new_observations >= NUM_OBSERVATIONS_PER_BLOCK_CHECK) {
+ if (do_end_block_check(&c->split_stats, in_next - in_block_begin))
+ goto flush_block;
+ if (in_max_block_end - in_next <= MIN_BLOCK_LENGTH)
+ next_observation = in_max_block_end;
+ }
+
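+ /* Schedule the next pause for roughly when enough new observations
+  * will have accumulated to run another split check, but never within
+  * LZX_MAX_MATCH_LEN + 1 bytes of the maximum block end. */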
+ next_pause_point = min(in_next +
+ NUM_OBSERVATIONS_PER_BLOCK_CHECK * 2 -
+ c->split_stats.num_new_observations,
+ in_max_block_end - LZX_MAX_MATCH_LEN - 1);
+ goto enter_mf_loop;
+
+ flush_block:
/* We've finished running the block through the matchfinder.
* Now choose a match/literal sequence and write the block. */
queue = lzx_optimize_and_write_block(c, os, in_block_begin,
in_next - in_block_begin,
- queue);
+ queue, is_16_bit);
} while (in_next != in_end);
}
+static void
+lzx_compress_near_optimal_16(struct lzx_compressor *c,
+ struct lzx_output_bitstream *os)
+{
+ lzx_compress_near_optimal(c, c->in_buffer, os, true);
+}
+
+static void
+lzx_compress_near_optimal_32(struct lzx_compressor *c,
+ struct lzx_output_bitstream *os)
+{
+ lzx_compress_near_optimal(c, c->in_buffer, os, false);
+}
+
/*
* Given a pointer to the current byte sequence and the current list of recent
* match offsets, find the longest repeat offset match.
static unsigned
lzx_find_longest_repeat_offset_match(const u8 * const in_next,
const u32 bytes_remaining,
- struct lzx_lru_queue queue,
+ const u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
unsigned *rep_max_idx_ret)
{
- BUILD_BUG_ON(LZX_NUM_RECENT_OFFSETS != 3);
- LZX_ASSERT(bytes_remaining >= 2);
+ STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
const u16 next_2_bytes = load_u16_unaligned(in_next);
unsigned rep_max_idx;
unsigned rep_len;
unsigned rep_max_len;
const u8 *matchptr;
- matchptr = in_next - lzx_lru_queue_pop(&queue);
+ matchptr = in_next - recent_offsets[0];
if (load_u16_unaligned(matchptr) == next_2_bytes)
rep_max_len = lz_extend(in_next, matchptr, 2, max_len);
else
rep_max_len = 0;
rep_max_idx = 0;
- matchptr = in_next - lzx_lru_queue_pop(&queue);
+ matchptr = in_next - recent_offsets[1];
if (load_u16_unaligned(matchptr) == next_2_bytes) {
rep_len = lz_extend(in_next, matchptr, 2, max_len);
if (rep_len > rep_max_len) {
}
}
- matchptr = in_next - lzx_lru_queue_pop(&queue);
+ matchptr = in_next - recent_offsets[2];
if (load_u16_unaligned(matchptr) == next_2_bytes) {
rep_len = lz_extend(in_next, matchptr, 2, max_len);
if (rep_len > rep_max_len) {
}
/* This is the "lazy" LZX compressor. */
-static void
-lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
+static inline void
+lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
+ bool is_16_bit)
{
const u8 * const in_begin = c->in_buffer;
const u8 * in_next = in_begin;
const u8 * const in_end = in_begin + c->in_nbytes;
unsigned max_len = LZX_MAX_MATCH_LEN;
unsigned nice_len = min(c->nice_match_length, max_len);
- struct lzx_lru_queue queue;
+ STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
+ u32 recent_offsets[3] = {1, 1, 1};
+ u32 next_hashes[2] = {};
- hc_matchfinder_init(&c->hc_mf);
- lzx_lru_queue_init(&queue);
+ CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
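+ /* CALL_HC_MF() (defined earlier, not visible in this hunk) presumably
+  * dispatches to the 16- or 32-bit build of the hash-chain matchfinder;
+  * 'is_16_bit' is constant in each of the _16/_32 wrappers below, so the
+  * dispatch costs nothing at runtime. */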
do {
/* Starting a new block */
const u8 * const in_block_begin = in_next;
- const u8 * const in_block_end =
- in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
- struct lzx_item *next_chosen_item = c->chosen_items;
+ const u8 * const in_max_block_end =
+ in_next + min(SOFT_MAX_BLOCK_LENGTH, in_end - in_next);
+ struct lzx_sequence *next_seq = c->chosen_sequences;
unsigned cur_len;
u32 cur_offset;
u32 cur_offset_data;
unsigned rep_max_idx;
unsigned rep_score;
unsigned skip_len;
+ u32 litrunlen = 0;
lzx_reset_symbol_frequencies(c);
+ init_block_split_stats(&c->split_stats);
do {
if (unlikely(max_len > in_end - in_next)) {
/* Find the longest match at the current position. */
- cur_len = hc_matchfinder_longest_match(&c->hc_mf,
- in_begin,
- in_next,
- 2,
- max_len,
- nice_len,
- c->max_search_depth,
- &cur_offset);
+ cur_len = CALL_HC_MF(is_16_bit, c,
+ hc_matchfinder_longest_match,
+ in_begin,
+ in_next - in_begin,
+ 2,
+ max_len,
+ nice_len,
+ c->max_search_depth,
+ next_hashes,
+ &cur_offset);
if (cur_len < 3 ||
(cur_len == 3 &&
cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
- cur_offset != lzx_lru_queue_R0(queue) &&
- cur_offset != lzx_lru_queue_R1(queue) &&
- cur_offset != lzx_lru_queue_R2(queue)))
+ cur_offset != recent_offsets[0] &&
+ cur_offset != recent_offsets[1] &&
+ cur_offset != recent_offsets[2]))
{
/* There was no match found, or the only match found
* was a distant length 3 match. Output a literal. */
- lzx_declare_literal(c, *in_next++,
- &next_chosen_item);
+ lzx_record_literal(c, *in_next, &litrunlen);
+ observe_literal(&c->split_stats, *in_next);
+ in_next++;
continue;
}
- if (cur_offset == lzx_lru_queue_R0(queue)) {
+ observe_match(&c->split_stats, cur_len);
+
+ if (cur_offset == recent_offsets[0]) {
in_next++;
cur_offset_data = 0;
skip_len = cur_len - 1;
/* Consider a repeat offset match */
rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
in_end - in_next,
- queue,
+ recent_offsets,
&rep_max_idx);
in_next++;
nice_len = min(max_len, nice_len);
}
- next_len = hc_matchfinder_longest_match(&c->hc_mf,
- in_begin,
- in_next,
- cur_len - 2,
- max_len,
- nice_len,
- c->max_search_depth / 2,
- &next_offset);
+ next_len = CALL_HC_MF(is_16_bit, c,
+ hc_matchfinder_longest_match,
+ in_begin,
+ in_next - in_begin,
+ cur_len - 2,
+ max_len,
+ nice_len,
+ c->max_search_depth / 2,
+ next_hashes,
+ &next_offset);
if (next_len <= cur_len - 2) {
in_next++;
rep_max_len = lzx_find_longest_repeat_offset_match(in_next,
in_end - in_next,
- queue,
+ recent_offsets,
&rep_max_idx);
in_next++;
if (rep_score > cur_score) {
/* The next match is better, and it's a
* repeat offset match. */
- lzx_declare_literal(c, *(in_next - 2),
- &next_chosen_item);
+ lzx_record_literal(c, *(in_next - 2),
+ &litrunlen);
cur_len = rep_max_len;
cur_offset_data = rep_max_idx;
skip_len = cur_len - 1;
if (next_score > cur_score) {
/* The next match is better, and it's an
* explicit offset match. */
- lzx_declare_literal(c, *(in_next - 2),
- &next_chosen_item);
+ lzx_record_literal(c, *(in_next - 2),
+ &litrunlen);
cur_len = next_len;
cur_offset_data = next_offset_data;
cur_score = next_score;
skip_len = cur_len - 2;
choose_cur_match:
- if (cur_offset_data < LZX_NUM_RECENT_OFFSETS) {
- lzx_declare_repeat_offset_match(c, cur_len,
- cur_offset_data,
- &next_chosen_item);
- queue = lzx_lru_queue_swap(queue, cur_offset_data);
- } else {
- lzx_declare_explicit_offset_match(c, cur_len,
- cur_offset_data - LZX_OFFSET_ADJUSTMENT,
- &next_chosen_item);
- queue = lzx_lru_queue_push(queue, cur_offset_data - LZX_OFFSET_ADJUSTMENT);
- }
-
- hc_matchfinder_skip_positions(&c->hc_mf,
- in_begin,
- in_next,
- in_end,
- skip_len);
- in_next += skip_len;
- } while (in_next < in_block_end);
+ lzx_record_match(c, cur_len, cur_offset_data,
+ recent_offsets, is_16_bit,
+ &litrunlen, &next_seq);
+ in_next = CALL_HC_MF(is_16_bit, c,
+ hc_matchfinder_skip_positions,
+ in_begin,
+ in_next - in_begin,
+ in_end - in_begin,
+ skip_len,
+ next_hashes);
+ } while (in_next < in_max_block_end &&
+ !should_end_block(&c->split_stats, in_block_begin, in_next, in_end));
+
+ lzx_finish_sequence(next_seq, litrunlen);
+
+ lzx_flush_block(c, os, in_block_begin, in_next - in_block_begin, 0);
- lzx_finish_block(c, os, in_next - in_block_begin,
- next_chosen_item - c->chosen_items);
} while (in_next != in_end);
}
static void
-lzx_init_offset_slot_fast(struct lzx_compressor *c)
+lzx_compress_lazy_16(struct lzx_compressor *c, struct lzx_output_bitstream *os)
{
- u8 slot = 0;
+ lzx_compress_lazy(c, os, true);
+}
- for (u32 offset = 0; offset < LZX_NUM_FAST_OFFSETS; offset++) {
+static void
+lzx_compress_lazy_32(struct lzx_compressor *c, struct lzx_output_bitstream *os)
+{
+ lzx_compress_lazy(c, os, false);
+}
- while (offset + LZX_OFFSET_ADJUSTMENT >= lzx_offset_slot_base[slot + 1])
+/* Generate the acceleration tables for offset slots. */
+static void
+lzx_init_offset_slot_tabs(struct lzx_compressor *c)
+{
+ u32 adjusted_offset = 0;
+ unsigned slot = 0;
+
+ /* slots [0, 29] */
+ for (; adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1);
+ adjusted_offset++)
+ {
+ if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
slot++;
+ c->offset_slot_tab_1[adjusted_offset] = slot;
+ }
- c->offset_slot_fast[offset] = slot;
+ /* slots [30, 49] */
+ for (; adjusted_offset < LZX_MAX_WINDOW_SIZE;
+ adjusted_offset += (u32)1 << 14)
+ {
+ if (adjusted_offset >= lzx_offset_slot_base[slot + 1])
+ slot++;
+ c->offset_slot_tab_2[adjusted_offset >> 14] = slot;
}
}
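+
+/*
+ * For illustration, a lookup through these tables is expected to go along
+ * these lines (a sketch of the lzx_comp_get_offset_slot() helper used by
+ * the parser; its actual definition is not part of this hunk):
+ *
+ *	if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
+ *		return c->offset_slot_tab_1[adjusted_offset];
+ *	return c->offset_slot_tab_2[adjusted_offset >> 14];
+ *
+ * Indexing the second table by 'adjusted_offset >> 14' is exact because
+ * slots [30, 49] each have at least 14 extra offset bits, so every slot
+ * boundary past the first table is a multiple of 1 << 14; this is also
+ * why the initialization loop above can step by 1 << 14.
+ */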
lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
{
if (compression_level <= LZX_MAX_FAST_LEVEL) {
- return offsetof(struct lzx_compressor, hc_mf) +
- hc_matchfinder_size(max_bufsize);
+ if (lzx_is_16_bit(max_bufsize))
+ return offsetof(struct lzx_compressor, hc_mf_16) +
+ hc_matchfinder_size_16(max_bufsize);
+ else
+ return offsetof(struct lzx_compressor, hc_mf_32) +
+ hc_matchfinder_size_32(max_bufsize);
} else {
- return offsetof(struct lzx_compressor, bt_mf) +
- bt_matchfinder_size(max_bufsize);
+		/* The compressor-structure size here is the same in 16- and
+		 * 32-bit modes: the lcpit matchfinder's working memory is
+		 * accounted for separately (see lzx_get_needed_memory()). */
+		return offsetof(struct lzx_compressor, lcpit_mf) +
+		       sizeof(struct lcpit_matchfinder);
}
}
static u64
-lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level)
+lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
+ bool destructive)
{
u64 size = 0;
return 0;
size += lzx_get_compressor_size(max_bufsize, compression_level);
- size += max_bufsize; /* in_buffer */
+ if (!destructive)
+ size += max_bufsize; /* in_buffer */
+ if (compression_level > LZX_MAX_FAST_LEVEL)
+ size += lcpit_matchfinder_get_needed_memory(max_bufsize);
return size;
}
static int
lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
- void **c_ret)
+ bool destructive, void **c_ret)
{
unsigned window_order;
struct lzx_compressor *c;
if (window_order == 0)
return WIMLIB_ERR_INVALID_PARAM;
- c = ALIGNED_MALLOC(lzx_get_compressor_size(max_bufsize,
- compression_level),
- MATCHFINDER_ALIGNMENT);
+ c = MALLOC(lzx_get_compressor_size(max_bufsize, compression_level));
if (!c)
goto oom0;
+ c->destructive = destructive;
+
c->num_main_syms = lzx_get_num_main_syms(window_order);
c->window_order = window_order;
- c->in_buffer = MALLOC(max_bufsize);
- if (!c->in_buffer)
- goto oom1;
+ if (!c->destructive) {
+ c->in_buffer = MALLOC(max_bufsize);
+ if (!c->in_buffer)
+ goto oom1;
+ }
if (compression_level <= LZX_MAX_FAST_LEVEL) {
/* Fast compression: Use lazy parsing. */
- c->impl = lzx_compress_lazy;
- c->max_search_depth = (36 * compression_level) / 20;
- c->nice_match_length = min((72 * compression_level) / 20,
- LZX_MAX_MATCH_LEN);
-
+ if (lzx_is_16_bit(max_bufsize))
+ c->impl = lzx_compress_lazy_16;
+ else
+ c->impl = lzx_compress_lazy_32;
+ c->max_search_depth = (60 * compression_level) / 20;
+ c->nice_match_length = (80 * compression_level) / 20;
+
+ /* lzx_compress_lazy() needs max_search_depth >= 2 because it
+ * halves the max_search_depth when attempting a lazy match, and
+ * max_search_depth cannot be 0. */
+ if (c->max_search_depth < 2)
+ c->max_search_depth = 2;
} else {
/* Normal / high compression: Use near-optimal parsing. */
- c->impl = lzx_compress_near_optimal;
+ if (lzx_is_16_bit(max_bufsize))
+ c->impl = lzx_compress_near_optimal_16;
+ else
+ c->impl = lzx_compress_near_optimal_32;
/* Scale nice_match_length and max_search_depth with the
* compression level. */
c->max_search_depth = (24 * compression_level) / 50;
- c->nice_match_length = min((32 * compression_level) / 50,
- LZX_MAX_MATCH_LEN);
+ c->nice_match_length = (48 * compression_level) / 50;
/* Set a number of optimization passes appropriate for the
* compression level. */
if (compression_level >= 300)
c->num_optim_passes++;
}
-
- c->cache_overflow_mark = &c->match_cache[LZX_CACHE_LEN];
}
- lzx_init_offset_slot_fast(c);
+ /* max_search_depth == 0 is invalid. */
+ if (c->max_search_depth < 1)
+ c->max_search_depth = 1;
+
+ if (c->nice_match_length > LZX_MAX_MATCH_LEN)
+ c->nice_match_length = LZX_MAX_MATCH_LEN;
+
+ if (!lcpit_matchfinder_init(&c->lcpit_mf, max_bufsize,
+ LZX_MIN_MATCH_LEN, c->nice_match_length))
+ goto oom2;
+
+ lzx_init_offset_slot_tabs(c);
*c_ret = c;
return 0;
+oom2:
+ if (!c->destructive)
+ FREE(c->in_buffer);
oom1:
- ALIGNED_FREE(c);
+ FREE(c);
oom0:
return WIMLIB_ERR_NOMEM;
}
static size_t
-lzx_compress(const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail, void *_c)
+lzx_compress(const void *restrict in, size_t in_nbytes,
+ void *restrict out, size_t out_nbytes_avail, void *restrict _c)
{
struct lzx_compressor *c = _c;
struct lzx_output_bitstream os;
+ size_t result;
/* Don't bother trying to compress very small inputs. */
if (in_nbytes < 100)
return 0;
/* Copy the input data into the internal buffer and preprocess it. */
- memcpy(c->in_buffer, in, in_nbytes);
+ if (c->destructive)
+ c->in_buffer = (void *)in;
+ else
+ memcpy(c->in_buffer, in, in_nbytes);
c->in_nbytes = in_nbytes;
- lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
+ lzx_preprocess(c->in_buffer, in_nbytes);
/* Initially, the previous Huffman codeword lengths are all zeroes. */
c->codes_index = 0;
(*c->impl)(c, &os);
/* Flush the output bitstream and return the compressed size or 0. */
- return lzx_flush_output(&os);
+ result = lzx_flush_output(&os);
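+	/* In destructive mode the caller's buffer was preprocessed in place,
+	 * so if the data is being stored uncompressed instead (result == 0),
+	 * undo the preprocessing to hand the original data back. */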
+ if (!result && c->destructive)
+ lzx_postprocess(c->in_buffer, c->in_nbytes);
+ return result;
}
static void
{
struct lzx_compressor *c = _c;
- FREE(c->in_buffer);
- ALIGNED_FREE(c);
+ lcpit_matchfinder_destroy(&c->lcpit_mf);
+ if (!c->destructive)
+ FREE(c->in_buffer);
+ FREE(c);
}
const struct compressor_ops lzx_compressor_ops = {