*/
/*
- * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
+ * Copyright (C) 2012-2016 Eric Biggers
*
* This file is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the Free
#endif
/*
- * Start a new LZX block (with new Huffman codes) after this many bytes.
- *
- * Note: actual block sizes may slightly exceed this value.
- *
- * TODO: recursive splitting and cost evaluation might be good for an extremely
- * high compression mode, but otherwise it is almost always far too slow for how
- * much it helps. Perhaps some sort of heuristic would be useful?
+ * The compressor always chooses a block of at least MIN_BLOCK_SIZE bytes,
+ * unless the last block has to be shorter.
*/
-#define LZX_DIV_BLOCK_SIZE 32768
+#define MIN_BLOCK_SIZE 6500
/*
- * LZX_CACHE_PER_POS is the number of lz_match structures to reserve in the
- * match cache for each byte position. This value should be high enough so that
- * nearly the time, all matches found in a given block can fit in the match
- * cache. However, fallback behavior (immediately terminating the block) on
- * cache overflow is still required.
+ * The compressor attempts to end blocks after SOFT_MAX_BLOCK_SIZE bytes, but
+ * the final size might be larger due to matches extending beyond the end of the
+ * block. Specifically:
+ *
+ * - The greedy parser may choose an arbitrarily long match starting at the
+ * SOFT_MAX_BLOCK_SIZE'th byte.
+ *
+ * - The lazy parser may choose a sequence of literals starting at the
+ * SOFT_MAX_BLOCK_SIZE'th byte when it sees a sequence of increasingly good
+ * matches. The final match may be of arbitrary length. The length of the
+ * literal sequence is approximately limited by the "nice match length"
+ * parameter.
*/
-#define LZX_CACHE_PER_POS 7
+#define SOFT_MAX_BLOCK_SIZE 100000
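+
+/*
+ * Illustrative worst case (assuming LZX_MAX_MATCH_LEN == 257, the LZX
+ * maximum): the parser may commit to a match at the last position before the
+ * soft limit, so a block can reach SOFT_MAX_BLOCK_SIZE - 1 + 257 = 100256
+ * bytes. The 'optimum_nodes' array below is sized from exactly this bound.
+ */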
/*
* LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
- * excluding the extra "overflow" entries. The per-position multiplier is '1 +
- * LZX_CACHE_PER_POS' instead of 'LZX_CACHE_PER_POS' because there is an
- * overhead of one lz_match per position, used to hold the match count at that
- * position.
+ * excluding the extra "overflow" entries. This value should be high enough so
+ * that nearly all of the time, all matches found in a given block can fit in
+ * the match cache. However, fallback behavior (immediately terminating the
+ * block) on cache overflow is still required.
*/
-#define LZX_CACHE_LENGTH (LZX_DIV_BLOCK_SIZE * (1 + LZX_CACHE_PER_POS))
+#define LZX_CACHE_LENGTH (SOFT_MAX_BLOCK_SIZE * 5)
/*
* LZX_MAX_MATCHES_PER_POS is an upper bound on the number of matches that can
#define LZX_BIT_COST 16
/*
- * Consideration of aligned offset costs is disabled for now, due to
- * insufficient benefit gained from the time spent.
+ * Should the compressor take into account the costs of aligned offset symbols?
*/
-#define LZX_CONSIDER_ALIGNED_COSTS 0
+#define LZX_CONSIDER_ALIGNED_COSTS 1
/*
* LZX_MAX_FAST_LEVEL is the maximum compression level at which we use the
#define LZX_MAX_FAST_LEVEL 34
/*
- * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
- * for finding length 2 matches. This can be as high as 16 (in which case the
- * hash function is trivial), but using a smaller hash table speeds up
- * compression due to reduced cache pressure.
+ * BT_MATCHFINDER_HASH2_ORDER is the log base 2 of the number of entries in the
+ * hash table for finding length 2 matches. This could be as high as 16, but
+ * using a smaller hash table speeds up compression due to reduced cache
+ * pressure.
*/
-#define LZX_HASH2_ORDER 12
-#define LZX_HASH2_LENGTH (1UL << LZX_HASH2_ORDER)
+#define BT_MATCHFINDER_HASH2_ORDER 12
/*
* These are the compressor-side limits on the codeword lengths for each Huffman
#define ALIGNED_CODEWORD_LIMIT 7
#define PRE_CODEWORD_LIMIT 7
-#include "wimlib/lzx_common.h"
-
-/*
- * The maximum allowed window order for the matchfinder.
- */
-#define MATCHFINDER_MAX_WINDOW_ORDER LZX_MAX_WINDOW_ORDER
-
-#include <string.h>
-
-#include "wimlib/bt_matchfinder.h"
#include "wimlib/compress_common.h"
#include "wimlib/compressor_ops.h"
#include "wimlib/error.h"
-#include "wimlib/hc_matchfinder.h"
#include "wimlib/lz_extend.h"
+#include "wimlib/lzx_common.h"
#include "wimlib/unaligned.h"
#include "wimlib/util.h"
+/* Matchfinders with 16-bit positions */
+#define mf_pos_t u16
+#define MF_SUFFIX _16
+#include "wimlib/bt_matchfinder.h"
+#include "wimlib/hc_matchfinder.h"
+
+/* Matchfinders with 32-bit positions */
+#undef mf_pos_t
+#undef MF_SUFFIX
+#define mf_pos_t u32
+#define MF_SUFFIX _32
+#include "wimlib/bt_matchfinder.h"
+#include "wimlib/hc_matchfinder.h"
+
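+/*
+ * A minimal sketch of the macro-template trick used above (illustrative only;
+ * the real headers define more state, and the field name here is
+ * hypothetical). Each header pastes MF_SUFFIX onto its identifiers via
+ * CONCAT() and stores match positions as mf_pos_t:
+ *
+ *	struct CONCAT(bt_matchfinder, MF_SUFFIX) {
+ *		mf_pos_t hash_tab[...];		(hypothetical field)
+ *	};
+ *
+ * so the two inclusions yield bt_matchfinder_16 and bt_matchfinder_32, the
+ * u16 variant using half the memory for buffers small enough for 16-bit
+ * positions.
+ */
+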
struct lzx_output_bitstream;
/* Codewords for the LZX Huffman codes. */
u32 aligned[LZX_ALIGNEDCODE_NUM_SYMBOLS];
};
+/* Block split statistics. See "Block splitting algorithm" below. */
+#define NUM_LITERAL_OBSERVATION_TYPES 8
+#define NUM_MATCH_OBSERVATION_TYPES 2
+#define NUM_OBSERVATION_TYPES (NUM_LITERAL_OBSERVATION_TYPES + NUM_MATCH_OBSERVATION_TYPES)
+struct block_split_stats {
+ u32 new_observations[NUM_OBSERVATION_TYPES];
+ u32 observations[NUM_OBSERVATION_TYPES];
+ u32 num_new_observations;
+ u32 num_observations;
+};
+
/*
* Represents a run of literals followed by a match or end-of-block. This
* struct is needed to temporarily store items chosen by the parser, since items
u16 adjusted_length;
/* If bit 31 is clear, then this field contains the match header in bits
- * 0-8 and the match offset minus LZX_OFFSET_ADJUSTMENT in bits 9-30.
- * Otherwise, this sequence's literal run was the last literal run in
- * the block, so there is no match that follows it. */
+ * 0-8, and either the match offset plus LZX_OFFSET_ADJUSTMENT or a
+ * recent offset code in bits 9-30. Otherwise (if bit 31 is set), this
+ * sequence's literal run was the last literal run in the block, so
+ * there is no match that follows it. */
u32 adjusted_offset_and_match_hdr;
};
};
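+
+/*
+ * Illustrative unpacking of the packed field above (hypothetical snippet, not
+ * part of this patch; valid only while bit 31 is clear):
+ *
+ *	unsigned match_hdr = seq->adjusted_offset_and_match_hdr & 0x1FF;
+ *	u32 adjusted_offset = seq->adjusted_offset_and_match_hdr >> 9;
+ */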
}
-/* Pop a match offset off the front (most recently used) end of the queue. */
-static inline u32
-lzx_lru_queue_pop(struct lzx_lru_queue *queue_p)
-{
- u32 offset = queue_p->R & LZX_QUEUE64_OFFSET_MASK;
- queue_p->R >>= LZX_QUEUE64_OFFSET_SHIFT;
- return offset;
-}
-
/* Swap a match offset to the front of the queue. */
static inline struct lzx_lru_queue
lzx_lru_queue_swap(struct lzx_lru_queue queue, unsigned idx)
/* The Huffman symbol frequency counters for the current block. */
struct lzx_freqs freqs;
+ /* Block split statistics. */
+ struct block_split_stats split_stats;
+
/* The Huffman codes for the current and previous blocks. The one with
* index 'codes_index' is for the current block, and the other one is
* for the previous block. */
/* The matches and literals that the parser has chosen for the current
* block. The required length of this array is limited by the maximum
- * number of matches that can ever be chosen for a single block. */
- struct lzx_sequence chosen_sequences[DIV_ROUND_UP(LZX_DIV_BLOCK_SIZE, LZX_MIN_MATCH_LEN)];
+ * number of matches that can ever be chosen for a single block, plus
+ * one for the special entry at the end. */
+ struct lzx_sequence chosen_sequences[
+ DIV_ROUND_UP(SOFT_MAX_BLOCK_SIZE, LZX_MIN_MATCH_LEN) + 1];
/* Tables for mapping adjusted offsets to offset slots */
/* Data for greedy or lazy parsing */
struct {
/* Hash chains matchfinder (MUST BE LAST!!!) */
- struct hc_matchfinder hc_mf;
+ union {
+ struct hc_matchfinder_16 hc_mf_16;
+ struct hc_matchfinder_32 hc_mf_32;
+ };
};
/* Data for near-optimal parsing */
struct {
/*
- * The graph nodes for the current block.
- *
- * We need at least 'LZX_DIV_BLOCK_SIZE +
- * LZX_MAX_MATCH_LEN - 1' nodes because that is the
- * maximum block size that may be used. Add 1 because
- * we need a node to represent end-of-block.
+ * Array of nodes, one per position, for running the
+ * minimum-cost path algorithm.
*
- * It is possible that nodes past end-of-block are
- * accessed during match consideration, but this can
- * only occur if the block was truncated at
- * LZX_DIV_BLOCK_SIZE. So the same bound still applies.
- * Note that since nodes past the end of the block will
- * never actually have an effect on the items that are
- * chosen for the block, it makes no difference what
- * their costs are initialized to (if anything).
+ * This array must be large enough to accommodate the
+ * worst-case number of nodes, which occurs if we find a
+ * match of length LZX_MAX_MATCH_LEN at position
+ * SOFT_MAX_BLOCK_SIZE - 1, producing a block of length
+ * SOFT_MAX_BLOCK_SIZE - 1 + LZX_MAX_MATCH_LEN. Add one
+ * for the end-of-block node.
*/
- struct lzx_optimum_node optimum_nodes[LZX_DIV_BLOCK_SIZE +
- LZX_MAX_MATCH_LEN - 1 + 1];
+ struct lzx_optimum_node optimum_nodes[SOFT_MAX_BLOCK_SIZE - 1 +
+ LZX_MAX_MATCH_LEN + 1];
/* The cost model for the current block */
struct lzx_costs costs;
LZX_MAX_MATCHES_PER_POS +
LZX_MAX_MATCH_LEN - 1];
- /* Hash table for finding length 2 matches */
- pos_t hash2_tab[LZX_HASH2_LENGTH];
-
/* Binary trees matchfinder (MUST BE LAST!!!) */
- struct bt_matchfinder bt_mf;
+ union {
+ struct bt_matchfinder_16 bt_mf_16;
+ struct bt_matchfinder_32 bt_mf_32;
+ };
};
};
};
+/*
+ * Will a matchfinder using 16-bit positions be sufficient for compressing
+ * buffers of up to the specified size? The limit could be 65536 bytes, but we
+ * also want to optimize out the use of offset_slot_tab_2 in the 16-bit case.
+ * This requires that the limit be no more than the length of offset_slot_tab_1
+ * (currently 32768).
+ */
+static inline bool
+lzx_is_16_bit(size_t max_bufsize)
+{
+ STATIC_ASSERT(ARRAY_LEN(((struct lzx_compressor *)0)->offset_slot_tab_1) == 32768);
+ return max_bufsize <= 32768;
+}
+
+/*
+ * The following macros call either the 16-bit or the 32-bit version of a
+ * matchfinder function based on the value of 'is_16_bit', which will be known
+ * at compilation time.
+ */
+
+#define CALL_HC_MF(is_16_bit, c, funcname, ...) \
+ ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->hc_mf_16, ##__VA_ARGS__) : \
+ CONCAT(funcname, _32)(&(c)->hc_mf_32, ##__VA_ARGS__))
+
+#define CALL_BT_MF(is_16_bit, c, funcname, ...) \
+ ((is_16_bit) ? CONCAT(funcname, _16)(&(c)->bt_mf_16, ##__VA_ARGS__) : \
+ CONCAT(funcname, _32)(&(c)->bt_mf_32, ##__VA_ARGS__))
+
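+/*
+ * For example, since 'is_16_bit' is a compile-time constant at each call
+ * site, the dispatch collapses to a direct call (sketch of the expansion):
+ *
+ *	CALL_HC_MF(true, c, hc_matchfinder_init);
+ *	    --> hc_matchfinder_init_16(&c->hc_mf_16);
+ *
+ * and the dead 32-bit branch is optimized away.
+ */
+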
/*
* Structure to keep track of the current state of sending bits to the
* compressed output buffer.
/* Can the specified number of bits always be added to 'bitbuf' after any
* pending 16-bit coding units have been flushed? */
-#define CAN_BUFFER(n) ((n) <= (8 * sizeof(machine_word_t)) - 16)
+#define CAN_BUFFER(n) ((n) <= (8 * sizeof(machine_word_t)) - 15)
/*
* Initialize the output bitstream.
static inline void
lzx_flush_bits(struct lzx_output_bitstream *os, unsigned max_num_bits)
{
+ /* Masking the number of bits to shift is only needed to avoid undefined
+ * behavior; we don't actually care about the results of bad shifts. On
+ * x86, the explicit masking generates no extra code. */
+ const u32 shift_mask = 8 * sizeof(os->bitbuf) - 1;
+
if (os->end - os->next < 6)
return;
- put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 16), os->next + 0);
+ put_unaligned_le16(os->bitbuf >> ((os->bitcount - 16) &
+ shift_mask), os->next + 0);
if (max_num_bits > 16)
- put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 32), os->next + 2);
+ put_unaligned_le16(os->bitbuf >> ((os->bitcount - 32) &
+ shift_mask), os->next + 2);
if (max_num_bits > 32)
- put_unaligned_u16_le(os->bitbuf >> (os->bitcount - 48), os->next + 4);
+ put_unaligned_le16(os->bitbuf >> ((os->bitcount - 48) &
+ shift_mask), os->next + 4);
os->next += (os->bitcount >> 4) << 1;
os->bitcount &= 15;
}
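+
+/* Concrete trace of the masking trick above: with bitcount == 7, the shift
+ * amount (7 - 16) & shift_mask is some large in-range value, so a garbage
+ * 16-bit unit is stored; but 'next' then advances by (7 >> 4) << 1 == 0
+ * bytes, so the garbage is overwritten by a later flush and never reaches
+ * the compressed output. */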
return 0;
if (os->bitcount != 0) {
- put_unaligned_u16_le(os->bitbuf << (16 - os->bitcount), os->next);
+ put_unaligned_le16(os->bitbuf << (16 - os->bitcount), os->next);
os->next += 2;
}
STATIC_ASSERT(MAIN_CODEWORD_LIMIT >= 9 &&
MAIN_CODEWORD_LIMIT <= LZX_MAX_MAIN_CODEWORD_LEN);
- STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 9 &&
+ STATIC_ASSERT(LENGTH_CODEWORD_LIMIT >= 8 &&
LENGTH_CODEWORD_LIMIT <= LZX_MAX_LEN_CODEWORD_LEN);
STATIC_ASSERT(ALIGNED_CODEWORD_LIMIT >= LZX_NUM_ALIGNED_OFFSET_BITS &&
ALIGNED_CODEWORD_LIMIT <= LZX_MAX_ALIGNED_CODEWORD_LEN);
unsigned lit1 = block_data[1];
unsigned lit2 = block_data[2];
unsigned lit3 = block_data[3];
- lzx_add_bits(os, codes->codewords.main[lit0], codes->lens.main[lit0]);
- lzx_add_bits(os, codes->codewords.main[lit1], codes->lens.main[lit1]);
- lzx_add_bits(os, codes->codewords.main[lit2], codes->lens.main[lit2]);
- lzx_add_bits(os, codes->codewords.main[lit3], codes->lens.main[lit3]);
+ lzx_add_bits(os, codes->codewords.main[lit0],
+ codes->lens.main[lit0]);
+ lzx_add_bits(os, codes->codewords.main[lit1],
+ codes->lens.main[lit1]);
+ lzx_add_bits(os, codes->codewords.main[lit2],
+ codes->lens.main[lit2]);
+ lzx_add_bits(os, codes->codewords.main[lit3],
+ codes->lens.main[lit3]);
lzx_flush_bits(os, 4 * MAIN_CODEWORD_LIMIT);
block_data += 4;
litrunlen -= 4;
}
if (litrunlen--) {
unsigned lit = *block_data++;
- lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
if (litrunlen--) {
unsigned lit = *block_data++;
- lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
if (litrunlen--) {
unsigned lit = *block_data++;
- lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
lzx_flush_bits(os, 3 * MAIN_CODEWORD_LIMIT);
} else {
lzx_flush_bits(os, 2 * MAIN_CODEWORD_LIMIT);
/* 32-bit: write 1 literal at a time. */
do {
unsigned lit = *block_data++;
- lzx_add_bits(os, codes->codewords.main[lit], codes->lens.main[lit]);
+ lzx_add_bits(os, codes->codewords.main[lit],
+ codes->lens.main[lit]);
lzx_flush_bits(os, MAIN_CODEWORD_LIMIT);
} while (--litrunlen);
}
/* If needed, output the length symbol for the match. */
if (adjusted_length >= LZX_NUM_PRIMARY_LENS) {
- lzx_add_bits(os, codes->codewords.len[adjusted_length - LZX_NUM_PRIMARY_LENS],
- codes->lens.len[adjusted_length - LZX_NUM_PRIMARY_LENS]);
+ lzx_add_bits(os, codes->codewords.len[adjusted_length -
+ LZX_NUM_PRIMARY_LENS],
+ codes->lens.len[adjusted_length -
+ LZX_NUM_PRIMARY_LENS]);
if (!CAN_BUFFER(MAX_MATCH_BITS))
lzx_flush_bits(os, LENGTH_CODEWORD_LIMIT);
}
if (!CAN_BUFFER(MAX_MATCH_BITS))
lzx_flush_bits(os, 14);
- lzx_add_bits(os, codes->codewords.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK],
- codes->lens.aligned[adjusted_offset & LZX_ALIGNED_OFFSET_BITMASK]);
+ lzx_add_bits(os, codes->codewords.aligned[adjusted_offset &
+ LZX_ALIGNED_OFFSET_BITMASK],
+ codes->lens.aligned[adjusted_offset &
+ LZX_ALIGNED_OFFSET_BITMASK]);
if (!CAN_BUFFER(MAX_MATCH_BITS))
lzx_flush_bits(os, ALIGNED_CODEWORD_LIMIT);
} else {
+ STATIC_ASSERT(CAN_BUFFER(17));
+
lzx_add_bits(os, extra_bits, num_extra_bits);
if (!CAN_BUFFER(MAX_MATCH_BITS))
lzx_flush_bits(os, 17);
const struct lzx_lens * prev_lens,
struct lzx_output_bitstream * os)
{
- LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
- block_type == LZX_BLOCKTYPE_VERBATIM);
-
/* The first three bits indicate the type of block and are one of the
* LZX_BLOCKTYPE_* constants. */
lzx_write_bits(os, block_type, 3);
* compressor's acceleration tables to speed up the mapping.
*/
static inline unsigned
-lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset)
+lzx_comp_get_offset_slot(struct lzx_compressor *c, u32 adjusted_offset,
+ bool is_16_bit)
{
- if (adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
+ if (is_16_bit || adjusted_offset < ARRAY_LEN(c->offset_slot_tab_1))
return c->offset_slot_tab_1[adjusted_offset];
return c->offset_slot_tab_2[adjusted_offset >> 14];
}
* offsets queue. */
static inline void
lzx_record_match(struct lzx_compressor *c, unsigned length, u32 offset_data,
- u32 recent_offsets[LZX_NUM_RECENT_OFFSETS],
+ u32 recent_offsets[LZX_NUM_RECENT_OFFSETS], bool is_16_bit,
u32 *litrunlen_p, struct lzx_sequence **next_seq_p)
{
u32 litrunlen = *litrunlen_p;
}
/* Compute the offset slot */
- offset_slot = lzx_comp_get_offset_slot(c, offset_data);
+ offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
/* Compute the match header. */
v += offset_slot * LZX_NUM_LEN_HEADERS;
last_seq->adjusted_offset_and_match_hdr = 0x80000000;
}
+/******************************************************************************/
+
+/*
+ * Block splitting algorithm. The problem is to decide when it is worthwhile to
+ * start a new block with new entropy codes. There is a theoretically optimal
+ * solution: recursively consider every possible block split, evaluate the
+ * exact cost of each block, and choose the minimum-cost split. But this is
+ * far too slow. Instead, as an approximation, we can count symbols and, after
+ * every N symbols, compare the expected distribution of symbols (based on the
+ * previous data) with the actual distribution. If they differ "by enough", then
+ * start a new block.
+ *
+ * As an optimization and heuristic, we don't distinguish between every
+ * individual symbol but rather combine many symbols into a single
+ * "observation type". For literals we only look at the high bits and low
+ * bits, and for matches we only look at whether the match is long or not.
+ * The assumption is that for typical "real" data, places that are good block
+ * boundaries will tend to be noticeable based only on changes in these
+ * aggregate frequencies, without looking for
+ * subtle differences in individual symbols. For example, a change from ASCII
+ * bytes to non-ASCII bytes, or from few matches (generally less compressible)
+ * to many matches (generally more compressible), would be easily noticed based
+ * on the aggregates.
+ *
+ * For determining whether the frequency distributions are "different enough" to
+ * start a new block, the simple heuristic of splitting when the sum of absolute
+ * differences exceeds a constant seems to be good enough. We also add a number
+ * proportional to the block size so that the algorithm is more likely to end
+ * large blocks than small blocks. This reflects the general expectation that
+ * it will become increasingly beneficial to start a new block as the current
+ * block grows larger.
+ *
+ * Finally, since this is only an approximation, the symbols considered need
+ * not be exactly the ones that will be used. With "near-optimal parsing", for
+ * example, the actual symbols that will be used are unknown until after the
+ * block boundary is chosen and the block has been optimized. Since the final
+ * choices are not yet available, we use preliminary "greedy" choices instead.
+ */
+
+/* Initialize the block split statistics when starting a new block. */
+static void
+init_block_split_stats(struct block_split_stats *stats)
+{
+ for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
+ stats->new_observations[i] = 0;
+ stats->observations[i] = 0;
+ }
+ stats->num_new_observations = 0;
+ stats->num_observations = 0;
+}
+
+/* Literal observation. Heuristic: use the top 2 bits and the low bit of the
+ * literal, for 8 possible literal observation types. */
+static inline void
+observe_literal(struct block_split_stats *stats, u8 lit)
+{
+ stats->new_observations[((lit >> 5) & 0x6) | (lit & 1)]++;
+ stats->num_new_observations++;
+}
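+
+/* Example: lit == 'A' == 0x41 gives ((0x41 >> 5) & 0x6) | (0x41 & 1) == 3;
+ * in general, every odd byte in [0x40, 0x7F] falls in observation type 3. */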
+
+/* Match observation. Heuristic: use one observation type for "short match" and
+ * one observation type for "long match". */
+static inline void
+observe_match(struct block_split_stats *stats, unsigned length)
+{
+ stats->new_observations[NUM_LITERAL_OBSERVATION_TYPES + (length >= 5)]++;
+ stats->num_new_observations++;
+}
+
+static bool
+do_end_block_check(struct block_split_stats *stats, u32 block_size)
+{
+ if (stats->num_observations > 0) {
+
+ /* Note: to avoid slow divisions, we do not divide by
+ * 'num_observations', but rather do all math with the numbers
+ * multiplied by 'num_observations'. */
+ u32 total_delta = 0;
+ for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
+ u32 expected = stats->observations[i] * stats->num_new_observations;
+ u32 actual = stats->new_observations[i] * stats->num_observations;
+ u32 delta = (actual > expected) ? actual - expected :
+ expected - actual;
+ total_delta += delta;
+ }
+
+ /* Ready to end the block? */
+ if (total_delta + (block_size >> 10) * stats->num_observations >=
+ 200 * stats->num_observations)
+ return true;
+ }
+
+ for (int i = 0; i < NUM_OBSERVATION_TYPES; i++) {
+ stats->num_observations += stats->new_observations[i];
+ stats->observations[i] += stats->new_observations[i];
+ stats->new_observations[i] = 0;
+ }
+ stats->num_new_observations = 0;
+ return false;
+}
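+
+/*
+ * What the fixed-point check above approximates (sketch): dividing through by
+ * num_observations * num_new_observations, with p[i] and q[i] the old and new
+ * observed symbol frequencies, the block is ended when
+ *
+ *	sum_i |q[i] - p[i]| >= (200 - block_size/1024) / num_new_observations
+ *
+ * i.e. roughly a 0.8 threshold on the summed frequency difference at the
+ * minimum of 250 new observations, loosened further as the block grows.
+ */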
+
+static inline bool
+should_end_block(struct block_split_stats *stats,
+ const u8 *in_block_begin, const u8 *in_next, const u8 *in_end)
+{
+ /* Ready to check block split statistics? */
+ if (stats->num_new_observations < 250 ||
+ in_next - in_block_begin < MIN_BLOCK_SIZE ||
+ in_end - in_next < MIN_BLOCK_SIZE)
+ return false;
+
+ return do_end_block_check(stats, in_next - in_block_begin);
+}
+
+/******************************************************************************/
+
/*
* Given the minimum-cost path computed through the item graph for the current
* block, walk the path and count how many of each symbol in each Huffman-coded
* beginning of the block), but this doesn't matter because this function only
* computes frequencies.
*/
-static void
-lzx_tally_item_list(struct lzx_compressor *c, u32 block_size)
+static inline void
+lzx_tally_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
{
u32 node_idx = block_size;
for (;;) {
}
/* Tally the main symbol. */
- offset_slot = lzx_comp_get_offset_slot(c, offset_data);
+ offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
v += offset_slot * LZX_NUM_LEN_HEADERS;
c->freqs.main[LZX_NUM_CHARS + v]++;
* first-to-last order. The return value is the index in c->chosen_sequences at
* which the lzx_sequences begin.
*/
-static u32
-lzx_record_item_list(struct lzx_compressor *c, u32 block_size)
+static inline u32
+lzx_record_item_list(struct lzx_compressor *c, u32 block_size, bool is_16_bit)
{
u32 node_idx = block_size;
u32 seq_idx = ARRAY_LEN(c->chosen_sequences) - 1;
}
/* Tally the main symbol. */
- offset_slot = lzx_comp_get_offset_slot(c, offset_data);
+ offset_slot = lzx_comp_get_offset_slot(c, offset_data, is_16_bit);
v += offset_slot * LZX_NUM_LEN_HEADERS;
c->freqs.main[LZX_NUM_CHARS + v]++;
* later. The algorithm does not solve this problem; it only considers the
* lowest cost to reach each individual position.
*/
-static struct lzx_lru_queue
+static inline struct lzx_lru_queue
lzx_find_min_cost_path(struct lzx_compressor * const restrict c,
const u8 * const restrict block_begin,
const u32 block_size,
- const struct lzx_lru_queue initial_queue)
+ const struct lzx_lru_queue initial_queue,
+ bool is_16_bit)
{
struct lzx_optimum_node *cur_node = c->optimum_nodes;
struct lzx_optimum_node * const end_node = &c->optimum_nodes[block_size];
do {
u32 offset = cache_ptr->offset;
u32 offset_data = offset + LZX_OFFSET_ADJUSTMENT;
- unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data);
+ unsigned offset_slot = lzx_comp_get_offset_slot(c, offset_data,
+ is_16_bit);
+ u32 base_cost = cur_node->cost;
+
+ #if LZX_CONSIDER_ALIGNED_COSTS
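+ /* Adjusted offsets >= 16 are exactly those whose offset slot
+ * (slot 8 and up) has at least LZX_NUM_ALIGNED_OFFSET_BITS == 3
+ * extra bits and can therefore carry an aligned offset symbol. */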
+ if (offset_data >= 16)
+ base_cost += c->costs.aligned[offset_data &
+ LZX_ALIGNED_OFFSET_BITMASK];
+ #endif
+
do {
- u32 cost = cur_node->cost +
+ u32 cost = base_cost +
c->costs.match_cost[offset_slot][
next_len - LZX_MIN_MATCH_LEN];
- #if LZX_CONSIDER_ALIGNED_COSTS
- if (lzx_extra_offset_bits[offset_slot] >=
- LZX_NUM_ALIGNED_OFFSET_BITS)
- cost += c->costs.aligned[offset_data &
- LZX_ALIGNED_OFFSET_BITMASK];
- #endif
if (cost < (cur_node + next_len)->cost) {
(cur_node + next_len)->cost = cost;
(cur_node + next_len)->item =
* of coding the literal is integrated into the queue update
* code below. */
literal = *in_next++;
- cost = cur_node->cost +
- c->costs.main[lzx_main_symbol_for_literal(literal)];
+ cost = cur_node->cost + c->costs.main[literal];
/* Advance to the next position. */
cur_node++;
static void
lzx_compute_match_costs(struct lzx_compressor *c)
{
- unsigned num_offset_slots = lzx_get_num_offset_slots(c->window_order);
+ unsigned num_offset_slots = (c->num_main_syms - LZX_NUM_CHARS) /
+ LZX_NUM_LEN_HEADERS;
struct lzx_costs *costs = &c->costs;
for (unsigned offset_slot = 0; offset_slot < num_offset_slots; offset_slot++) {
u32 extra_cost = (u32)lzx_extra_offset_bits[offset_slot] * LZX_BIT_COST;
- unsigned main_symbol = lzx_main_symbol_for_match(offset_slot, 0);
+ unsigned main_symbol = LZX_NUM_CHARS + (offset_slot *
+ LZX_NUM_LEN_HEADERS);
unsigned i;
#if LZX_CONSIDER_ALIGNED_COSTS
- if (lzx_extra_offset_bits[offset_slot] >= LZX_NUM_ALIGNED_OFFSET_BITS)
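+ /* Slot 8 is the first slot with >= LZX_NUM_ALIGNED_OFFSET_BITS
+ * extra offset bits, matching the 'offset_data >= 16' test used
+ * during path finding. */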
+ if (offset_slot >= 8)
extra_cost -= LZX_NUM_ALIGNED_OFFSET_BITS * LZX_BIT_COST;
#endif
unsigned i;
const struct lzx_lens *lens = &c->codes[c->codes_index].lens;
- for (i = 0; i < c->num_main_syms; i++)
- c->costs.main[i] = (lens->main[i] ? lens->main[i] : 15) * LZX_BIT_COST;
+ for (i = 0; i < c->num_main_syms; i++) {
+ c->costs.main[i] = (lens->main[i] ? lens->main[i] :
+ MAIN_CODEWORD_LIMIT) * LZX_BIT_COST;
+ }
- for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
- c->costs.len[i] = (lens->len[i] ? lens->len[i] : 15) * LZX_BIT_COST;
+ for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++) {
+ c->costs.len[i] = (lens->len[i] ? lens->len[i] :
+ LENGTH_CODEWORD_LIMIT) * LZX_BIT_COST;
+ }
#if LZX_CONSIDER_ALIGNED_COSTS
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] : 7) * LZX_BIT_COST;
+ for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+ c->costs.aligned[i] = (lens->aligned[i] ? lens->aligned[i] :
+ ALIGNED_CODEWORD_LIMIT) * LZX_BIT_COST;
+ }
#endif
lzx_compute_match_costs(c);
}
-static struct lzx_lru_queue
+static inline struct lzx_lru_queue
lzx_optimize_and_write_block(struct lzx_compressor * const restrict c,
struct lzx_output_bitstream * const restrict os,
const u8 * const restrict block_begin,
const u32 block_size,
- const struct lzx_lru_queue initial_queue)
+ const struct lzx_lru_queue initial_queue,
+ bool is_16_bit)
{
unsigned num_passes_remaining = c->num_optim_passes;
struct lzx_lru_queue new_queue;
lzx_reset_symbol_frequencies(c);
do {
new_queue = lzx_find_min_cost_path(c, block_begin, block_size,
- initial_queue);
+ initial_queue, is_16_bit);
if (num_passes_remaining > 1) {
- lzx_tally_item_list(c, block_size);
+ lzx_tally_item_list(c, block_size, is_16_bit);
lzx_make_huffman_codes(c);
lzx_update_costs(c);
lzx_reset_symbol_frequencies(c);
}
} while (--num_passes_remaining);
- seq_idx = lzx_record_item_list(c, block_size);
+ seq_idx = lzx_record_item_list(c, block_size, is_16_bit);
lzx_finish_block(c, os, block_begin, block_size, seq_idx);
return new_queue;
}
* time, but rather to produce a compression ratio significantly better than a
* simpler "greedy" or "lazy" parse while still being relatively fast.
*/
-static void
+static inline void
lzx_compress_near_optimal(struct lzx_compressor *c,
- struct lzx_output_bitstream *os)
+ struct lzx_output_bitstream *os,
+ bool is_16_bit)
{
const u8 * const in_begin = c->in_buffer;
const u8 * in_next = in_begin;
const u8 * const in_end = in_begin + c->in_nbytes;
- unsigned max_len = LZX_MAX_MATCH_LEN;
- unsigned nice_len = min(c->nice_match_length, max_len);
- u32 next_hash;
+ u32 max_len = LZX_MAX_MATCH_LEN;
+ u32 nice_len = min(c->nice_match_length, max_len);
+ u32 next_hashes[2] = {};
struct lzx_lru_queue queue;
- bt_matchfinder_init(&c->bt_mf);
- memset(c->hash2_tab, 0, sizeof(c->hash2_tab));
- next_hash = bt_matchfinder_hash_3_bytes(in_next);
+ CALL_BT_MF(is_16_bit, c, bt_matchfinder_init);
lzx_lru_queue_init(&queue);
do {
/* Starting a new block */
const u8 * const in_block_begin = in_next;
- const u8 * const in_block_end =
- in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
+ const u8 * const in_max_block_end =
+ in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
+ const u8 *next_observation = in_next;
+
+ init_block_split_stats(&c->split_stats);
/* Run the block through the matchfinder and cache the matches. */
struct lz_match *cache_ptr = c->match_cache;
do {
struct lz_match *lz_matchptr;
- u32 hash2;
- pos_t cur_match;
- unsigned best_len;
+ u32 best_len;
/* If approaching the end of the input buffer, adjust
* 'max_len' and 'nice_len' accordingly. */
if (unlikely(max_len > in_end - in_next)) {
max_len = in_end - in_next;
nice_len = min(max_len, nice_len);
-
- /* This extra check is needed to ensure that we
- * never output a length 2 match of the very
- * last two bytes with the very first two bytes,
- * since such a match has an offset too large to
- * be represented. */
- if (unlikely(max_len < 3)) {
+ if (unlikely(max_len <
+ BT_MATCHFINDER_REQUIRED_NBYTES))
+ {
in_next++;
cache_ptr->length = 0;
cache_ptr++;
}
}
- lz_matchptr = cache_ptr + 1;
-
- /* Check for a length 2 match. */
- hash2 = lz_hash_2_bytes(in_next, LZX_HASH2_ORDER);
- cur_match = c->hash2_tab[hash2];
- c->hash2_tab[hash2] = in_next - in_begin;
- if (cur_match != 0 &&
- (LZX_HASH2_ORDER == 16 ||
- load_u16_unaligned(&in_begin[cur_match]) ==
- load_u16_unaligned(in_next)))
- {
- lz_matchptr->length = 2;
- lz_matchptr->offset = in_next - &in_begin[cur_match];
- lz_matchptr++;
+ /* Check for matches. */
+ lz_matchptr = CALL_BT_MF(is_16_bit, c,
+ bt_matchfinder_get_matches,
+ in_begin,
+ in_next - in_begin,
+ max_len,
+ nice_len,
+ c->max_search_depth,
+ next_hashes,
+ &best_len,
+ cache_ptr + 1);
+
+ if (in_next >= next_observation) {
+ best_len = 0;
+ if (lz_matchptr > cache_ptr + 1)
+ best_len = (lz_matchptr - 1)->length;
+ if (best_len >= 2) {
+ observe_match(&c->split_stats, best_len);
+ next_observation = in_next + best_len;
+ } else {
+ observe_literal(&c->split_stats, *in_next);
+ next_observation = in_next + 1;
+ }
}
- /* Check for matches of length >= 3. */
- lz_matchptr = bt_matchfinder_get_matches(&c->bt_mf,
- in_begin,
- in_next,
- 3,
- max_len,
- nice_len,
- c->max_search_depth,
- &next_hash,
- &best_len,
- lz_matchptr);
in_next++;
cache_ptr->length = lz_matchptr - (cache_ptr + 1);
cache_ptr = lz_matchptr;
if (unlikely(max_len > in_end - in_next)) {
max_len = in_end - in_next;
nice_len = min(max_len, nice_len);
- if (unlikely(max_len < 3)) {
+ if (unlikely(max_len <
+ BT_MATCHFINDER_REQUIRED_NBYTES))
+ {
in_next++;
cache_ptr->length = 0;
cache_ptr++;
continue;
}
}
- c->hash2_tab[lz_hash_2_bytes(in_next, LZX_HASH2_ORDER)] =
- in_next - in_begin;
- bt_matchfinder_skip_position(&c->bt_mf,
- in_begin,
- in_next,
- in_end,
- nice_len,
- c->max_search_depth,
- &next_hash);
+ CALL_BT_MF(is_16_bit, c,
+ bt_matchfinder_skip_position,
+ in_begin,
+ in_next - in_begin,
+ max_len,
+ nice_len,
+ c->max_search_depth,
+ next_hashes);
in_next++;
cache_ptr->length = 0;
cache_ptr++;
} while (--best_len);
}
- } while (in_next < in_block_end &&
- likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]));
+ } while (in_next < in_max_block_end &&
+ likely(cache_ptr < &c->match_cache[LZX_CACHE_LENGTH]) &&
+ !should_end_block(&c->split_stats, in_block_begin, in_next, in_end));
/* We've finished running the block through the matchfinder.
* Now choose a match/literal sequence and write the block. */
queue = lzx_optimize_and_write_block(c, os, in_block_begin,
in_next - in_block_begin,
- queue);
+ queue, is_16_bit);
} while (in_next != in_end);
}
+static void
+lzx_compress_near_optimal_16(struct lzx_compressor *c,
+ struct lzx_output_bitstream *os)
+{
+ lzx_compress_near_optimal(c, os, true);
+}
+
+static void
+lzx_compress_near_optimal_32(struct lzx_compressor *c,
+ struct lzx_output_bitstream *os)
+{
+ lzx_compress_near_optimal(c, os, false);
+}
+
/*
* Given a pointer to the current byte sequence and the current list of recent
* match offsets, find the longest repeat offset match.
unsigned *rep_max_idx_ret)
{
STATIC_ASSERT(LZX_NUM_RECENT_OFFSETS == 3);
- LZX_ASSERT(bytes_remaining >= 2);
const unsigned max_len = min(bytes_remaining, LZX_MAX_MATCH_LEN);
const u16 next_2_bytes = load_u16_unaligned(in_next);
}
/* This is the "lazy" LZX compressor. */
-static void
-lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os)
+static inline void
+lzx_compress_lazy(struct lzx_compressor *c, struct lzx_output_bitstream *os,
+ bool is_16_bit)
{
const u8 * const in_begin = c->in_buffer;
const u8 * in_next = in_begin;
u32 recent_offsets[3] = {1, 1, 1};
u32 next_hashes[2] = {};
- hc_matchfinder_init(&c->hc_mf);
+ CALL_HC_MF(is_16_bit, c, hc_matchfinder_init);
do {
/* Starting a new block */
const u8 * const in_block_begin = in_next;
- const u8 * const in_block_end =
- in_next + min(LZX_DIV_BLOCK_SIZE, in_end - in_next);
+ const u8 * const in_max_block_end =
+ in_next + min(SOFT_MAX_BLOCK_SIZE, in_end - in_next);
struct lzx_sequence *next_seq = c->chosen_sequences;
unsigned cur_len;
u32 cur_offset;
u32 litrunlen = 0;
lzx_reset_symbol_frequencies(c);
+ init_block_split_stats(&c->split_stats);
do {
if (unlikely(max_len > in_end - in_next)) {
/* Find the longest match at the current position. */
- cur_len = hc_matchfinder_longest_match(&c->hc_mf,
- in_begin,
- in_next - in_begin,
- 2,
- max_len,
- nice_len,
- c->max_search_depth,
- next_hashes,
- &cur_offset);
+ cur_len = CALL_HC_MF(is_16_bit, c,
+ hc_matchfinder_longest_match,
+ in_begin,
+ in_next - in_begin,
+ 2,
+ max_len,
+ nice_len,
+ c->max_search_depth,
+ next_hashes,
+ &cur_offset);
if (cur_len < 3 ||
(cur_len == 3 &&
cur_offset >= 8192 - LZX_OFFSET_ADJUSTMENT &&
{
/* There was no match found, or the only match found
* was a distant length 3 match. Output a literal. */
- lzx_record_literal(c, *in_next++, &litrunlen);
+ lzx_record_literal(c, *in_next, &litrunlen);
+ observe_literal(&c->split_stats, *in_next);
+ in_next++;
continue;
}
+ observe_match(&c->split_stats, cur_len);
+
if (cur_offset == recent_offsets[0]) {
in_next++;
cur_offset_data = 0;
nice_len = min(max_len, nice_len);
}
- next_len = hc_matchfinder_longest_match(&c->hc_mf,
- in_begin,
- in_next - in_begin,
- cur_len - 2,
- max_len,
- nice_len,
- c->max_search_depth / 2,
- next_hashes,
- &next_offset);
+ next_len = CALL_HC_MF(is_16_bit, c,
+ hc_matchfinder_longest_match,
+ in_begin,
+ in_next - in_begin,
+ cur_len - 2,
+ max_len,
+ nice_len,
+ c->max_search_depth / 2,
+ next_hashes,
+ &next_offset);
if (next_len <= cur_len - 2) {
in_next++;
choose_cur_match:
lzx_record_match(c, cur_len, cur_offset_data,
- recent_offsets, &litrunlen, &next_seq);
- in_next = hc_matchfinder_skip_positions(&c->hc_mf,
- in_begin,
- in_next - in_begin,
- in_end - in_begin,
- skip_len,
- next_hashes);
- } while (in_next < in_block_end);
+ recent_offsets, is_16_bit,
+ &litrunlen, &next_seq);
+ in_next = CALL_HC_MF(is_16_bit, c,
+ hc_matchfinder_skip_positions,
+ in_begin,
+ in_next - in_begin,
+ in_end - in_begin,
+ skip_len,
+ next_hashes);
+ } while (in_next < in_max_block_end &&
+ !should_end_block(&c->split_stats, in_block_begin, in_next, in_end));
lzx_finish_sequence(next_seq, litrunlen);
} while (in_next != in_end);
}
+static void
+lzx_compress_lazy_16(struct lzx_compressor *c, struct lzx_output_bitstream *os)
+{
+ lzx_compress_lazy(c, os, true);
+}
+
+static void
+lzx_compress_lazy_32(struct lzx_compressor *c, struct lzx_output_bitstream *os)
+{
+ lzx_compress_lazy(c, os, false);
+}
+
/* Generate the acceleration tables for offset slots. */
static void
lzx_init_offset_slot_tabs(struct lzx_compressor *c)
lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
{
if (compression_level <= LZX_MAX_FAST_LEVEL) {
- return offsetof(struct lzx_compressor, hc_mf) +
- hc_matchfinder_size(max_bufsize);
+ if (lzx_is_16_bit(max_bufsize))
+ return offsetof(struct lzx_compressor, hc_mf_16) +
+ hc_matchfinder_size_16(max_bufsize);
+ else
+ return offsetof(struct lzx_compressor, hc_mf_32) +
+ hc_matchfinder_size_32(max_bufsize);
} else {
- return offsetof(struct lzx_compressor, bt_mf) +
- bt_matchfinder_size(max_bufsize);
+ if (lzx_is_16_bit(max_bufsize))
+ return offsetof(struct lzx_compressor, bt_mf_16) +
+ bt_matchfinder_size_16(max_bufsize);
+ else
+ return offsetof(struct lzx_compressor, bt_mf_32) +
+ bt_matchfinder_size_32(max_bufsize);
}
}
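+
+/* The offsetof() arithmetic above relies on the matchfinder union being the
+ * last member of struct lzx_compressor (hence the "MUST BE LAST!!!" notes):
+ * the allocation is truncated to only the variant actually needed. Sketch:
+ * a 32768-byte buffer at a fast level pays only for the 16-bit hc
+ * matchfinder, roughly half the size of the 32-bit variant. */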
/* Fast compression: Use lazy parsing. */
- c->impl = lzx_compress_lazy;
- c->max_search_depth = (36 * compression_level) / 20;
- c->nice_match_length = (72 * compression_level) / 20;
+ if (lzx_is_16_bit(max_bufsize))
+ c->impl = lzx_compress_lazy_16;
+ else
+ c->impl = lzx_compress_lazy_32;
+ c->max_search_depth = (60 * compression_level) / 20;
+ c->nice_match_length = (80 * compression_level) / 20;
/* lzx_compress_lazy() needs max_search_depth >= 2 because it
* halves the max_search_depth when attempting a lazy match, and
/* Normal / high compression: Use near-optimal parsing. */
- c->impl = lzx_compress_near_optimal;
+ if (lzx_is_16_bit(max_bufsize))
+ c->impl = lzx_compress_near_optimal_16;
+ else
+ c->impl = lzx_compress_near_optimal_32;
/* Scale nice_match_length and max_search_depth with the
* compression level. */
c->max_search_depth = (24 * compression_level) / 50;
- c->nice_match_length = (32 * compression_level) / 50;
+ c->nice_match_length = (48 * compression_level) / 50;
/* Set a number of optimization passes appropriate for the
* compression level. */
else
memcpy(c->in_buffer, in, in_nbytes);
c->in_nbytes = in_nbytes;
- lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
+ lzx_preprocess(c->in_buffer, in_nbytes);
/* Initially, the previous Huffman codeword lengths are all zeroes. */
c->codes_index = 0;
/* Flush the output bitstream and return the compressed size or 0. */
result = lzx_flush_output(&os);
if (!result && c->destructive)
- lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes);
+ lzx_postprocess(c->in_buffer, c->in_nbytes);
return result;
}