* Huffman codes that were computed for the block.
*
* Note: the algorithm does not yet attempt to split the input into multiple LZX
- * blocks, instead using a series of blocks of LZX_DIV_BLOCK_SIZE bytes.
+ * blocks; it instead uses a series of blocks of LZX_DIV_BLOCK_SIZE bytes.
*
* Fast algorithm
* --------------
#include "wimlib/lz_sarray.h"
#include "wimlib/lzx.h"
#include "wimlib/util.h"
-#include <pthread.h>
-#include <math.h>
#include <string.h>
#ifdef ENABLE_LZX_DEBUG
};
/* Include template for the match-choosing algorithm. */
-#define LZ_COMPRESSOR struct lzx_compressor
-#define LZ_FORMAT_STATE struct lzx_lru_queue
+#define LZ_COMPRESSOR struct lzx_compressor
+#define LZ_ADAPTIVE_STATE struct lzx_lru_queue
struct lzx_compressor;
#include "wimlib/lz_optimal.h"
* chunks.
*
* We reserve a few extra bytes to potentially allow reading off the end
- * of the array in the match-finding code for optimization purposes.
- */
+ * of the array in the match-finding code for optimization purposes
+ * (currently only needed for the hash chain match-finder). */
u8 *window;
/* Number of bytes of data to be compressed, which is the number of
}
/*
- * Output an LZX match.
+ * Output a precomputed LZX match.
*
- * @out: The bitstream to write the match to.
- * @block_type: The type of the LZX block (LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM)
- * @match: The match.
- * @codes: Pointer to a structure that contains the codewords for the
- * main, length, and aligned offset Huffman codes.
+ * @out:
+ * The bitstream to which to write the match.
+ * @block_type:
+ * The type of the LZX block (LZX_BLOCKTYPE_ALIGNED or
+ * LZX_BLOCKTYPE_VERBATIM)
+ * @match:
+ * The match, as a (length, offset) pair.
+ * @codes:
+ * Pointer to a structure that contains the codewords for the main, length,
+ * and aligned offset Huffman codes for the current LZX compressed block.
*/
static void
lzx_write_match(struct output_bitstream *out, int block_type,
}
}
+/* Output an LZX literal (encoded with the main Huffman code). */
+static void
+lzx_write_literal(struct output_bitstream *out, u8 literal,
+ const struct lzx_codes *codes)
+{
+ bitstream_put_bits(out,
+ codes->codewords.main[literal],
+ codes->lens.main[literal]);
+}
+
static unsigned
lzx_build_precode(const u8 lens[restrict],
const u8 prev_lens[restrict],
}
/*
- * Writes a compressed Huffman code to the output, preceded by the precode for
- * it.
+ * Output a Huffman code in the compressed form used in LZX.
+ *
+ * The Huffman code is represented in the output as a logical series of codeword
+ * lengths from which the code, which must be in canonical form, can be
+ * reconstructed.
+ *
+ * The codeword lengths are themselves compressed using a separate Huffman code,
+ * the "precode", which contains a symbol for each possible codeword length in
+ * the larger code as well as several special symbols to represent repeated
+ * codeword lengths (a form of run-length encoding). The precode is itself
+ * constructed in canonical form, and its codeword lengths are represented
+ * literally in 20 4-bit fields that immediately precede the compressed codeword
+ * lengths of the larger code.
*
- * The Huffman code is represented in the output as a series of path lengths
- * from which the canonical Huffman code can be reconstructed. The path lengths
- * themselves are compressed using a separate Huffman code, the precode, which
- * consists of LZX_PRECODE_NUM_SYMBOLS (= 20) symbols that cover all possible
- * code lengths, plus extra codes for repeated lengths. The path lengths of the
- * precode precede the path lengths of the larger code and are uncompressed,
- * consisting of 20 entries of 4 bits each.
+ * Furthermore, the codeword lengths of the larger code are actually represented
+ * as deltas from the codeword lengths of the corresponding code in the previous
+ * block.
*
- * @out: Bitstream to write the code to.
- * @lens: The code lengths for the Huffman code, indexed by symbol.
- * @prev_lens: Code lengths for this Huffman code, indexed by symbol,
- * in the *previous block*, or all zeroes if this is the
- * first block.
- * @num_syms: The number of symbols in the code.
+ * @out:
+ * Bitstream to which to write the compressed Huffman code.
+ * @lens:
+ * The codeword lengths, indexed by symbol, in the Huffman code.
+ * @prev_lens:
+ * The codeword lengths, indexed by symbol, in the corresponding Huffman
+ * code in the previous block, or all zeroes if this is the first block.
+ * @num_syms:
+ * The number of symbols in the Huffman code.
*/
static void
lzx_write_compressed_code(struct output_bitstream *out,
}
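
As a concrete illustration of the "literal 4-bit fields" described above, the
precode's own codeword lengths could be written out as in the following sketch.
This is an illustration only, not the actual body of lzx_write_compressed_code();
'precode_lens' is a hypothetical array holding the LZX_PRECODE_NUM_SYMBOLS (= 20)
precode codeword lengths.

	static void
	lzx_write_precode_lens_sketch(struct output_bitstream *out,
				      const u8 precode_lens[])
	{
		/* Each precode codeword length is output as a literal 4-bit
		 * field; these 20 fields immediately precede the compressed
		 * codeword lengths of the larger code. */
		for (unsigned i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
			bitstream_put_bits(out, precode_lens[i], 4);
	}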
/*
- * Writes all compressed matches and literal bytes in an LZX block to the the
- * output bitstream.
+ * Write all matches and literal bytes (which were precomputed) in an LZX
+ * compressed block to the output bitstream in the final compressed
+ * representation.
*
* @ostream
* The output bitstream.
* @block_type
- * The type of the block (LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM).
+ * The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
+ * LZX_BLOCKTYPE_VERBATIM).
* @match_tab
- * The array of matches/literals that will be output (length @match_count).
+ * The array of matches/literals to output.
* @match_count
- * Number of matches/literals to be output.
+ * Number of matches/literals to output (length of @match_tab).
* @codes
- * Pointer to a structure that contains the codewords for the main, length,
- * and aligned offset Huffman codes.
+ * The main, length, and aligned offset Huffman codes for the current
+ * LZX compressed block.
*/
static void
lzx_write_matches_and_literals(struct output_bitstream *ostream,
for (unsigned i = 0; i < match_count; i++) {
struct lzx_match match = match_tab[i];
- /* High bit of the match indicates whether the match is an
- * actual match (1) or a literal uncompressed byte (0) */
- if (match.data & 0x80000000) {
- /* match */
- lzx_write_match(ostream, block_type,
- match, codes);
- } else {
- /* literal byte */
- bitstream_put_bits(ostream,
- codes->codewords.main[match.data],
- codes->lens.main[match.data]);
- }
+ /* The high bit of the 32-bit intermediate representation
+ * indicates whether the item is an actual LZ-style match (1) or
+ * a literal byte (0). */
+ if (match.data & 0x80000000)
+ lzx_write_match(ostream, block_type, match, codes);
+ else
+ lzx_write_literal(ostream, match.data, codes);
}
}
/* Retrieve a list of matches available at the next position in the input.
*
- * The matches are written to ctx->matches in decreasing order of length, and
- * the return value is the number of matches found. */
+ * A pointer to the matches array is written into @matches_ret, and the return
+ * value is the number of matches found. */
static u32
lzx_lz_get_matches_caching(struct lzx_compressor *ctx,
const struct lzx_lru_queue *queue,
}
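
As a usage sketch only (the final parameter is assumed from the @matches_ret
description above, and the struct raw_match fields are taken from their use
elsewhere in this file), a caller might consume the returned matches as follows:

	struct raw_match *matches;
	u32 num_matches;

	num_matches = lzx_lz_get_matches_caching(ctx, &ctx->queue, &matches);
	for (u32 i = 0; i < num_matches; i++) {
		/* matches[i].len and matches[i].offset describe one match
		 * available at the current window position. */
	}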
static u32
-lzx_get_prev_literal_cost(struct lzx_compressor *ctx)
+lzx_get_prev_literal_cost(struct lzx_compressor *ctx,
+ struct lzx_lru_queue *queue)
{
return lzx_literal_cost(ctx->window[ctx->match_window_pos - 1],
&ctx->costs);
&ctx->queue);
}
-/*
- * Set default symbol costs.
- */
+/* Set default symbol costs for the LZX Huffman codes. */
static void
lzx_set_default_costs(struct lzx_costs * costs, unsigned num_main_syms)
{
unsigned i;
- /* Literal symbols */
+ /* Main code (part 1): Literal symbols */
for (i = 0; i < LZX_NUM_CHARS; i++)
costs->main[i] = 8;
- /* Match header symbols */
+ /* Main code (part 2): Match header symbols */
for (; i < num_main_syms; i++)
costs->main[i] = 10;
- /* Length symbols */
+ /* Length code */
for (i = 0; i < LZX_LENCODE_NUM_SYMBOLS; i++)
costs->len[i] = 8;
- /* Aligned offset symbols */
+ /* Aligned offset code */
for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
costs->aligned[i] = 3;
}
-/* Given the frequencies of symbols in a compressed block and the corresponding
- * Huffman codes, return LZX_BLOCKTYPE_ALIGNED or LZX_BLOCKTYPE_VERBATIM if an
- * aligned offset or verbatim block, respectively, will take fewer bits to
- * output. */
+/* Given the frequencies of symbols in an LZX-compressed block and the
+ * corresponding Huffman codes, return LZX_BLOCKTYPE_ALIGNED or
+ * LZX_BLOCKTYPE_VERBATIM if an aligned offset or verbatim block, respectively,
+ * will take fewer bits to output. */
static int
lzx_choose_verbatim_or_aligned(const struct lzx_freqs * freqs,
const struct lzx_codes * codes)
/* Verbatim blocks have a constant 3 bits per position footer. Aligned
* offset blocks have an aligned offset symbol per position footer, plus
- * an extra 24 bits to output the lengths necessary to reconstruct the
- * aligned offset code itself. */
+	 * an extra 24 bits per block (eight 3-bit codeword lengths) needed to
+	 * reconstruct the aligned offset code itself. */
for (unsigned i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
verbatim_cost += 3 * freqs->aligned[i];
aligned_cost += codes->lens.aligned[i] * freqs->aligned[i];
}
/* Find a near-optimal sequence of matches/literals with which to output the
- * specified LZX block, then set its type to that which has the minimum cost to
- * output. */
+ * specified LZX block, then set the block's type to that which has the minimum
+ * cost to output (either verbatim or aligned). */
static void
lzx_optimize_block(struct lzx_compressor *ctx, struct lzx_block_spec *spec,
unsigned num_passes)
raw_match = lzx_lz_get_near_optimal_match(ctx);
if (raw_match.len >= LZX_MIN_MATCH_LEN) {
- lzx_match.data = lzx_tally_match(raw_match.len, raw_match.offset,
- &freqs, &ctx->queue);
- i += raw_match.len;
+ if (unlikely(raw_match.len == LZX_MIN_MATCH_LEN &&
+ raw_match.offset == ctx->max_window_size -
+ LZX_MIN_MATCH_LEN))
+ {
+ /* Degenerate case where the parser
+ * generated the minimum match length
+ * with the maximum offset. There
+ * aren't actually enough position slots
+ * to represent this offset, as noted in
+ * the comments in
+ * lzx_get_num_main_syms(), so we cannot
+ * allow it. Use literals instead.
+ *
+ * Note that this case only occurs if
+ * the match-finder can generate matches
+ * to the very start of the window. The
+ * suffix array match-finder can,
+ * although typical hash chain and
+ * binary tree match-finders use 0 as a
+ * null value and therefore cannot
+ * generate such matches. */
+ BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2);
+ lzx_match.data = lzx_tally_literal(ctx->window[i],
+ &freqs);
+ i += 1;
+ ctx->chosen_matches[spec->chosen_matches_start_pos +
+ spec->num_chosen_matches++]
+ = lzx_match;
+ lzx_match.data = lzx_tally_literal(ctx->window[i],
+ &freqs);
+ i += 1;
+ } else {
+ lzx_match.data = lzx_tally_match(raw_match.len,
+ raw_match.offset,
+ &freqs,
+ &ctx->queue);
+ i += raw_match.len;
+ }
} else {
lzx_match.data = lzx_tally_literal(ctx->window[i], &freqs);
i += 1;
/* Set up a default cost model. */
lzx_set_default_costs(&ctx->costs, ctx->num_main_syms);
+ /* TODO: The compression ratio could be slightly improved by performing
+ * data-dependent block splitting instead of using fixed-size blocks.
+ * Doing so well is a computationally hard problem, however. */
ctx->num_blocks = DIV_ROUND_UP(ctx->window_size, LZX_DIV_BLOCK_SIZE);
for (unsigned i = 0; i < ctx->num_blocks; i++) {
unsigned pos = LZX_DIV_BLOCK_SIZE * i;
return compressed_size;
}
-static bool
-lzx_params_valid(const struct wimlib_lzx_compressor_params *params)
-{
- /* Validate parameters. */
- if (params->hdr.size != sizeof(struct wimlib_lzx_compressor_params)) {
- LZX_DEBUG("Invalid parameter structure size!");
- return false;
- }
-
- if (params->algorithm != WIMLIB_LZX_ALGORITHM_SLOW &&
- params->algorithm != WIMLIB_LZX_ALGORITHM_FAST)
- {
- LZX_DEBUG("Invalid algorithm.");
- return false;
- }
-
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
- if (params->alg_params.slow.num_optim_passes < 1)
- {
- LZX_DEBUG("Invalid number of optimization passes!");
- return false;
- }
-
- if (params->alg_params.slow.main_nostat_cost < 1 ||
- params->alg_params.slow.main_nostat_cost > 16)
- {
- LZX_DEBUG("Invalid main_nostat_cost!");
- return false;
- }
-
- if (params->alg_params.slow.len_nostat_cost < 1 ||
- params->alg_params.slow.len_nostat_cost > 16)
- {
- LZX_DEBUG("Invalid len_nostat_cost!");
- return false;
- }
-
- if (params->alg_params.slow.aligned_nostat_cost < 1 ||
- params->alg_params.slow.aligned_nostat_cost > 8)
- {
- LZX_DEBUG("Invalid aligned_nostat_cost!");
- return false;
- }
- }
- return true;
-}
-
static void
lzx_free_compressor(void *_ctx)
{
}
}
+static const struct wimlib_lzx_compressor_params lzx_fast_default = {
+ .hdr = {
+ .size = sizeof(struct wimlib_lzx_compressor_params),
+ },
+ .algorithm = WIMLIB_LZX_ALGORITHM_FAST,
+ .use_defaults = 0,
+ .alg_params = {
+ .fast = {
+ },
+ },
+};
+static const struct wimlib_lzx_compressor_params lzx_slow_default = {
+ .hdr = {
+ .size = sizeof(struct wimlib_lzx_compressor_params),
+ },
+ .algorithm = WIMLIB_LZX_ALGORITHM_SLOW,
+ .use_defaults = 0,
+ .alg_params = {
+ .slow = {
+ .use_len2_matches = 1,
+ .nice_match_length = 32,
+ .num_optim_passes = 2,
+ .max_search_depth = 50,
+ .max_matches_per_pos = 3,
+ .main_nostat_cost = 15,
+ .len_nostat_cost = 15,
+ .aligned_nostat_cost = 7,
+ },
+ },
+};
+
+static const struct wimlib_lzx_compressor_params *
+lzx_get_params(const struct wimlib_compressor_params_header *_params)
+{
+ const struct wimlib_lzx_compressor_params *params =
+ (const struct wimlib_lzx_compressor_params*)_params;
+
+ if (params == NULL) {
+ LZX_DEBUG("Using default algorithm and parameters.");
+ params = &lzx_slow_default;
+ } else {
+ if (params->use_defaults) {
+ if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW)
+ params = &lzx_slow_default;
+ else
+ params = &lzx_fast_default;
+ }
+ }
+ return params;
+}
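
For illustration, a caller that wants one of the default parameter sets can
simply set use_defaults, which lzx_get_params() will then resolve to the
appropriate structure above (a sketch; the field names are those used in the
default structures):

	struct wimlib_lzx_compressor_params params = {
		.hdr = {
			.size = sizeof(struct wimlib_lzx_compressor_params),
		},
		.algorithm    = WIMLIB_LZX_ALGORITHM_SLOW,
		.use_defaults = 1,
	};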
+
static int
lzx_create_compressor(size_t window_size,
const struct wimlib_compressor_params_header *_params,
void **ctx_ret)
{
- const struct wimlib_lzx_compressor_params *params =
- (const struct wimlib_lzx_compressor_params*)_params;
+ const struct wimlib_lzx_compressor_params *params = lzx_get_params(_params);
struct lzx_compressor *ctx;
LZX_DEBUG("Allocating LZX context...");
if (!lzx_window_size_valid(window_size))
return WIMLIB_ERR_INVALID_PARAM;
- static const struct wimlib_lzx_compressor_params fast_default = {
- .hdr = {
- .size = sizeof(struct wimlib_lzx_compressor_params),
- },
- .algorithm = WIMLIB_LZX_ALGORITHM_FAST,
- .use_defaults = 0,
- .alg_params = {
- .fast = {
- },
- },
- };
- static const struct wimlib_lzx_compressor_params slow_default = {
- .hdr = {
- .size = sizeof(struct wimlib_lzx_compressor_params),
- },
- .algorithm = WIMLIB_LZX_ALGORITHM_SLOW,
- .use_defaults = 0,
- .alg_params = {
- .slow = {
- .use_len2_matches = 1,
- .num_fast_bytes = 32,
- .num_optim_passes = 2,
- .max_search_depth = 50,
- .max_matches_per_pos = 3,
- .main_nostat_cost = 15,
- .len_nostat_cost = 15,
- .aligned_nostat_cost = 7,
- },
- },
- };
-
- if (params) {
- if (!lzx_params_valid(params))
- return WIMLIB_ERR_INVALID_PARAM;
- } else {
- LZX_DEBUG("Using default algorithm and parameters.");
- params = &slow_default;
- }
-
- if (params->use_defaults) {
- if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW)
- params = &slow_default;
- else
- params = &fast_default;
- }
-
LZX_DEBUG("Allocating memory.");
ctx = CALLOC(1, sizeof(struct lzx_compressor));
if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
if (!lz_match_chooser_init(&ctx->mc,
LZX_OPTIM_ARRAY_SIZE,
- params->alg_params.slow.num_fast_bytes,
+ params->alg_params.slow.nice_match_length,
LZX_MAX_MATCH_LEN))
goto oom;
}
return WIMLIB_ERR_NOMEM;
}
+static u64
+lzx_get_needed_memory(size_t max_block_size,
+ const struct wimlib_compressor_params_header *_params)
+{
+ const struct wimlib_lzx_compressor_params *params = lzx_get_params(_params);
+
+ u64 size = 0;
+
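+	/* The lzx_compressor structure itself.  */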
+ size += sizeof(struct lzx_compressor);
+
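+	/* The window into which the data to compress will be copied, plus a
+	 * few extra bytes to allow the match-finding code to read off the end
+	 * of the array.  */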
+ size += max_block_size + 12;
+
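+	/* The per-block specifications (one per LZX_DIV_BLOCK_SIZE bytes of
+	 * data).  */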
+ size += DIV_ROUND_UP(max_block_size, LZX_DIV_BLOCK_SIZE) *
+ sizeof(((struct lzx_compressor*)0)->block_specs[0]);
+
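+	/* Structures needed only by the slow (near-optimal) algorithm: the
+	 * array of chosen matches, the suffix-array match-finder, the
+	 * match-chooser, and the match cache.  */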
+ if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW) {
+ size += max_block_size * sizeof(((struct lzx_compressor*)0)->chosen_matches[0]);
+ size += lz_sarray_get_needed_memory(max_block_size);
+ size += lz_match_chooser_get_needed_memory(LZX_OPTIM_ARRAY_SIZE,
+ params->alg_params.slow.nice_match_length,
+ LZX_MAX_MATCH_LEN);
+ u32 cache_per_pos;
+
+ cache_per_pos = params->alg_params.slow.max_matches_per_pos;
+ if (cache_per_pos > LZX_MAX_CACHE_PER_POS)
+ cache_per_pos = LZX_MAX_CACHE_PER_POS;
+
+ size += max_block_size * (cache_per_pos + 1) *
+ sizeof(((struct lzx_compressor*)0)->cached_matches[0]);
+ } else {
+ size += max_block_size * sizeof(((struct lzx_compressor*)0)->prev_tab[0]);
+ }
+ return size;
+}
+
+static bool
+lzx_params_valid(const struct wimlib_compressor_params_header *_params)
+{
+ const struct wimlib_lzx_compressor_params *params =
+ (const struct wimlib_lzx_compressor_params*)_params;
+
+ if (params->hdr.size != sizeof(struct wimlib_lzx_compressor_params)) {
+ LZX_DEBUG("Invalid parameter structure size!");
+ return false;
+ }
+
+ if (params->algorithm != WIMLIB_LZX_ALGORITHM_SLOW &&
+ params->algorithm != WIMLIB_LZX_ALGORITHM_FAST)
+ {
+ LZX_DEBUG("Invalid algorithm.");
+ return false;
+ }
+
+ if (params->algorithm == WIMLIB_LZX_ALGORITHM_SLOW &&
+ !params->use_defaults)
+ {
+ if (params->alg_params.slow.num_optim_passes < 1)
+ {
+ LZX_DEBUG("Invalid number of optimization passes!");
+ return false;
+ }
+
+ if (params->alg_params.slow.main_nostat_cost < 1 ||
+ params->alg_params.slow.main_nostat_cost > 16)
+ {
+ LZX_DEBUG("Invalid main_nostat_cost!");
+ return false;
+ }
+
+ if (params->alg_params.slow.len_nostat_cost < 1 ||
+ params->alg_params.slow.len_nostat_cost > 16)
+ {
+ LZX_DEBUG("Invalid len_nostat_cost!");
+ return false;
+ }
+
+ if (params->alg_params.slow.aligned_nostat_cost < 1 ||
+ params->alg_params.slow.aligned_nostat_cost > 8)
+ {
+ LZX_DEBUG("Invalid aligned_nostat_cost!");
+ return false;
+ }
+ }
+ return true;
+}
+
const struct compressor_ops lzx_compressor_ops = {
+ .params_valid = lzx_params_valid,
+ .get_needed_memory = lzx_get_needed_memory,
.create_compressor = lzx_create_compressor,
.compress = lzx_compress,
.free_compressor = lzx_free_compressor,