#include "wimlib/compressor_ops.h"
#include "wimlib/compress_common.h"
+#include "wimlib/endianness.h"
#include "wimlib/error.h"
#include "wimlib/lz_mf.h"
+#include "wimlib/lz_repsearch.h"
#include "wimlib/lzx.h"
#include "wimlib/util.h"
#include <string.h>
/* Allocated size of @cur_window. */
u32 max_window_size;
+ /* log2 order of the LZX window size for LZ match offset encoding
+ * purposes. Will be >= LZX_MIN_WINDOW_ORDER and <=
+ * LZX_MAX_WINDOW_ORDER.
+ *
+ * Note: 1 << @window_order is normally equal to @max_window_size, but
+ * it will be greater than @max_window_size in the event that the
+ * compressor was created with a non-power-of-2 block size. (See
+ * lzx_get_window_order().) */
+ unsigned window_order;
+
/* Compression parameters. */
struct lzx_compressor_params params;
unsigned (*get_matches_func)(struct lzx_compressor *, const struct lz_match **);
void (*skip_bytes_func)(struct lzx_compressor *, unsigned n);
- /* Number of symbols in the main alphabet (depends on the
- * @max_window_size since it determines the maximum allowed offset). */
+ /* Number of symbols in the main alphabet (depends on the @window_order
+ * since it determines the maximum allowed offset). */
unsigned num_main_syms;
/* The current match offset LRU queue. */
};
/* Adaptive state that exists after an approximate minimum-cost path to
- * reach this position is taken. */
+ * reach this position is taken.
+ *
+ * Note: we update this whenever we update the pending minimum-cost
+ * path. This is in contrast to LZMA, which also has an optimal parser
+ * that maintains a repeat offset queue per position, but will only
+ * compute the queue once that position is actually reached in the
+ * parse, meaning that matches are being considered *starting* at that
+ * position. However, the two methods seem to have approximately the
+ * same performance if appropriate optimizations are used. Intuitively
+ * the LZMA method seems faster, but it actually suffers from 1-2 extra
+ * hard-to-predict branches at each position. Probably it works better
+ * for LZMA than LZX because LZMA has a larger adaptive state than LZX,
+ * and the LZMA encoder considers more possibilities. */
struct lzx_lru_queue queue;
};
+
+/*
+ * Structure to keep track of the current state of sending bits to the
+ * compressed output buffer.
+ *
+ * The LZX bitstream is encoded as a sequence of 16-bit coding units.
+ */
+struct lzx_output_bitstream {
+
+	/* Bits that haven't yet been written to the output buffer.  Bits are
+	 * accumulated in the low-order end and flushed out 16 at a time, most
+	 * significant bit first. */
+	u32 bitbuf;
+
+	/* Number of bits currently held in @bitbuf. */
+	u32 bitcount;
+
+	/* Pointer to the start of the output buffer. */
+	le16 *start;
+
+	/* Pointer to the position in the output buffer at which the next coding
+	 * unit should be written. */
+	le16 *next;
+
+	/* Pointer past the end of the output buffer. */
+	le16 *end;
+};
+
+/*
+ * Initialize the output bitstream.
+ *
+ * @os
+ *	The output bitstream structure to initialize.
+ * @buffer
+ *	The buffer being written to.
+ * @size
+ *	Size of @buffer, in bytes.
+ */
+static void
+lzx_init_output(struct lzx_output_bitstream *os, void *buffer, u32 size)
+{
+	os->bitbuf = 0;
+	os->bitcount = 0;
+	os->start = buffer;
+	os->next = os->start;
+	/* Round down to a whole number of 16-bit coding units; an odd trailing
+	 * byte, if any, is never written. */
+	os->end = os->start + size / sizeof(le16);
+}
+
+/*
+ * Write some bits to the output bitstream.
+ *
+ * The bits are given by the low-order @num_bits bits of @bits.  Higher-order
+ * bits in @bits cannot be set.  At most 17 bits can be written at once.
+ *
+ * @max_num_bits is a compile-time constant that specifies the maximum number
+ * of bits that can ever be written at the call site.  Currently, it is used to
+ * optimize away the conditional code for writing a second 16-bit coding unit
+ * when writing fewer than 17 bits.
+ *
+ * If the output buffer space is exhausted, then the bits will be ignored, and
+ * lzx_flush_output() will return 0 when it gets called.
+ */
+static _always_inline_attribute void
+lzx_write_varbits(struct lzx_output_bitstream *os,
+		  const u32 bits, const unsigned int num_bits,
+		  const unsigned int max_num_bits)
+{
+	/* This code is optimized for LZX, which never needs to write more than
+	 * 17 bits at once. */
+	LZX_ASSERT(num_bits <= 17);
+	LZX_ASSERT(num_bits <= max_num_bits);
+	LZX_ASSERT(os->bitcount <= 15);
+
+	/* Add the bits to the bit buffer variable.  @bitcount will be at most
+	 * 15, so there will be just enough space for the maximum possible
+	 * @num_bits of 17. */
+	os->bitcount += num_bits;
+	os->bitbuf = (os->bitbuf << num_bits) | bits;
+
+	/* Check whether any coding units need to be written. */
+	if (os->bitcount >= 16) {
+
+		os->bitcount -= 16;
+
+		/* Write a coding unit, unless it would overflow the buffer. */
+		if (os->next != os->end)
+			*os->next++ = cpu_to_le16(os->bitbuf >> os->bitcount);
+
+		/* If writing 17 bits, a second coding unit might need to be
+		 * written.  But because 'max_num_bits' is a compile-time
+		 * constant, the compiler will optimize away this code at most
+		 * call sites. */
+		if (max_num_bits == 17 && os->bitcount == 16) {
+			if (os->next != os->end)
+				*os->next++ = cpu_to_le16(os->bitbuf);
+			/* Both coding units have now been flushed, so nothing
+			 * remains pending in @bitbuf. */
+			os->bitcount = 0;
+		}
+	}
+}
+
+/* Write @num_bits bits, given by the low-order @num_bits bits of @bits, to the
+ * output bitstream.  Use this only when @num_bits is a compile-time constant;
+ * otherwise use lzx_write_varbits(). */
+static _always_inline_attribute void
+lzx_write_bits(struct lzx_output_bitstream *os,
+	       const u32 bits, const unsigned int num_bits)
+{
+	lzx_write_varbits(os, bits, num_bits, num_bits);
+}
+
+/*
+ * Flush the last coding unit to the output buffer if needed.  Return the total
+ * number of bytes written to the output buffer, or 0 if an overflow occurred.
+ *
+ * Note: an output that filled the buffer to the very end is also reported as
+ * an overflow, since a final partial coding unit could not have been written
+ * in that case.
+ */
+static u32
+lzx_flush_output(struct lzx_output_bitstream *os)
+{
+	if (os->next == os->end)
+		return 0;
+
+	/* Pad the final partial coding unit, if any, with zero bits on the
+	 * least-significant end. */
+	if (os->bitcount != 0)
+		*os->next++ = cpu_to_le16(os->bitbuf << (16 - os->bitcount));
+
+	return (const u8 *)os->next - (const u8 *)os->start;
+}
+
/* Returns the LZX position slot that corresponds to a given match offset,
* taking into account the recent offset queue and updating it if the offset is
* found in it. */
/*
* Output a precomputed LZX match.
*
- * @out:
+ * @os:
* The bitstream to which to write the match.
- * @block_type:
- * The type of the LZX block (LZX_BLOCKTYPE_ALIGNED or
- * LZX_BLOCKTYPE_VERBATIM)
+ * @ones_if_aligned:
+ * A mask of all ones if the block is of type LZX_BLOCKTYPE_ALIGNED,
+ * otherwise 0.
* @match:
* The match data.
* @codes:
* and aligned offset Huffman codes for the current LZX compressed block.
*/
static void
-lzx_write_match(struct output_bitstream *out, int block_type,
+lzx_write_match(struct lzx_output_bitstream *os, unsigned ones_if_aligned,
struct lzx_item match, const struct lzx_codes *codes)
{
- /* low 8 bits are the match length minus 2 */
unsigned match_len_minus_2 = match.data & 0xff;
- /* Next 17 bits are the position footer */
- unsigned position_footer = (match.data >> 8) & 0x1ffff; /* 17 bits */
- /* Next 6 bits are the position slot. */
- unsigned position_slot = (match.data >> 25) & 0x3f; /* 6 bits */
+ u32 position_footer = (match.data >> 8) & 0x1ffff;
+ unsigned position_slot = (match.data >> 25) & 0x3f;
unsigned len_header;
unsigned len_footer;
unsigned main_symbol;
unsigned num_extra_bits;
- unsigned verbatim_bits;
- unsigned aligned_bits;
/* If the match length is less than MIN_MATCH_LEN (= 2) +
- * NUM_PRIMARY_LENS (= 7), the length header contains
- * the match length minus MIN_MATCH_LEN, and there is no
- * length footer.
+ * NUM_PRIMARY_LENS (= 7), the length header contains the match length
+ * minus MIN_MATCH_LEN, and there is no length footer.
*
- * Otherwise, the length header contains
- * NUM_PRIMARY_LENS, and the length footer contains
- * the match length minus NUM_PRIMARY_LENS minus
+ * Otherwise, the length header contains NUM_PRIMARY_LENS, and the
+ * length footer contains the match length minus NUM_PRIMARY_LENS minus
* MIN_MATCH_LEN. */
if (match_len_minus_2 < LZX_NUM_PRIMARY_LENS) {
len_header = match_len_minus_2;
main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
/* Output main symbol. */
- bitstream_put_bits(out, codes->codewords.main[main_symbol],
- codes->lens.main[main_symbol]);
+ lzx_write_varbits(os, codes->codewords.main[main_symbol],
+ codes->lens.main[main_symbol],
+ LZX_MAX_MAIN_CODEWORD_LEN);
/* If there is a length footer, output it using the
* length Huffman code. */
- if (len_header == LZX_NUM_PRIMARY_LENS)
- bitstream_put_bits(out, codes->codewords.len[len_footer],
- codes->lens.len[len_footer]);
+ if (len_header == LZX_NUM_PRIMARY_LENS) {
+ lzx_write_varbits(os, codes->codewords.len[len_footer],
+ codes->lens.len[len_footer],
+ LZX_MAX_LEN_CODEWORD_LEN);
+ }
+
+ /* Output the position footer. */
num_extra_bits = lzx_get_num_extra_bits(position_slot);
- /* For aligned offset blocks with at least 3 extra bits, output the
- * verbatim bits literally, then the aligned bits encoded using the
- * aligned offset code. Otherwise, only the verbatim bits need to be
- * output. */
- if ((block_type == LZX_BLOCKTYPE_ALIGNED) && (num_extra_bits >= 3)) {
+ if ((num_extra_bits & ones_if_aligned) >= 3) {
+
+ /* Aligned offset blocks: The low 3 bits of the position footer
+ * are Huffman-encoded using the aligned offset code. The
+ * remaining bits are output literally. */
- verbatim_bits = position_footer >> 3;
- bitstream_put_bits(out, verbatim_bits,
- num_extra_bits - 3);
+ lzx_write_varbits(os,
+ position_footer >> 3, num_extra_bits - 3, 14);
- aligned_bits = (position_footer & 7);
- bitstream_put_bits(out,
- codes->codewords.aligned[aligned_bits],
- codes->lens.aligned[aligned_bits]);
+ lzx_write_varbits(os,
+ codes->codewords.aligned[position_footer & 7],
+ codes->lens.aligned[position_footer & 7],
+ LZX_MAX_ALIGNED_CODEWORD_LEN);
} else {
- /* verbatim bits is the same as the position
- * footer, in this case. */
- bitstream_put_bits(out, position_footer, num_extra_bits);
+ /* Verbatim blocks, or fewer than 3 extra bits: All position
+ * footer bits are output literally. */
+ lzx_write_varbits(os, position_footer, num_extra_bits, 17);
}
}
/* Output an LZX literal (encoded with the main Huffman code). */
static void
-lzx_write_literal(struct output_bitstream *out, u8 literal,
+lzx_write_literal(struct lzx_output_bitstream *os, unsigned literal,
const struct lzx_codes *codes)
{
- bitstream_put_bits(out,
- codes->codewords.main[literal],
- codes->lens.main[literal]);
+ lzx_write_varbits(os, codes->codewords.main[literal],
+ codes->lens.main[literal], LZX_MAX_MAIN_CODEWORD_LEN);
}
static unsigned
-lzx_build_precode(const u8 lens[restrict],
- const u8 prev_lens[restrict],
- const unsigned num_syms,
- u32 precode_freqs[restrict LZX_PRECODE_NUM_SYMBOLS],
- u8 output_syms[restrict num_syms],
- u8 precode_lens[restrict LZX_PRECODE_NUM_SYMBOLS],
- u32 precode_codewords[restrict LZX_PRECODE_NUM_SYMBOLS],
- unsigned *num_additional_bits_ret)
+lzx_compute_precode_items(const u8 lens[restrict],
+ const u8 prev_lens[restrict],
+ const unsigned num_lens,
+ u32 precode_freqs[restrict],
+ unsigned precode_items[restrict])
{
- memset(precode_freqs, 0,
- LZX_PRECODE_NUM_SYMBOLS * sizeof(precode_freqs[0]));
-
- /* Since the code word lengths use a form of RLE encoding, the goal here
- * is to find each run of identical lengths when going through them in
- * symbol order (including runs of length 1). For each run, as many
- * lengths are encoded using RLE as possible, and the rest are output
- * literally.
- *
- * output_syms[] will be filled in with the length symbols that will be
- * output, including RLE codes, not yet encoded using the precode.
- *
- * cur_run_len keeps track of how many code word lengths are in the
- * current run of identical lengths. */
- unsigned output_syms_idx = 0;
- unsigned cur_run_len = 1;
- unsigned num_additional_bits = 0;
- for (unsigned i = 1; i <= num_syms; i++) {
-
- if (i != num_syms && lens[i] == lens[i - 1]) {
- /* Still in a run--- keep going. */
- cur_run_len++;
- continue;
- }
+ unsigned *itemptr;
+ unsigned run_start;
+ unsigned run_end;
+ unsigned extra_bits;
+ int delta;
+ u8 len;
+
+ itemptr = precode_items;
+ run_start = 0;
+ do {
+ /* Find the next run of codeword lengths. */
+
+ /* len = the length being repeated */
+ len = lens[run_start];
- /* Run ended! Check if it is a run of zeroes or a run of
- * nonzeroes. */
+ run_end = run_start + 1;
- /* The symbol that was repeated in the run--- not to be confused
- * with the length *of* the run (cur_run_len) */
- unsigned len_in_run = lens[i - 1];
+ /* Fast case for a single length. */
+ if (likely(run_end == num_lens || len != lens[run_end])) {
+ delta = prev_lens[run_start] - len;
+ if (delta < 0)
+ delta += 17;
+ precode_freqs[delta]++;
+ *itemptr++ = delta;
+ run_start++;
+ continue;
+ }
- if (len_in_run == 0) {
- /* A run of 0's. Encode it in as few length
- * codes as we can. */
+ /* Extend the run. */
+ do {
+ run_end++;
+ } while (run_end != num_lens && len == lens[run_end]);
- /* The magic length 18 indicates a run of 20 + n zeroes,
- * where n is an uncompressed literal 5-bit integer that
- * follows the magic length. */
- while (cur_run_len >= 20) {
- unsigned additional_bits;
+ if (len == 0) {
+ /* Run of zeroes. */
- additional_bits = min(cur_run_len - 20, 0x1f);
- num_additional_bits += 5;
+ /* Symbol 18: RLE 20 to 51 zeroes at a time. */
+ while ((run_end - run_start) >= 20) {
+ extra_bits = min((run_end - run_start) - 20, 0x1f);
precode_freqs[18]++;
- output_syms[output_syms_idx++] = 18;
- output_syms[output_syms_idx++] = additional_bits;
- cur_run_len -= 20 + additional_bits;
+ *itemptr++ = 18 | (extra_bits << 5);
+ run_start += 20 + extra_bits;
}
- /* The magic length 17 indicates a run of 4 + n zeroes,
- * where n is an uncompressed literal 4-bit integer that
- * follows the magic length. */
- while (cur_run_len >= 4) {
- unsigned additional_bits;
-
- additional_bits = min(cur_run_len - 4, 0xf);
- num_additional_bits += 4;
+ /* Symbol 17: RLE 4 to 19 zeroes at a time. */
+ if ((run_end - run_start) >= 4) {
+ extra_bits = min((run_end - run_start) - 4, 0xf);
precode_freqs[17]++;
- output_syms[output_syms_idx++] = 17;
- output_syms[output_syms_idx++] = additional_bits;
- cur_run_len -= 4 + additional_bits;
+ *itemptr++ = 17 | (extra_bits << 5);
+ run_start += 4 + extra_bits;
}
-
} else {
/* A run of nonzero lengths. */
- /* The magic length 19 indicates a run of 4 + n
- * nonzeroes, where n is a literal bit that follows the
- * magic length, and where the value of the lengths in
- * the run is given by an extra length symbol, encoded
- * with the precode, that follows the literal bit.
- *
- * The extra length symbol is encoded as a difference
- * from the length of the codeword for the first symbol
- * in the run in the previous code.
- * */
- while (cur_run_len >= 4) {
- unsigned additional_bits;
- signed char delta;
-
- additional_bits = (cur_run_len > 4);
- num_additional_bits += 1;
- delta = (signed char)prev_lens[i - cur_run_len] -
- (signed char)len_in_run;
+ /* Symbol 19: RLE 4 to 5 of any length at a time. */
+ while ((run_end - run_start) >= 4) {
+ extra_bits = (run_end - run_start) > 4;
+ delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
precode_freqs[19]++;
- precode_freqs[(unsigned char)delta]++;
- output_syms[output_syms_idx++] = 19;
- output_syms[output_syms_idx++] = additional_bits;
- output_syms[output_syms_idx++] = delta;
- cur_run_len -= 4 + additional_bits;
+ precode_freqs[delta]++;
+ *itemptr++ = 19 | (extra_bits << 5) | (delta << 6);
+ run_start += 4 + extra_bits;
}
}
- /* Any remaining lengths in the run are outputted without RLE,
- * as a difference from the length of that codeword in the
- * previous code. */
- while (cur_run_len > 0) {
- signed char delta;
-
- delta = (signed char)prev_lens[i - cur_run_len] -
- (signed char)len_in_run;
+ /* Output any remaining lengths without RLE. */
+ while (run_start != run_end) {
+ delta = prev_lens[run_start] - len;
if (delta < 0)
delta += 17;
-
- precode_freqs[(unsigned char)delta]++;
- output_syms[output_syms_idx++] = delta;
- cur_run_len--;
+ precode_freqs[delta]++;
+ *itemptr++ = delta;
+ run_start++;
}
+ } while (run_start != num_lens);
- cur_run_len = 1;
- }
-
- /* Build the precode from the frequencies of the length symbols. */
-
- make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
- LZX_MAX_PRE_CODEWORD_LEN,
- precode_freqs, precode_lens,
- precode_codewords);
-
- *num_additional_bits_ret = num_additional_bits;
-
- return output_syms_idx;
+ return itemptr - precode_items;
}
/*
* as deltas from the codeword lengths of the corresponding code in the previous
* block.
*
- * @out:
+ * @os:
* Bitstream to which to write the compressed Huffman code.
* @lens:
* The codeword lengths, indexed by symbol, in the Huffman code.
* @prev_lens:
* The codeword lengths, indexed by symbol, in the corresponding Huffman
* code in the previous block, or all zeroes if this is the first block.
- * @num_syms:
+ * @num_lens:
* The number of symbols in the Huffman code.
*/
static void
-lzx_write_compressed_code(struct output_bitstream *out,
+lzx_write_compressed_code(struct lzx_output_bitstream *os,
const u8 lens[restrict],
const u8 prev_lens[restrict],
- unsigned num_syms)
+ unsigned num_lens)
{
u32 precode_freqs[LZX_PRECODE_NUM_SYMBOLS];
- u8 output_syms[num_syms];
u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
u32 precode_codewords[LZX_PRECODE_NUM_SYMBOLS];
+ unsigned precode_items[num_lens];
+ unsigned num_precode_items;
+ unsigned precode_item;
+ unsigned precode_sym;
unsigned i;
- unsigned num_output_syms;
- u8 precode_sym;
- unsigned dummy;
-
- num_output_syms = lzx_build_precode(lens,
- prev_lens,
- num_syms,
- precode_freqs,
- output_syms,
- precode_lens,
- precode_codewords,
- &dummy);
-
- /* Write the lengths of the precode codes to the output. */
+
+ for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
+ precode_freqs[i] = 0;
+
+ /* Compute the "items" (RLE / literal tokens and extra bits) with which
+ * the codeword lengths in the larger code will be output. */
+ num_precode_items = lzx_compute_precode_items(lens,
+ prev_lens,
+ num_lens,
+ precode_freqs,
+ precode_items);
+
+ /* Build the precode. */
+ make_canonical_huffman_code(LZX_PRECODE_NUM_SYMBOLS,
+ LZX_MAX_PRE_CODEWORD_LEN,
+ precode_freqs, precode_lens,
+ precode_codewords);
+
+ /* Output the lengths of the codewords in the precode. */
for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++)
- bitstream_put_bits(out, precode_lens[i],
- LZX_PRECODE_ELEMENT_SIZE);
-
- /* Write the length symbols, encoded with the precode, to the output. */
-
- for (i = 0; i < num_output_syms; ) {
- precode_sym = output_syms[i++];
-
- bitstream_put_bits(out, precode_codewords[precode_sym],
- precode_lens[precode_sym]);
- switch (precode_sym) {
- case 17:
- bitstream_put_bits(out, output_syms[i++], 4);
- break;
- case 18:
- bitstream_put_bits(out, output_syms[i++], 5);
- break;
- case 19:
- bitstream_put_bits(out, output_syms[i++], 1);
- bitstream_put_bits(out,
- precode_codewords[output_syms[i]],
- precode_lens[output_syms[i]]);
- i++;
- break;
- default:
- break;
+ lzx_write_bits(os, precode_lens[i], LZX_PRECODE_ELEMENT_SIZE);
+
+ /* Output the encoded lengths of the codewords in the larger code. */
+ for (i = 0; i < num_precode_items; i++) {
+ precode_item = precode_items[i];
+ precode_sym = precode_item & 0x1F;
+ lzx_write_varbits(os, precode_codewords[precode_sym],
+ precode_lens[precode_sym],
+ LZX_MAX_PRE_CODEWORD_LEN);
+ if (precode_sym >= 17) {
+ if (precode_sym == 17) {
+ lzx_write_bits(os, precode_item >> 5, 4);
+ } else if (precode_sym == 18) {
+ lzx_write_bits(os, precode_item >> 5, 5);
+ } else {
+ lzx_write_bits(os, (precode_item >> 5) & 1, 1);
+ precode_sym = precode_item >> 6;
+ lzx_write_varbits(os, precode_codewords[precode_sym],
+ precode_lens[precode_sym],
+ LZX_MAX_PRE_CODEWORD_LEN);
+ }
}
}
}
* compressed block to the output bitstream in the final compressed
* representation.
*
- * @ostream
+ * @os
* The output bitstream.
* @block_type
* The chosen type of the LZX compressed block (LZX_BLOCKTYPE_ALIGNED or
* LZX compressed block.
*/
static void
-lzx_write_items(struct output_bitstream *ostream, int block_type,
+lzx_write_items(struct lzx_output_bitstream *os, int block_type,
const struct lzx_item items[], u32 num_items,
const struct lzx_codes *codes)
{
+ unsigned ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
+
for (u32 i = 0; i < num_items; i++) {
/* The high bit of the 32-bit intermediate representation
* indicates whether the item is an actual LZ-style match (1) or
* a literal byte (0). */
if (items[i].data & 0x80000000)
- lzx_write_match(ostream, block_type, items[i], codes);
+ lzx_write_match(os, ones_if_aligned, items[i], codes);
else
- lzx_write_literal(ostream, items[i].data, codes);
+ lzx_write_literal(os, items[i].data, codes);
}
}
/* Write an LZX aligned offset or verbatim block to the output. */
static void
lzx_write_compressed_block(int block_type,
- unsigned block_size,
- unsigned max_window_size,
+ u32 block_size,
+ unsigned window_order,
unsigned num_main_syms,
struct lzx_item * chosen_items,
- unsigned num_chosen_items,
+ u32 num_chosen_items,
const struct lzx_codes * codes,
const struct lzx_codes * prev_codes,
- struct output_bitstream * ostream)
+ struct lzx_output_bitstream * os)
{
- unsigned i;
-
LZX_ASSERT(block_type == LZX_BLOCKTYPE_ALIGNED ||
block_type == LZX_BLOCKTYPE_VERBATIM);
/* The first three bits indicate the type of block and are one of the
* LZX_BLOCKTYPE_* constants. */
- bitstream_put_bits(ostream, block_type, 3);
+ lzx_write_bits(os, block_type, 3);
/* Output the block size.
*
* because WIMs created with chunk size greater than 32768 can seemingly
* only be opened by wimlib anyway. */
if (block_size == LZX_DEFAULT_BLOCK_SIZE) {
- bitstream_put_bits(ostream, 1, 1);
+ lzx_write_bits(os, 1, 1);
} else {
- bitstream_put_bits(ostream, 0, 1);
+ lzx_write_bits(os, 0, 1);
- if (max_window_size >= 65536)
- bitstream_put_bits(ostream, block_size >> 16, 8);
+ if (window_order >= 16)
+ lzx_write_bits(os, block_size >> 16, 8);
- bitstream_put_bits(ostream, block_size, 16);
+ lzx_write_bits(os, block_size & 0xFFFF, 16);
}
- /* Write out lengths of the main code. Note that the LZX specification
- * incorrectly states that the aligned offset code comes after the
- * length code, but in fact it is the very first code to be written
- * (before the main code). */
- if (block_type == LZX_BLOCKTYPE_ALIGNED)
- for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++)
- bitstream_put_bits(ostream, codes->lens.aligned[i],
- LZX_ALIGNEDCODE_ELEMENT_SIZE);
-
- /* Write the precode and lengths for the first LZX_NUM_CHARS symbols in
- * the main code, which are the codewords for literal bytes. */
- lzx_write_compressed_code(ostream,
- codes->lens.main,
+ /* Output the aligned offset code. */
+ if (block_type == LZX_BLOCKTYPE_ALIGNED) {
+ for (int i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
+ lzx_write_bits(os, codes->lens.aligned[i],
+ LZX_ALIGNEDCODE_ELEMENT_SIZE);
+ }
+ }
+
+ /* Output the main code (two parts). */
+ lzx_write_compressed_code(os, codes->lens.main,
prev_codes->lens.main,
LZX_NUM_CHARS);
-
- /* Write the precode and lengths for the rest of the main code, which
- * are the codewords for match headers. */
- lzx_write_compressed_code(ostream,
- codes->lens.main + LZX_NUM_CHARS,
+ lzx_write_compressed_code(os, codes->lens.main + LZX_NUM_CHARS,
prev_codes->lens.main + LZX_NUM_CHARS,
num_main_syms - LZX_NUM_CHARS);
- /* Write the precode and lengths for the length code. */
- lzx_write_compressed_code(ostream,
- codes->lens.len,
+ /* Output the length code. */
+ lzx_write_compressed_code(os, codes->lens.len,
prev_codes->lens.len,
LZX_LENCODE_NUM_SYMBOLS);
- /* Write the actual matches and literals. */
- lzx_write_items(ostream, block_type,
- chosen_items, num_chosen_items, codes);
+ /* Output the compressed matches and literals. */
+ lzx_write_items(os, block_type, chosen_items, num_chosen_items, codes);
}
/* Write out the LZX blocks that were computed. */
static void
-lzx_write_all_blocks(struct lzx_compressor *c, struct output_bitstream *ostream)
+lzx_write_all_blocks(struct lzx_compressor *c, struct lzx_output_bitstream *os)
{
const struct lzx_codes *prev_codes = &c->zero_codes;
for (unsigned i = 0; i < c->num_blocks; i++) {
const struct lzx_block_spec *spec = &c->block_specs[i];
- LZX_DEBUG("Writing block %u/%u (type=%d, size=%u, num_chosen_items=%u)...",
- i + 1, c->num_blocks,
- spec->block_type, spec->block_size,
- spec->num_chosen_items);
-
lzx_write_compressed_block(spec->block_type,
spec->block_size,
- c->max_window_size,
+ c->window_order,
c->num_main_syms,
spec->chosen_items,
spec->num_chosen_items,
&spec->codes,
prev_codes,
- ostream);
+ os);
prev_codes = &spec->codes;
}
struct lzx_freqs *freqs, struct lzx_lru_queue *queue)
{
unsigned position_slot;
- unsigned position_footer;
+ u32 position_footer;
u32 len_header;
unsigned main_symbol;
unsigned len_footer;
* as part of the main symbol) and a position footer. */
position_slot = lzx_get_position_slot(match_offset, queue);
position_footer = (match_offset + LZX_OFFSET_OFFSET) &
- ((1U << lzx_get_num_extra_bits(position_slot)) - 1);
+ (((u32)1 << lzx_get_num_extra_bits(position_slot)) - 1);
/* The match length shall be encoded as a length header (itself encoded
* as part of the main symbol) and an optional length footer. */
return costs->main[c];
}
-/* Given a (length, offset) pair that could be turned into a valid LZX match as
- * well as costs for the codewords in the main, length, and aligned Huffman
- * codes, return the approximate number of bits it will take to represent this
- * match in the compressed output. Take into account the match offset LRU
- * queue and also updates it. */
+/* Returns the cost, in bits, to output a repeat offset match of the specified
+ * length and position slot (repeat index) using the specified cost model. */
static u32
-lzx_match_cost(unsigned length, u32 offset, const struct lzx_costs *costs,
- struct lzx_lru_queue *queue)
+lzx_repmatch_cost(u32 len, unsigned position_slot, const struct lzx_costs *costs)
{
- unsigned position_slot;
unsigned len_header, main_symbol;
- unsigned num_extra_bits;
u32 cost = 0;
- position_slot = lzx_get_position_slot(offset, queue);
-
- len_header = min(length - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
+ len_header = min(len - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
/* Account for main symbol. */
cost += costs->main[main_symbol];
- /* Account for extra position information. */
- num_extra_bits = lzx_get_num_extra_bits(position_slot);
- if (num_extra_bits >= 3) {
- cost += num_extra_bits - 3;
- cost += costs->aligned[(offset + LZX_OFFSET_OFFSET) & 7];
- } else {
- cost += num_extra_bits;
- }
-
/* Account for extra length information. */
if (len_header == LZX_NUM_PRIMARY_LENS)
- cost += costs->len[length - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
+ cost += costs->len[len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
return cost;
-
}
-
/* Set the cost model @c->costs from the Huffman codeword lengths specified in
* @lens.
*
}
/*
- * lzx_choose_near_optimal_match() -
+ * Find the longest repeat offset match.
+ *
+ * If no match of at least LZX_MIN_MATCH_LEN bytes is found, then return 0.
+ *
+ * If a match of at least LZX_MIN_MATCH_LEN bytes is found, then return its
+ * length and set *slot_ret to the index of its offset in @queue.
+ */
+static inline u32
+lzx_repsearch(const u8 * const strptr, const u32 bytes_remaining,
+	      const struct lzx_lru_queue *queue, unsigned *slot_ret)
+{
+	/* This wrapper relies on the minimum match length being exactly 2;
+	 * presumably lz_repsearch() hard-codes that assumption — verify
+	 * against its definition in lz_repsearch.h. */
+	BUILD_BUG_ON(LZX_MIN_MATCH_LEN != 2);
+	return lz_repsearch(strptr, bytes_remaining, LZX_MAX_MATCH_LEN,
+			    queue->R, LZX_NUM_RECENT_OFFSETS, slot_ret);
+}
+
+/*
+ * lzx_choose_near_optimal_item() -
*
* Choose an approximately optimal match or literal to use at the next position
* in the string, or "window", being LZ-encoded.
unsigned num_matches;
const struct lz_match *matches;
struct lz_match match;
- unsigned longest_len;
- unsigned longest_rep_len;
- u32 longest_rep_offset;
+ u32 longest_len;
+ u32 longest_rep_len;
+ unsigned longest_rep_slot;
unsigned cur_pos;
unsigned end_pos;
struct lzx_mc_pos_data *optimum = c->optimum;
c->optimum_cur_idx = 0;
c->optimum_end_idx = 0;
- /* Search for matches at recent offsets. Only keep the one with the
- * longest match length. */
- longest_rep_len = LZX_MIN_MATCH_LEN - 1;
- if (c->match_window_pos >= 1) {
- unsigned limit = min(LZX_MAX_MATCH_LEN,
- c->match_window_end - c->match_window_pos);
- for (int i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
- u32 offset = c->queue.R[i];
- const u8 *strptr = &c->cur_window[c->match_window_pos];
- const u8 *matchptr = strptr - offset;
- unsigned len = 0;
- while (len < limit && strptr[len] == matchptr[len])
- len++;
- if (len > longest_rep_len) {
- longest_rep_len = len;
- longest_rep_offset = offset;
- }
- }
+ /* Search for matches at repeat offsets. As a heuristic, we only keep
+ * the one with the longest match length. */
+ if (likely(c->match_window_pos >= 1)) {
+ longest_rep_len = lzx_repsearch(&c->cur_window[c->match_window_pos],
+ c->match_window_end - c->match_window_pos,
+ &c->queue,
+ &longest_rep_slot);
+ } else {
+ longest_rep_len = 0;
}
- /* If there's a long match with a recent offset, take it. */
+ /* If there's a long match with a repeat offset, choose it immediately. */
if (longest_rep_len >= c->params.nice_match_length) {
lzx_skip_bytes(c, longest_rep_len);
return (struct lz_match) {
.len = longest_rep_len,
- .offset = longest_rep_offset,
+ .offset = c->queue.R[longest_rep_slot],
};
}
- /* Search other matches. */
+ /* Find other matches. */
num_matches = lzx_get_matches(c, &matches);
- /* If there's a long match, take it. */
+ /* If there's a long match, choose it immediately. */
if (num_matches) {
longest_len = matches[num_matches - 1].len;
if (longest_len >= c->params.nice_match_length) {
longest_len = 1;
}
- /* Calculate the cost to reach the next position by coding a literal.
- */
+ /* Calculate the cost to reach the next position by coding a literal. */
optimum[1].queue = c->queue;
optimum[1].cost = lzx_literal_cost(c->cur_window[c->match_window_pos - 1],
&c->costs);
}
do {
+ u32 cost;
unsigned len_header;
unsigned main_symbol;
- u32 cost;
cost = position_cost;
- len_header = min(len - LZX_MIN_MATCH_LEN, LZX_NUM_PRIMARY_LENS);
+ if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
+ len_header = len - LZX_MIN_MATCH_LEN;
+ } else {
+ len_header = LZX_NUM_PRIMARY_LENS;
+ cost += c->costs.len[len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
+ }
+
main_symbol = ((position_slot << 3) | len_header) + LZX_NUM_CHARS;
cost += c->costs.main[main_symbol];
- if (len_header == LZX_NUM_PRIMARY_LENS)
- cost += c->costs.len[len - LZX_MIN_MATCH_LEN - LZX_NUM_PRIMARY_LENS];
optimum[len].queue = queue;
optimum[len].prev.link = 0;
}
end_pos = longest_len;
- if (longest_rep_len >= LZX_MIN_MATCH_LEN) {
- struct lzx_lru_queue queue;
+ if (longest_rep_len) {
+
+ LZX_ASSERT(longest_rep_len >= LZX_MIN_MATCH_LEN);
+
u32 cost;
while (end_pos < longest_rep_len)
optimum[++end_pos].cost = MC_INFINITE_COST;
- queue = c->queue;
- cost = lzx_match_cost(longest_rep_len, longest_rep_offset,
- &c->costs, &queue);
+ cost = lzx_repmatch_cost(longest_rep_len, longest_rep_slot,
+ &c->costs);
if (cost <= optimum[longest_rep_len].cost) {
- optimum[longest_rep_len].queue = queue;
+ optimum[longest_rep_len].queue = c->queue;
+ swap(optimum[longest_rep_len].queue.R[0],
+ optimum[longest_rep_len].queue.R[longest_rep_slot]);
optimum[longest_rep_len].prev.link = 0;
- optimum[longest_rep_len].prev.match_offset = longest_rep_offset;
+ optimum[longest_rep_len].prev.match_offset =
+ optimum[longest_rep_len].queue.R[0];
optimum[longest_rep_len].cost = cost;
}
}
* position. The algorithm may find multiple paths to reach each
* position; only the lowest-cost path is saved.
*
- * The progress of the parse is tracked in the @optimum array, which
- * for each position contains the minimum cost to reach that position,
- * the index of the start of the match/literal taken to reach that
- * position through the minimum-cost path, the offset of the match taken
- * (not relevant for literals), and the adaptive state that will exist
- * at that position after the minimum-cost path is taken. The @cur_pos
+ * The progress of the parse is tracked in the @optimum array, which for
+ * each position contains the minimum cost to reach that position, the
+ * index of the start of the match/literal taken to reach that position
+ * through the minimum-cost path, the offset of the match taken (not
+ * relevant for literals), and the adaptive state that will exist at
+ * that position after the minimum-cost path is taken. The @cur_pos
* variable stores the position at which the algorithm is currently
* considering coding choices, and the @end_pos variable stores the
* greatest position at which the costs of coding choices have been
- * saved. (Actually, the algorithm guarantees that all positions up to
- * and including @end_pos are reachable by at least one path.)
+ * saved.
*
* The loop terminates when any one of the following conditions occurs:
*
if (cur_pos == end_pos || cur_pos == LZX_OPTIM_ARRAY_LENGTH)
return lzx_match_chooser_reverse_list(c, cur_pos);
- /* Search for matches at recent offsets. */
- longest_rep_len = LZX_MIN_MATCH_LEN - 1;
- unsigned limit = min(LZX_MAX_MATCH_LEN,
- c->match_window_end - c->match_window_pos);
- for (int i = 0; i < LZX_NUM_RECENT_OFFSETS; i++) {
- u32 offset = optimum[cur_pos].queue.R[i];
- const u8 *strptr = &c->cur_window[c->match_window_pos];
- const u8 *matchptr = strptr - offset;
- unsigned len = 0;
- while (len < limit && strptr[len] == matchptr[len])
- len++;
- if (len > longest_rep_len) {
- longest_rep_len = len;
- longest_rep_offset = offset;
- }
- }
+ /* Search for matches at repeat offsets. Again, as a heuristic
+ * we only keep the longest one. */
+ longest_rep_len = lzx_repsearch(&c->cur_window[c->match_window_pos],
+ c->match_window_end - c->match_window_pos,
+ &optimum[cur_pos].queue,
+ &longest_rep_slot);
- /* If we found a long match at a recent offset, choose it
+ /* If we found a long match at a repeat offset, choose it
* immediately. */
if (longest_rep_len >= c->params.nice_match_length) {
/* Build the list of matches to return and get
match = lzx_match_chooser_reverse_list(c, cur_pos);
/* Append the long match to the end of the list. */
- optimum[cur_pos].next.match_offset = longest_rep_offset;
+ optimum[cur_pos].next.match_offset =
+ optimum[cur_pos].queue.R[longest_rep_slot];
optimum[cur_pos].next.link = cur_pos + longest_rep_len;
c->optimum_end_idx = cur_pos + longest_rep_len;
return match;
}
- /* Search other matches. */
+ /* Find other matches. */
num_matches = lzx_get_matches(c, &matches);
- /* If there's a long match, take it. */
+ /* If there's a long match, choose it immediately. */
if (num_matches) {
longest_len = matches[num_matches - 1].len;
if (longest_len >= c->params.nice_match_length) {
longest_len = 1;
}
+ /* If we are reaching any positions for the first time, we need
+ * to initialize their costs to infinity. */
while (end_pos < cur_pos + longest_len)
optimum[++end_pos].cost = MC_INFINITE_COST;
* length. */
for (unsigned i = 0, len = 2; i < num_matches; i++) {
u32 offset;
- struct lzx_lru_queue queue;
u32 position_cost;
unsigned position_slot;
unsigned num_extra_bits;
offset = matches[i].offset;
- queue = optimum[cur_pos].queue;
position_cost = optimum[cur_pos].cost;
- position_slot = lzx_get_position_slot(offset, &queue);
+ /* Yet another optimization: instead of calling
+ * lzx_get_position_slot(), hand-inline the search of
+ * the repeat offset queue. Then we can omit the
+ * extra_bits calculation for repeat offset matches, and
+ * also only compute the updated queue if we actually do
+ * find a new lowest cost path. */
+ for (position_slot = 0; position_slot < LZX_NUM_RECENT_OFFSETS; position_slot++)
+ if (offset == optimum[cur_pos].queue.R[position_slot])
+ goto have_position_cost;
+
+ position_slot = lzx_get_position_slot_raw(offset + LZX_OFFSET_OFFSET);
+
num_extra_bits = lzx_get_num_extra_bits(position_slot);
if (num_extra_bits >= 3) {
position_cost += num_extra_bits - 3;
position_cost += num_extra_bits;
}
+ have_position_cost:
+
do {
+ u32 cost;
unsigned len_header;
unsigned main_symbol;
- u32 cost;
cost = position_cost;
- len_header = min(len - LZX_MIN_MATCH_LEN,
- LZX_NUM_PRIMARY_LENS);
- main_symbol = ((position_slot << 3) | len_header) +
- LZX_NUM_CHARS;
- cost += c->costs.main[main_symbol];
- if (len_header == LZX_NUM_PRIMARY_LENS) {
+ if (len - LZX_MIN_MATCH_LEN < LZX_NUM_PRIMARY_LENS) {
+ len_header = len - LZX_MIN_MATCH_LEN;
+ } else {
+ len_header = LZX_NUM_PRIMARY_LENS;
cost += c->costs.len[len -
LZX_MIN_MATCH_LEN -
LZX_NUM_PRIMARY_LENS];
}
+
+ main_symbol = ((position_slot << 3) | len_header) +
+ LZX_NUM_CHARS;
+ cost += c->costs.main[main_symbol];
+
if (cost < optimum[cur_pos + len].cost) {
- optimum[cur_pos + len].queue = queue;
+ if (position_slot < LZX_NUM_RECENT_OFFSETS) {
+ optimum[cur_pos + len].queue = optimum[cur_pos].queue;
+ swap(optimum[cur_pos + len].queue.R[0],
+ optimum[cur_pos + len].queue.R[position_slot]);
+ } else {
+ optimum[cur_pos + len].queue.R[0] = offset;
+ optimum[cur_pos + len].queue.R[1] = optimum[cur_pos].queue.R[0];
+ optimum[cur_pos + len].queue.R[2] = optimum[cur_pos].queue.R[1];
+ }
optimum[cur_pos + len].prev.link = cur_pos;
optimum[cur_pos + len].prev.match_offset = offset;
optimum[cur_pos + len].cost = cost;
} while (++len <= matches[i].len);
}
- if (longest_rep_len >= LZX_MIN_MATCH_LEN) {
- struct lzx_lru_queue queue;
+ /* Consider coding a repeat offset match.
+ *
+ * As a heuristic, we only consider the longest length of the
+ * longest repeat offset match. This does not, however,
+ * necessarily mean that we will never consider any other repeat
+ * offsets, because above we detect repeat offset matches that
+ * were found by the regular match-finder. Therefore, this
+ * special handling of the longest repeat-offset match is only
+ * helpful for coding a repeat offset match that was *not* found
+ * by the match-finder, e.g. due to being obscured by a less
+ * distant match that is at least as long.
+ *
+ * Note: an alternative, used in LZMA, is to consider every
+ * length of every repeat offset match. This is a more thorough
+ * search, and it makes it unnecessary to detect repeat offset
+ * matches that were found by the regular match-finder. But by
+ * my tests, for LZX the LZMA method slows down the compressor
+ * by ~10% and doesn't actually help the compression ratio too
+ * much.
+ *
+ * Also tested a compromise approach: consider every 3rd length
+ * of the longest repeat offset match. Still didn't seem quite
+ * worth it, though.
+ */
+ if (longest_rep_len) {
+
+ LZX_ASSERT(longest_rep_len >= LZX_MIN_MATCH_LEN);
while (end_pos < cur_pos + longest_rep_len)
optimum[++end_pos].cost = MC_INFINITE_COST;
- queue = optimum[cur_pos].queue;
-
cost = optimum[cur_pos].cost +
- lzx_match_cost(longest_rep_len, longest_rep_offset,
- &c->costs, &queue);
+ lzx_repmatch_cost(longest_rep_len, longest_rep_slot,
+ &c->costs);
if (cost <= optimum[cur_pos + longest_rep_len].cost) {
optimum[cur_pos + longest_rep_len].queue =
- queue;
+ optimum[cur_pos].queue;
+ swap(optimum[cur_pos + longest_rep_len].queue.R[0],
+ optimum[cur_pos + longest_rep_len].queue.R[longest_rep_slot]);
optimum[cur_pos + longest_rep_len].prev.link =
cur_pos;
optimum[cur_pos + longest_rep_len].prev.match_offset =
- longest_rep_offset;
+ optimum[cur_pos + longest_rep_len].queue.R[0];
optimum[cur_pos + longest_rep_len].cost =
cost;
}
struct lz_match lz_match;
struct lzx_item lzx_item;
- LZX_ASSERT(num_passes >= 1);
+ LZX_ASSERT(num_passes_remaining >= 1);
LZX_ASSERT(lz_mf_get_position(c->mf) == spec->window_pos);
c->match_window_end = spec->window_pos + spec->block_size;
lzx_free_compressor(void *_c);
static u64
-lzx_get_needed_memory(size_t max_window_size, unsigned int compression_level)
+lzx_get_needed_memory(size_t max_block_size, unsigned int compression_level)
{
struct lzx_compressor_params params;
u64 size = 0;
+ unsigned window_order;
+ u32 max_window_size;
- if (!lzx_window_size_valid(max_window_size))
+ window_order = lzx_get_window_order(max_block_size);
+ if (window_order == 0)
return 0;
+ max_window_size = max_block_size;
lzx_build_params(compression_level, max_window_size, &params);
}
static int
-lzx_create_compressor(size_t max_window_size, unsigned int compression_level,
+lzx_create_compressor(size_t max_block_size, unsigned int compression_level,
void **c_ret)
{
struct lzx_compressor *c;
struct lzx_compressor_params params;
struct lz_mf_params mf_params;
+ unsigned window_order;
+ u32 max_window_size;
- if (!lzx_window_size_valid(max_window_size))
+ window_order = lzx_get_window_order(max_block_size);
+ if (window_order == 0)
return WIMLIB_ERR_INVALID_PARAM;
+ max_window_size = max_block_size;
lzx_build_params(compression_level, max_window_size, &params);
lzx_build_mf_params(&params, max_window_size, &mf_params);
goto oom;
c->params = params;
- c->num_main_syms = lzx_get_num_main_syms(max_window_size);
+ c->num_main_syms = lzx_get_num_main_syms(window_order);
c->max_window_size = max_window_size;
+ c->window_order = window_order;
c->cur_window = ALIGNED_MALLOC(max_window_size, 16);
if (!c->cur_window)
void *compressed_data, size_t compressed_size_avail, void *_c)
{
struct lzx_compressor *c = _c;
- struct output_bitstream ostream;
- size_t compressed_size;
+ struct lzx_output_bitstream os;
- if (uncompressed_size < 100) {
- LZX_DEBUG("Too small to bother compressing.");
+ /* Don't bother compressing very small inputs. */
+ if (uncompressed_size < 100)
return 0;
- }
-
- LZX_DEBUG("Attempting to compress %zu bytes...",
- uncompressed_size);
/* The input data must be preprocessed. To avoid changing the original
* input, copy it to a temporary buffer. */
memcpy(c->cur_window, uncompressed_data, uncompressed_size);
c->cur_window_size = uncompressed_size;
- /* Before doing any actual compression, do the call instruction (0xe8
- * byte) translation on the uncompressed data. */
+ /* Preprocess the data. */
lzx_do_e8_preprocessing(c->cur_window, c->cur_window_size);
/* Prepare the compressed data. */
lzx_prepare_blocks(c);
- /* Generate the compressed data. */
- init_output_bitstream(&ostream, compressed_data, compressed_size_avail);
- lzx_write_all_blocks(c, &ostream);
-
- compressed_size = flush_output_bitstream(&ostream);
- if (compressed_size == (u32)~0UL) {
- LZX_DEBUG("Data did not compress to %zu bytes or less!",
- compressed_size_avail);
- return 0;
- }
-
- LZX_DEBUG("Done: compressed %zu => %zu bytes.",
- uncompressed_size, compressed_size);
-
- return compressed_size;
+ /* Generate the compressed data and return its size, or 0 if an overflow
+ * occurred. */
+ lzx_init_output(&os, compressed_data, compressed_size_avail);
+ lzx_write_all_blocks(c, &os);
+ return lzx_flush_output(&os);
}
static void