X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=src%2Flzms-compress.c;h=7c103f7b42150695b366ef69170b161a9160b432;hp=31648f0aee6b4f12ae59ee9f53d556fb41d5e800;hb=dd38de3e27916c2d7fe97158c3df38c6b9b43e0d;hpb=abe65b1f2f12bcedec1103cc7924897720f919be diff --git a/src/lzms-compress.c b/src/lzms-compress.c index 31648f0a..7c103f7b 100644 --- a/src/lzms-compress.c +++ b/src/lzms-compress.c @@ -3,7 +3,7 @@ */ /* - * Copyright (C) 2013 Eric Biggers + * Copyright (C) 2013, 2014 Eric Biggers * * This file is part of wimlib, a library for working with WIM files. * @@ -24,8 +24,10 @@ /* This a compressor for the LZMS compression format. More details about this * format can be found in lzms-decompress.c. * - * This is currently an unsophisticated implementation that is fast but does not - * attain the best compression ratios allowed by the format. + * Also see lzx-compress.c for general information about match-finding and + * match-choosing that also applies to this LZMS compressor. + * + * NOTE: this compressor currently does not code any delta matches. */ #ifdef HAVE_CONFIG_H @@ -33,13 +35,14 @@ #endif #include "wimlib.h" +#include "wimlib/assert.h" #include "wimlib/compiler.h" #include "wimlib/compressor_ops.h" #include "wimlib/compress_common.h" #include "wimlib/endianness.h" #include "wimlib/error.h" -#include "wimlib/lz_hash.h" -#include "wimlib/lz_sarray.h" +#include "wimlib/lz.h" +#include "wimlib/lz_bt.h" #include "wimlib/lzms.h" #include "wimlib/util.h" @@ -47,20 +50,6 @@ #include #include -#define LZMS_OPTIM_ARRAY_SIZE 1024 - -struct lzms_compressor; -struct lzms_adaptive_state { - struct lzms_lz_lru_queues lru; - u8 main_state; - u8 match_state; - u8 lz_match_state; - u8 lz_repeat_match_state[LZMS_NUM_RECENT_OFFSETS - 1]; -}; -#define LZ_ADAPTIVE_STATE struct lzms_adaptive_state -#define LZ_COMPRESSOR struct lzms_compressor -#include "wimlib/lz_optimal.h" - /* Stucture used for writing raw bits to the end of the LZMS-compressed data as * a series of 16-bit little endian coding units. */ struct lzms_output_bitstream { @@ -127,7 +116,7 @@ struct lzms_range_encoder { * lzms_range_encoder_raw. */ struct lzms_range_encoder_raw *rc; - /* Bits recently encoded by this range encoder. This are used as in + /* Bits recently encoded by this range encoder. This is used as an * index into @prob_entries. */ u32 state; @@ -166,11 +155,13 @@ struct lzms_huffman_encoder { u8 lens[LZMS_MAX_NUM_SYMS]; /* The codeword of each symbol in the Huffman code. */ - u16 codewords[LZMS_MAX_NUM_SYMS]; + u32 codewords[LZMS_MAX_NUM_SYMS]; }; /* State of the LZMS compressor. */ struct lzms_compressor { + struct wimlib_lzms_compressor_params params; + /* Pointer to a buffer holding the preprocessed data to compress. */ u8 *window; @@ -180,20 +171,16 @@ struct lzms_compressor { /* Size of the data in @buffer. */ u32 window_size; -#if 0 - /* Temporary array used by lz_analyze_block(); must be at least as long - * as the window. */ - u32 *prev_tab; -#endif - - /* Suffix array match-finder. */ - struct lz_sarray lz_sarray; + /* Binary tree match-finder. */ + struct lz_bt mf; /* Temporary space to store found matches. */ struct raw_match *matches; - /* Match-chooser. */ - struct lz_match_chooser mc; + /* Match-chooser data. */ + struct lzms_mc_pos_data *optimum; + unsigned optimum_cur_idx; + unsigned optimum_end_idx; /* Maximum block size this compressor instantiation allows. This is the * allocated size of @window. 
*/ @@ -229,6 +216,28 @@ struct lzms_compressor { s32 last_target_usages[65536]; }; +struct lzms_mc_pos_data { + u32 cost; +#define MC_INFINITE_COST ((u32)~0UL) + union { + struct { + u32 link; + u32 match_offset; + } prev; + struct { + u32 link; + u32 match_offset; + } next; + }; + struct lzms_adaptive_state { + struct lzms_lz_lru_queues lru; + u8 main_state; + u8 match_state; + u8 lz_match_state; + u8 lz_repeat_match_state[LZMS_NUM_RECENT_OFFSETS - 1]; + } state; +}; + /* Initialize the output bitstream @os to write forwards to the specified * compressed data buffer @out that is @out_limit 16-bit integers long. */ static void @@ -543,7 +552,7 @@ lzms_encode_lz_match(struct lzms_compressor *ctx, u32 length, u32 offset) /* Main bit: 1 = a match, not a literal. */ lzms_range_encode_bit(&ctx->main_range_encoder, 1); - /* Match bit: 0 = a LZ match, not a delta match. */ + /* Match bit: 0 = an LZ match, not a delta match. */ lzms_range_encode_bit(&ctx->match_range_encoder, 0); /* Determine if the offset can be represented as a recent offset. */ @@ -594,63 +603,6 @@ lzms_encode_lz_match(struct lzms_compressor *ctx, u32 length, u32 offset) lzms_end_encode_item(ctx, length); } -#if 0 -static void -lzms_record_literal(u8 literal, void *_ctx) -{ - struct lzms_compressor *ctx = _ctx; - - lzms_encode_literal(ctx, literal); -} - -static void -lzms_record_match(unsigned length, unsigned offset, void *_ctx) -{ - struct lzms_compressor *ctx = _ctx; - - lzms_encode_lz_match(ctx, length, offset); -} - -static void -lzms_fast_encode(struct lzms_compressor *ctx) -{ - static const struct lz_params lzms_lz_params = { - .min_match = 3, - .max_match = UINT_MAX, - .max_offset = UINT_MAX, - .nice_match = 64, - .good_match = 32, - .max_chain_len = 64, - .max_lazy_match = 258, - .too_far = 4096, - }; - - lz_analyze_block(ctx->window, - ctx->window_size, - lzms_record_match, - lzms_record_literal, - ctx, - &lzms_lz_params, - ctx->prev_tab); - -} -#endif - -/* Fast heuristic cost evaluation to use in the inner loop of the match-finder. - * Unlike lzms_get_lz_match_cost(), which does a true cost evaluation, this - * simply prioritize matches based on their offset. 
*/ -static input_idx_t -lzms_lz_match_cost_fast(input_idx_t length, input_idx_t offset, const void *_lru) -{ - const struct lzms_lz_lru_queues *lru = _lru; - - for (input_idx_t i = 0; i < LZMS_NUM_RECENT_OFFSETS; i++) - if (offset == lru->recent_offsets[i]) - return i; - - return offset; -} - #define LZMS_COST_SHIFT 5 /*#define LZMS_RC_COSTS_USE_FLOATING_POINT*/ @@ -717,17 +669,9 @@ lzms_do_init_rc_costs(void) static void lzms_init_rc_costs(void) { - static bool done = false; - static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; - - if (unlikely(!done)) { - pthread_mutex_lock(&mutex); - if (!done) { - lzms_do_init_rc_costs(); - done = true; - } - pthread_mutex_unlock(&mutex); - } + static pthread_once_t once = PTHREAD_ONCE_INIT; + + pthread_once(&once, lzms_do_init_rc_costs); } /* @@ -782,7 +726,7 @@ lzms_offset_cost(const struct lzms_huffman_encoder *enc, u32 offset) } static u32 -lzms_length_cost(const struct lzms_huffman_encoder *enc, u32 length) +lzms_get_length_cost(const struct lzms_huffman_encoder *enc, u32 length) { u32 slot; u32 num_extra_bits; @@ -800,29 +744,22 @@ lzms_length_cost(const struct lzms_huffman_encoder *enc, u32 length) } static u32 -lzms_get_matches(struct lzms_compressor *ctx, - const struct lzms_adaptive_state *state, - struct raw_match **matches_ret) +lzms_get_matches(struct lzms_compressor *ctx, struct raw_match **matches_ret) { *matches_ret = ctx->matches; - return lz_sarray_get_matches(&ctx->lz_sarray, - ctx->matches, - lzms_lz_match_cost_fast, - &state->lru); + return lz_bt_get_matches(&ctx->mf, ctx->matches); } static void -lzms_skip_bytes(struct lzms_compressor *ctx, input_idx_t n) +lzms_skip_bytes(struct lzms_compressor *ctx, u32 n) { - while (n--) - lz_sarray_skip_position(&ctx->lz_sarray); + lz_bt_skip_positions(&ctx->mf, n); } static u32 -lzms_get_prev_literal_cost(struct lzms_compressor *ctx, - struct lzms_adaptive_state *state) +lzms_get_literal_cost(struct lzms_compressor *ctx, + struct lzms_adaptive_state *state, u8 literal) { - u8 literal = ctx->window[lz_sarray_get_pos(&ctx->lz_sarray) - 1]; u32 cost = 0; state->lru.upcoming_offset = 0; @@ -837,9 +774,8 @@ lzms_get_prev_literal_cost(struct lzms_compressor *ctx, } static u32 -lzms_get_lz_match_cost(struct lzms_compressor *ctx, - struct lzms_adaptive_state *state, - input_idx_t length, input_idx_t offset) +lzms_get_lz_match_cost_nolen(struct lzms_compressor *ctx, + struct lzms_adaptive_state *state, u32 offset) { u32 cost = 0; int recent_offset_idx; @@ -882,7 +818,6 @@ lzms_get_lz_match_cost(struct lzms_compressor *ctx, state->lru.recent_offsets[i] = state->lru.recent_offsets[i + 1]; } - cost += lzms_length_cost(&ctx->length_encoder, length); state->lru.upcoming_offset = offset; lzms_update_lz_lru_queues(&state->lru); @@ -890,25 +825,290 @@ lzms_get_lz_match_cost(struct lzms_compressor *ctx, return cost; } +static u32 +lzms_get_lz_match_cost(struct lzms_compressor *ctx, + struct lzms_adaptive_state *state, + u32 length, u32 offset) +{ + return lzms_get_lz_match_cost_nolen(ctx, state, offset) + + lzms_get_length_cost(&ctx->length_encoder, length); +} + +static struct raw_match +lzms_match_chooser_reverse_list(struct lzms_compressor *ctx, unsigned cur_pos) +{ + unsigned prev_link, saved_prev_link; + unsigned prev_match_offset, saved_prev_match_offset; + + ctx->optimum_end_idx = cur_pos; + + saved_prev_link = ctx->optimum[cur_pos].prev.link; + saved_prev_match_offset = ctx->optimum[cur_pos].prev.match_offset; + + do { + prev_link = saved_prev_link; + prev_match_offset = saved_prev_match_offset; + 
+ saved_prev_link = ctx->optimum[prev_link].prev.link; + saved_prev_match_offset = ctx->optimum[prev_link].prev.match_offset; + + ctx->optimum[prev_link].next.link = cur_pos; + ctx->optimum[prev_link].next.match_offset = prev_match_offset; + + cur_pos = prev_link; + } while (cur_pos != 0); + + ctx->optimum_cur_idx = ctx->optimum[0].next.link; + + return (struct raw_match) + { .len = ctx->optimum_cur_idx, + .offset = ctx->optimum[0].next.match_offset, + }; +} + +/* This is similar to lzx_get_near_optimal_match() in lzx-compress.c. + * Read that one if you want to understand it. */ static struct raw_match lzms_get_near_optimal_match(struct lzms_compressor *ctx) { + u32 num_matches; + struct raw_match *matches; + struct raw_match match; + u32 longest_len; + u32 longest_rep_len; + u32 longest_rep_offset; + unsigned cur_pos; + unsigned end_pos; struct lzms_adaptive_state initial_state; + if (ctx->optimum_cur_idx != ctx->optimum_end_idx) { + match.len = ctx->optimum[ctx->optimum_cur_idx].next.link - + ctx->optimum_cur_idx; + match.offset = ctx->optimum[ctx->optimum_cur_idx].next.match_offset; + + ctx->optimum_cur_idx = ctx->optimum[ctx->optimum_cur_idx].next.link; + return match; + } + + ctx->optimum_cur_idx = 0; + ctx->optimum_end_idx = 0; + + longest_rep_len = ctx->params.min_match_length - 1; + if (lz_bt_get_position(&ctx->mf) >= LZMS_MAX_INIT_RECENT_OFFSET) { + u32 limit = min(ctx->params.max_match_length, + lz_bt_get_remaining_size(&ctx->mf)); + for (int i = 0; i < LZMS_NUM_RECENT_OFFSETS; i++) { + u32 offset = ctx->lru.lz.recent_offsets[i]; + const u8 *strptr = lz_bt_get_window_ptr(&ctx->mf); + const u8 *matchptr = strptr - offset; + u32 len = 0; + while (len < limit && strptr[len] == matchptr[len]) + len++; + if (len > longest_rep_len) { + longest_rep_len = len; + longest_rep_offset = offset; + } + } + } + + if (longest_rep_len >= ctx->params.nice_match_length) { + lzms_skip_bytes(ctx, longest_rep_len); + return (struct raw_match) { + .len = longest_rep_len, + .offset = longest_rep_offset, + }; + } + + num_matches = lzms_get_matches(ctx, &matches); + + if (num_matches) { + longest_len = matches[num_matches - 1].len; + if (longest_len >= ctx->params.nice_match_length) { + lzms_skip_bytes(ctx, longest_len - 1); + return matches[num_matches - 1]; + } + } else { + longest_len = 1; + } + initial_state.lru = ctx->lru.lz; initial_state.main_state = ctx->main_range_encoder.state; initial_state.match_state = ctx->match_range_encoder.state; initial_state.lz_match_state = ctx->lz_match_range_encoder.state; for (int i = 0; i < LZMS_NUM_RECENT_OFFSETS - 1; i++) - initial_state.lz_repeat_match_state[i] = - ctx->lz_repeat_match_range_encoders[i].state; - return lz_get_near_optimal_match(&ctx->mc, - lzms_get_matches, - lzms_skip_bytes, - lzms_get_prev_literal_cost, - lzms_get_lz_match_cost, - ctx, - &initial_state); + initial_state.lz_repeat_match_state[i] = ctx->lz_repeat_match_range_encoders[i].state; + + ctx->optimum[1].state = initial_state; + ctx->optimum[1].cost = lzms_get_literal_cost(ctx, + &ctx->optimum[1].state, + *(lz_bt_get_window_ptr(&ctx->mf) - 1)); + ctx->optimum[1].prev.link = 0; + + for (u32 i = 0, len = 2; i < num_matches; i++) { + u32 offset = matches[i].offset; + struct lzms_adaptive_state state; + u32 position_cost; + + state = initial_state; + position_cost = 0; + position_cost += lzms_get_lz_match_cost_nolen(ctx, &state, offset); + + do { + u32 cost; + + cost = position_cost; + cost += lzms_get_length_cost(&ctx->length_encoder, len); + + ctx->optimum[len].state = state; + 
ctx->optimum[len].prev.link = 0; + ctx->optimum[len].prev.match_offset = offset; + ctx->optimum[len].cost = cost; + } while (++len <= matches[i].len); + } + end_pos = longest_len; + + if (longest_rep_len >= ctx->params.min_match_length) { + struct lzms_adaptive_state state; + u32 cost; + + while (end_pos < longest_rep_len) + ctx->optimum[++end_pos].cost = MC_INFINITE_COST; + + state = initial_state; + cost = lzms_get_lz_match_cost(ctx, + &state, + longest_rep_len, + longest_rep_offset); + if (cost <= ctx->optimum[longest_rep_len].cost) { + ctx->optimum[longest_rep_len].state = state; + ctx->optimum[longest_rep_len].prev.link = 0; + ctx->optimum[longest_rep_len].prev.match_offset = longest_rep_offset; + ctx->optimum[longest_rep_len].cost = cost; + } + } + + cur_pos = 0; + for (;;) { + u32 cost; + struct lzms_adaptive_state state; + + cur_pos++; + + if (cur_pos == end_pos || cur_pos == ctx->params.optim_array_length) + return lzms_match_chooser_reverse_list(ctx, cur_pos); + + longest_rep_len = ctx->params.min_match_length - 1; + if (lz_bt_get_position(&ctx->mf) >= LZMS_MAX_INIT_RECENT_OFFSET) { + u32 limit = min(ctx->params.max_match_length, + lz_bt_get_remaining_size(&ctx->mf)); + for (int i = 0; i < LZMS_NUM_RECENT_OFFSETS; i++) { + u32 offset = ctx->optimum[cur_pos].state.lru.recent_offsets[i]; + const u8 *strptr = lz_bt_get_window_ptr(&ctx->mf); + const u8 *matchptr = strptr - offset; + u32 len = 0; + while (len < limit && strptr[len] == matchptr[len]) + len++; + if (len > longest_rep_len) { + longest_rep_len = len; + longest_rep_offset = offset; + } + } + } + + if (longest_rep_len >= ctx->params.nice_match_length) { + match = lzms_match_chooser_reverse_list(ctx, cur_pos); + + ctx->optimum[cur_pos].next.match_offset = longest_rep_offset; + ctx->optimum[cur_pos].next.link = cur_pos + longest_rep_len; + ctx->optimum_end_idx = cur_pos + longest_rep_len; + + lzms_skip_bytes(ctx, longest_rep_len); + + return match; + } + + num_matches = lzms_get_matches(ctx, &matches); + + if (num_matches) { + longest_len = matches[num_matches - 1].len; + if (longest_len >= ctx->params.nice_match_length) { + match = lzms_match_chooser_reverse_list(ctx, cur_pos); + + ctx->optimum[cur_pos].next.match_offset = + matches[num_matches - 1].offset; + ctx->optimum[cur_pos].next.link = cur_pos + longest_len; + ctx->optimum_end_idx = cur_pos + longest_len; + + lzms_skip_bytes(ctx, longest_len - 1); + + return match; + } + } else { + longest_len = 1; + } + + while (end_pos < cur_pos + longest_len) + ctx->optimum[++end_pos].cost = MC_INFINITE_COST; + + state = ctx->optimum[cur_pos].state; + cost = ctx->optimum[cur_pos].cost + + lzms_get_literal_cost(ctx, + &state, + *(lz_bt_get_window_ptr(&ctx->mf) - 1)); + if (cost < ctx->optimum[cur_pos + 1].cost) { + ctx->optimum[cur_pos + 1].state = state; + ctx->optimum[cur_pos + 1].cost = cost; + ctx->optimum[cur_pos + 1].prev.link = cur_pos; + } + + for (u32 i = 0, len = 2; i < num_matches; i++) { + u32 offset = matches[i].offset; + struct lzms_adaptive_state state; + u32 position_cost; + + state = ctx->optimum[cur_pos].state; + position_cost = ctx->optimum[cur_pos].cost; + position_cost += lzms_get_lz_match_cost_nolen(ctx, &state, offset); + + do { + u32 cost; + + cost = position_cost; + cost += lzms_get_length_cost(&ctx->length_encoder, len); + + if (cost < ctx->optimum[cur_pos + len].cost) { + ctx->optimum[cur_pos + len].state = state; + ctx->optimum[cur_pos + len].prev.link = cur_pos; + ctx->optimum[cur_pos + len].prev.match_offset = offset; + ctx->optimum[cur_pos + len].cost 
= cost; + } + } while (++len <= matches[i].len); + } + + if (longest_rep_len >= ctx->params.min_match_length) { + + while (end_pos < cur_pos + longest_rep_len) + ctx->optimum[++end_pos].cost = MC_INFINITE_COST; + + state = ctx->optimum[cur_pos].state; + + cost = ctx->optimum[cur_pos].cost + + lzms_get_lz_match_cost(ctx, + &state, + longest_rep_len, + longest_rep_offset); + if (cost <= ctx->optimum[cur_pos + longest_rep_len].cost) { + ctx->optimum[cur_pos + longest_rep_len].state = + state; + ctx->optimum[cur_pos + longest_rep_len].prev.link = + cur_pos; + ctx->optimum[cur_pos + longest_rep_len].prev.match_offset = + longest_rep_offset; + ctx->optimum[cur_pos + longest_rep_len].cost = + cost; + } + } + } } /* @@ -916,31 +1116,29 @@ lzms_get_near_optimal_match(struct lzms_compressor *ctx) * * Notes: * - * - This uses near-optimal LZ parsing backed by a suffix-array match-finder. - * More details can be found in the corresponding files (lz_optimal.h, - * lz_sarray.{h,c}). + * - This uses near-optimal LZ parsing backed by a binary tree match-finder. * - * - This does not output any delta matches. It would take a specialized - * algorithm to find them, then more code in lz_optimal.h and here to handle - * evaluating and outputting them. + * - This does not output any delta matches. * * - The costs of literals and matches are estimated using the range encoder * states and the semi-adaptive Huffman codes. Except for range encoding * states, costs are assumed to be constant throughout a single run of the - * parsing algorithm, which can parse up to LZMS_OPTIM_ARRAY_SIZE bytes of - * data. This introduces a source of inaccuracy because the probabilities and - * Huffman codes can change over this part of the data. + * parsing algorithm, which can parse up to @optim_array_length (from the + * `struct wimlib_lzms_compressor_params') bytes of data. This introduces a + * source of inaccuracy because the probabilities and Huffman codes can change + * over this part of the data. */ static void -lzms_normal_encode(struct lzms_compressor *ctx) +lzms_encode(struct lzms_compressor *ctx) { struct raw_match match; - /* Load window into suffix array match-finder. */ - lz_sarray_load_window(&ctx->lz_sarray, ctx->window, ctx->window_size); + /* Load window into the binary tree match-finder. */ + lz_bt_load_window(&ctx->mf, ctx->window, ctx->window_size); /* Reset the match-chooser. */ - lz_match_chooser_begin(&ctx->mc); + ctx->optimum_cur_idx = 0; + ctx->optimum_end_idx = 0; while (ctx->cur_window_pos != ctx->window_size) { match = lzms_get_near_optimal_match(ctx); @@ -993,7 +1191,6 @@ lzms_init_compressor(struct lzms_compressor *ctx, const u8 *udata, u32 ulen, /* Copy the uncompressed data into the @ctx->window buffer. */ memcpy(ctx->window, udata, ulen); - memset(&ctx->window[ulen], 0, 8); ctx->cur_window_pos = 0; ctx->window_size = ulen; @@ -1147,11 +1344,7 @@ lzms_compress(const void *uncompressed_data, size_t uncompressed_size, /* Compute and encode a literal/match sequence that decompresses to the * preprocessed data. */ -#if 1 - lzms_normal_encode(ctx); -#else - lzms_fast_encode(ctx); -#endif + lzms_encode(ctx); /* Get and return the compressed data size. 
*/ compressed_size = lzms_finalize(ctx, compressed_data, @@ -1216,84 +1409,82 @@ lzms_free_compressor(void *_ctx) if (ctx) { FREE(ctx->window); -#if 0 - FREE(ctx->prev_tab); -#endif FREE(ctx->matches); - lz_sarray_destroy(&ctx->lz_sarray); - lz_match_chooser_destroy(&ctx->mc); + lz_bt_destroy(&ctx->mf); + FREE(ctx->optimum); FREE(ctx); } } -static const struct wimlib_lzms_compressor_params default_params = { - .hdr = sizeof(struct wimlib_lzms_compressor_params), +static const struct wimlib_lzms_compressor_params lzms_default = { + .hdr = { + .size = sizeof(struct wimlib_lzms_compressor_params), + }, .min_match_length = 2, .max_match_length = UINT32_MAX, .nice_match_length = 32, .max_search_depth = 50, - .max_matches_per_pos = 3, .optim_array_length = 1024, }; +static bool +lzms_params_valid(const struct wimlib_compressor_params_header *); + +static const struct wimlib_lzms_compressor_params * +lzms_get_params(const struct wimlib_compressor_params_header *_params) +{ + const struct wimlib_lzms_compressor_params *params = + (const struct wimlib_lzms_compressor_params*)_params; + + if (params == NULL) + params = &lzms_default; + + LZMS_ASSERT(lzms_params_valid(¶ms->hdr)); + + return params; +} + static int lzms_create_compressor(size_t max_block_size, const struct wimlib_compressor_params_header *_params, void **ctx_ret) { struct lzms_compressor *ctx; - const struct wimlib_lzms_compressor_params *params; + const struct wimlib_lzms_compressor_params *params = lzms_get_params(_params); if (max_block_size == 0 || max_block_size >= INT32_MAX) { LZMS_DEBUG("Invalid max_block_size (%u)", max_block_size); return WIMLIB_ERR_INVALID_PARAM; } - if (_params) - params = (const struct wimlib_lzms_compressor_params*)_params; - else - params = &default_params; - - if (params->max_match_length < params->min_match_length || - params->min_match_length < 2 || - params->optim_array_length == 0 || - min(params->max_match_length, params->nice_match_length) > 65536) { - LZMS_DEBUG("Invalid compression parameter!"); - return WIMLIB_ERR_INVALID_PARAM; - } - ctx = CALLOC(1, sizeof(struct lzms_compressor)); if (ctx == NULL) goto oom; - ctx->window = MALLOC(max_block_size + 8); + ctx->window = MALLOC(max_block_size); if (ctx->window == NULL) goto oom; -#if 0 - ctx->prev_tab = MALLOC(max_block_size * sizeof(ctx->prev_tab[0])); - if (ctx->prev_tab == NULL) - goto oom; -#endif - ctx->matches = MALLOC(min(params->max_match_length - params->min_match_length + 1, - params->max_matches_per_pos) * + params->max_search_depth + 2) * sizeof(ctx->matches[0])); if (ctx->matches == NULL) goto oom; - if (!lz_sarray_init(&ctx->lz_sarray, max_block_size, - params->min_match_length, - params->max_match_length, - params->max_search_depth, - params->max_matches_per_pos)) + if (!lz_bt_init(&ctx->mf, + max_block_size, + params->min_match_length, + params->max_match_length, + params->nice_match_length, + params->max_search_depth)) goto oom; - if (!lz_match_chooser_init(&ctx->mc, - params->optim_array_length, - params->nice_match_length, - params->max_match_length)) + ctx->optimum = MALLOC((params->optim_array_length + + min(params->nice_match_length, + params->max_match_length)) * + sizeof(ctx->optimum[0])); + if (!ctx->optimum) goto oom; /* Initialize position and length slot data if not done already. 
*/ @@ -1303,6 +1494,7 @@ lzms_create_compressor(size_t max_block_size, lzms_init_rc_costs(); ctx->max_block_size = max_block_size; + memcpy(&ctx->params, params, sizeof(*params)); *ctx_ret = ctx; return 0; @@ -1312,7 +1504,46 @@ oom: return WIMLIB_ERR_NOMEM; } +static u64 +lzms_get_needed_memory(size_t max_block_size, + const struct wimlib_compressor_params_header *_params) +{ + const struct wimlib_lzms_compressor_params *params = lzms_get_params(_params); + + u64 size = 0; + + size += max_block_size; + size += sizeof(struct lzms_compressor); + size += lz_bt_get_needed_memory(max_block_size); + size += (params->optim_array_length + + min(params->nice_match_length, + params->max_match_length)) * + sizeof(((struct lzms_compressor *)0)->optimum[0]); + size += min(params->max_match_length - params->min_match_length + 1, + params->max_search_depth + 2) * + sizeof(((struct lzms_compressor*)0)->matches[0]); + return size; +} + +static bool +lzms_params_valid(const struct wimlib_compressor_params_header *_params) +{ + const struct wimlib_lzms_compressor_params *params = + (const struct wimlib_lzms_compressor_params*)_params; + + if (params->hdr.size != sizeof(*params) || + params->max_match_length < params->min_match_length || + params->min_match_length < 2 || + params->optim_array_length == 0 || + min(params->max_match_length, params->nice_match_length) > 65536) + return false; + + return true; +} + const struct compressor_ops lzms_compressor_ops = { + .params_valid = lzms_params_valid, + .get_needed_memory = lzms_get_needed_memory, .create_compressor = lzms_create_compressor, .compress = lzms_compress, .free_compressor = lzms_free_compressor,
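
The hunk above that rewrites lzms_init_rc_costs() replaces hand-rolled double-checked locking (a static `done` flag guarded by a pthread_mutex) with pthread_once(). The following is a minimal, self-contained sketch of that pattern under assumed placeholder names; the example_* identifiers and the table contents are illustrative only and are not part of wimlib.

#include <pthread.h>
#include <stdint.h>

/* Hypothetical stand-in for the range-coder bit cost table. */
static uint32_t example_rc_costs[64];

static void
example_do_init_rc_costs(void)
{
	/* Populate the table once; the real code derives each entry from
	 * the probability associated with the range-coder state. */
	for (unsigned i = 0; i < 64; i++)
		example_rc_costs[i] = i;
}

static void
example_init_rc_costs(void)
{
	/* pthread_once() runs the init routine exactly once, even if
	 * several threads create compressors concurrently, so no
	 * explicit mutex or 'done' flag is needed. */
	static pthread_once_t once = PTHREAD_ONCE_INIT;

	pthread_once(&once, example_do_init_rc_costs);
}

Design note: the old fast path tested the `done` flag outside the mutex, which is the classic double-checked-locking hazard; pthread_once() provides the same run-exactly-once guarantee with less state and correct synchronization on every path.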
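
The other piece worth calling out is lzms_match_chooser_reverse_list(): during forward cost propagation each slot of the optimum array records only the cheapest arrival edge (prev.link and prev.match_offset), so the back pointers must be flipped into forward pointers before the chosen literals and matches can be emitted in order. The sketch below reproduces that reversal over a simplified stand-in for struct lzms_mc_pos_data; the struct and function names are assumptions for illustration, and prev/next are kept as separate fields rather than the space-saving union used in the real structure.

#include <stdint.h>

struct pos_data {
	struct { uint32_t link; uint32_t match_offset; } prev;
	struct { uint32_t link; uint32_t match_offset; } next;
};

/*
 * Convert the back pointers stored in optimum[0..cur_pos] into forward
 * pointers, walking from the last parsed position towards position 0.
 * Afterwards, optimum[0].next.link is the length of the first item to
 * emit (1 for a literal) and optimum[0].next.match_offset is its offset
 * when that item is a match.
 */
static uint32_t
reverse_parse(struct pos_data *optimum, uint32_t cur_pos)
{
	uint32_t saved_link = optimum[cur_pos].prev.link;
	uint32_t saved_offset = optimum[cur_pos].prev.match_offset;

	do {
		uint32_t link = saved_link;
		uint32_t offset = saved_offset;

		/* Save the predecessor's back pointer before it is
		 * overwritten (in the real union-based layout, writing
		 * 'next' clobbers 'prev'). */
		saved_link = optimum[link].prev.link;
		saved_offset = optimum[link].prev.match_offset;

		/* The item that arrived at 'cur_pos' started at 'link';
		 * record it as the next item to emit from 'link'. */
		optimum[link].next.link = cur_pos;
		optimum[link].next.match_offset = offset;

		cur_pos = link;
	} while (cur_pos != 0);

	return optimum[0].next.link;
}

In the real code the overlapping union is safe for the same reason the sketch works: a position's prev fields are saved into locals before its next fields are written.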