Use 'restrict' on pointer arguments to all compress() and decompress() functions
diff --git a/src/lzx_compress.c b/src/lzx_compress.c
index d38c5819d31af3f80cc6414da762f0c93ddf0812..3f3b836c636f8dd2bbdac17ae6a7b2d97bb673e0 100644
--- a/src/lzx_compress.c
+++ b/src/lzx_compress.c
@@ -82,7 +82,7 @@
  * cache.  However, fallback behavior (immediately terminating the block) on
  * cache overflow is still required.
  */
-#define LZX_CACHE_PER_POS      6
+#define LZX_CACHE_PER_POS      7
 
 /*
  * LZX_CACHE_LENGTH is the number of lz_match structures in the match cache,
 
 /*
  * LZX_BIT_COST is a scaling factor that represents the cost to output one bit.
- * THis makes it possible to consider fractional bit costs.
+ * This makes it possible to consider fractional bit costs.
  *
  * Note: this is only useful as a statistical trick for when the true costs are
  * unknown.  In reality, each token in LZX requires a whole number of bits to
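A minimal sketch of how fractional-bit costs under this scaling scheme might be derived from symbol frequencies (the value of LZX_BIT_COST, the helper name, and the array names are illustrative assumptions, not taken from this file):

	/* Illustrative only: estimate each symbol's cost in units of
	 * 1/LZX_BIT_COST bits.  A symbol with probability p ideally costs
	 * -log2(p) bits; the scaling factor lets us keep the fraction.
	 * Assumes total > 0. */
	#include <math.h>

	#define LZX_BIT_COST	16	/* assumed scaling factor */

	static void
	make_cost_table(const unsigned freqs[], unsigned num_syms,
			unsigned costs[])
	{
		unsigned long total = 0;

		for (unsigned i = 0; i < num_syms; i++)
			total += freqs[i];
		for (unsigned i = 0; i < num_syms; i++) {
			/* Give unseen symbols a small nonzero probability. */
			double p = (freqs[i] ? (double)freqs[i] : 1.0) / total;
			costs[i] = (unsigned)(-log2(p) * LZX_BIT_COST + 0.5);
		}
	}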
 /*
  * LZX_HASH2_ORDER is the log base 2 of the number of entries in the hash table
  * for finding length 2 matches.  This can be as high as 16 (in which case the
- * hash function is trivial), but using a smaller hash table actually speeds up
+ * hash function is trivial), but using a smaller hash table speeds up
  * compression due to reduced cache pressure.
  */
 #define LZX_HASH2_ORDER                12
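For illustration, a hash of the next 2 bytes into a table of this size might look like the following sketch (the multiplicative constant and helper name are assumptions; only the 12-bit table size comes from the definition above):

	/* Illustrative only: Fibonacci-style hash of a 16-bit value down to
	 * LZX_HASH2_ORDER bits.  When LZX_HASH2_ORDER == 16, the two bytes
	 * could index the table directly and no mixing would be needed. */
	static inline unsigned
	lzx_hash_2_bytes(const unsigned char *p)
	{
		unsigned v = (unsigned)p[0] | ((unsigned)p[1] << 8);

		return (v * 0x9E3779B1U) >> (32 - 12);	/* 12 == LZX_HASH2_ORDER */
	}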
@@ -368,6 +368,10 @@ struct lzx_compressor {
        /* Pointer to the compress() implementation chosen at allocation time */
        void (*impl)(struct lzx_compressor *, struct lzx_output_bitstream *);
 
+       /* If true, the compressor need not preserve the input buffer if it
+        * compresses the data successfully.  */
+       bool destructive;
+
        /* The Huffman symbol frequency counters for the current block.  */
        struct lzx_freqs freqs;
 
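For context, a rough sketch of how a caller is expected to opt into destructive mode through wimlib's public API (assuming the WIMLIB_COMPRESSOR_FLAG_DESTRUCTIVE flag is ORed into the compression level; treat the exact flag and values here as assumptions):

	#include <wimlib.h>

	/* Request an LZX compressor that may clobber its input buffer,
	 * avoiding the internal in_buffer copy. */
	struct wimlib_compressor *comp;
	int ret = wimlib_create_compressor(WIMLIB_COMPRESSION_TYPE_LZX,
					   (size_t)1 << 21,	/* max block size */
					   50 | WIMLIB_COMPRESSOR_FLAG_DESTRUCTIVE,
					   &comp);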
@@ -442,7 +446,7 @@ struct lzx_compressor {
                         * contains the number of matches that were found at
                         * that position; this is followed by the matches
                         * themselves, if any, sorted by strictly increasing
-                        * length and strictly increasing offset.
+                        * length.
                         *
                         * Note: in rare cases, there will be a very high number
                         * of matches in the block and this array will overflow.
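A sketch of how this layout might be walked when the cached matches are consumed (names like num_positions and consider_match are hypothetical; only the count-then-matches layout comes from the comment above):

	/* Illustrative only: each position contributes one count entry
	 * (stored in the 'length' field) followed by that many matches. */
	const struct lz_match *entry = c->match_cache;

	for (unsigned pos = 0; pos < num_positions; pos++) {
		unsigned num_matches = entry->length;
		const struct lz_match *matches = entry + 1;

		for (unsigned i = 0; i < num_matches; i++) {
			/* matches[i].length strictly increases with i */
			consider_match(pos, matches[i].length,
				       matches[i].offset);	/* hypothetical */
		}
		entry += 1 + num_matches;
	}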
@@ -1620,13 +1624,11 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
                                max_len = in_end - in_next;
                                nice_len = min(max_len, nice_len);
 
-                               /* This extra check is needed to ensure that
-                                * reading the next 3 bytes when looking for a
-                                * length 2 match is valid.  In addition, we
-                                * cannot allow ourselves to find a length 2
-                                * match of the very last two bytes with the
-                                * very first two bytes, since such a match has
-                                * an offset too large to be represented.  */
+                               /* This extra check is needed to ensure that we
+                                * never output a length 2 match of the very
+                                * last two bytes with the very first two bytes,
+                                * since such a match has an offset too large to
+                                * be represented.  */
                                if (unlikely(max_len < 3)) {
                                        in_next++;
                                        cache_ptr->length = 0;
@@ -1644,8 +1646,7 @@ lzx_compress_near_optimal(struct lzx_compressor *c,
                        if (matchfinder_node_valid(cur_match) &&
                            (LZX_HASH2_ORDER == 16 ||
                             load_u16_unaligned(&in_begin[cur_match]) ==
-                            load_u16_unaligned(in_next)) &&
-                           in_begin[cur_match + 2] != in_next[2])
+                            load_u16_unaligned(in_next)))
                        {
                                lz_matchptr->length = 2;
                                lz_matchptr->offset = in_next - &in_begin[cur_match];
@@ -2011,7 +2012,8 @@ lzx_get_compressor_size(size_t max_bufsize, unsigned compression_level)
 }
 
 static u64
-lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level)
+lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level,
+                     bool destructive)
 {
        u64 size = 0;
 
@@ -2019,13 +2021,14 @@ lzx_get_needed_memory(size_t max_bufsize, unsigned compression_level)
                return 0;
 
        size += lzx_get_compressor_size(max_bufsize, compression_level);
-       size += max_bufsize; /* in_buffer */
+       if (!destructive)
+               size += max_bufsize; /* in_buffer */
        return size;
 }
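The practical effect is easy to quantify; a minimal sketch (values illustrative):

	/* Illustrative only: in destructive mode the reported figure drops
	 * by exactly max_bufsize, since no in_buffer copy is needed. */
	u64 normal   = lzx_get_needed_memory(1 << 20, 50, false);
	u64 in_place = lzx_get_needed_memory(1 << 20, 50, true);
	/* normal - in_place == (u64)(1 << 20), i.e. 1 MiB saved */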
 
 static int
 lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
-                     void **c_ret)
+                     bool destructive, void **c_ret)
 {
        unsigned window_order;
        struct lzx_compressor *c;
@@ -2040,12 +2043,16 @@ lzx_create_compressor(size_t max_bufsize, unsigned compression_level,
        if (!c)
                goto oom0;
 
+       c->destructive = destructive;
+
        c->num_main_syms = lzx_get_num_main_syms(window_order);
        c->window_order = window_order;
 
-       c->in_buffer = MALLOC(max_bufsize);
-       if (!c->in_buffer)
-               goto oom1;
+       if (!c->destructive) {
+               c->in_buffer = MALLOC(max_bufsize);
+               if (!c->in_buffer)
+                       goto oom1;
+       }
 
        if (compression_level <= LZX_MAX_FAST_LEVEL) {
 
@@ -2113,18 +2120,22 @@ oom0:
 }
 
 static size_t
-lzx_compress(const void *in, size_t in_nbytes,
-            void *out, size_t out_nbytes_avail, void *_c)
+lzx_compress(const void *restrict in, size_t in_nbytes,
+            void *restrict out, size_t out_nbytes_avail, void *restrict _c)
 {
        struct lzx_compressor *c = _c;
        struct lzx_output_bitstream os;
+       size_t result;
 
        /* Don't bother trying to compress very small inputs.  */
        if (in_nbytes < 100)
                return 0;
 
        /* Copy the input data into the internal buffer and preprocess it.  */
-       memcpy(c->in_buffer, in, in_nbytes);
+       if (c->destructive)
+               c->in_buffer = (void *)in;
+       else
+               memcpy(c->in_buffer, in, in_nbytes);
        c->in_nbytes = in_nbytes;
        lzx_do_e8_preprocessing(c->in_buffer, in_nbytes);
 
@@ -2139,7 +2150,10 @@ lzx_compress(const void *in, size_t in_nbytes,
        (*c->impl)(c, &os);
 
        /* Flush the output bitstream and return the compressed size or 0.  */
-       return lzx_flush_output(&os);
+       result = lzx_flush_output(&os);
+       if (!result && c->destructive)
+               lzx_undo_e8_preprocessing(c->in_buffer, c->in_nbytes);
+       return result;
 }
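Why the undo matters: in destructive mode the e8 preprocessing was applied directly to the caller's buffer, so when compression fails (returns 0) the caller typically falls back to storing the data uncompressed and needs the original bytes back. A hedged caller sketch (the write_* helpers are hypothetical):

	size_t csize = lzx_compress(buf, usize, outbuf, usize - 1, c);

	if (csize == 0) {
		/* Compression failed to shrink the data.  In destructive
		 * mode 'buf' was preprocessed in place, but lzx_compress()
		 * has already undone that, so the original data is intact. */
		write_uncompressed(buf, usize);		/* hypothetical */
	} else {
		write_compressed(outbuf, csize);	/* hypothetical */
	}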
 
 static void
@@ -2147,7 +2161,8 @@ lzx_free_compressor(void *_c)
 {
        struct lzx_compressor *c = _c;
 
-       FREE(c->in_buffer);
+       if (!c->destructive)
+               FREE(c->in_buffer);
        ALIGNED_FREE(c);
 }