X-Git-Url: https://wimlib.net/git/?a=blobdiff_plain;f=src%2Flzx_compress.c;h=430e0dd414c0fc5aacb2f760ce03157e0bcb17ab;hb=94a694bc31f89cec150b23189fb51d0c63d183dc;hp=ac87a807bd9a3b726e891c99b1a0150ec2cba868;hpb=df84fc50295e41ea1616b68685c6d8dea8ffd84e;p=wimlib diff --git a/src/lzx_compress.c b/src/lzx_compress.c index ac87a807..430e0dd4 100644 --- a/src/lzx_compress.c +++ b/src/lzx_compress.c @@ -182,7 +182,6 @@ #include "wimlib/compress_common.h" #include "wimlib/compressor_ops.h" #include "wimlib/error.h" -#include "wimlib/lz_extend.h" #include "wimlib/lzx_common.h" #include "wimlib/unaligned.h" #include "wimlib/util.h" @@ -288,7 +287,7 @@ struct lzx_sequence { u32 adjusted_offset_and_mainsym; #define SEQ_MAINSYM_BITS 10 #define SEQ_MAINSYM_MASK (((u32)1 << SEQ_MAINSYM_BITS) - 1) -} _aligned_attribute(8); +} __attribute__((aligned(8))); /* * This structure represents a byte position in the input buffer and a node in @@ -334,7 +333,7 @@ struct lzx_optimum_node { # define OPTIMUM_GAP_MATCH 0x80000000 #endif -} _aligned_attribute(8); +} __attribute__((aligned(8))); /* The cost model for near-optimal parsing */ struct lzx_costs { @@ -1227,7 +1226,7 @@ lzx_flush_block(struct lzx_compressor *c, struct lzx_output_bitstream *os, * but rather we combine many symbols into a single "observation type". For * literals we only look at the high bits and low bits, and for matches we only * look at whether the match is long or not. The assumption is that for typical - * "real" data, places that are good block boundaries will tend to be noticable + * "real" data, places that are good block boundaries will tend to be noticeable * based only on changes in these aggregate frequencies, without looking for * subtle differences in individual symbols. For example, a change from ASCII * bytes to non-ASCII bytes, or from few matches (generally less compressible) @@ -1316,7 +1315,7 @@ lzx_should_end_block(struct lzx_block_split_stats *stats) */ struct lzx_lru_queue { u64 R; -} _aligned_attribute(8); +} __attribute__((aligned(8))); #define LZX_QUEUE_OFFSET_SHIFT 21 #define LZX_QUEUE_OFFSET_MASK (((u64)1 << LZX_QUEUE_OFFSET_SHIFT) - 1) @@ -2273,7 +2272,7 @@ lzx_compress_near_optimal(struct lzx_compressor * restrict c, } else { /* Don't search for matches at this position. */ CALL_BT_MF(is_16_bit, c, - bt_matchfinder_skip_position, + bt_matchfinder_skip_byte, in_begin, in_next - in_begin, nice_len, @@ -2569,7 +2568,7 @@ lzx_compress_lazy(struct lzx_compressor * restrict c, cur_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match, in_begin, - in_next - in_begin, + in_next, 2, max_len, nice_len, @@ -2646,7 +2645,7 @@ lzx_compress_lazy(struct lzx_compressor * restrict c, next_len = CALL_HC_MF(is_16_bit, c, hc_matchfinder_longest_match, in_begin, - in_next - in_begin, + in_next, cur_len - 2, max_len, nice_len, @@ -2707,13 +2706,14 @@ lzx_compress_lazy(struct lzx_compressor * restrict c, lzx_choose_match(c, cur_len, cur_adjusted_offset, recent_offsets, is_16_bit, &litrunlen, &next_seq); - in_next = CALL_HC_MF(is_16_bit, c, - hc_matchfinder_skip_positions, - in_begin, - in_next - in_begin, - in_end - in_begin, - skip_len, - next_hashes); + CALL_HC_MF(is_16_bit, c, + hc_matchfinder_skip_bytes, + in_begin, + in_next, + in_end, + skip_len, + next_hashes); + in_next += skip_len; /* Keep going until it's time to end the block. */ } while (in_next < in_max_block_end &&