+
+/*
+ * Copy an LZ77 match at (dst - offset) to dst.
+ *
+ * The length and offset must already have been validated --- that is,
+ * (dst - offset) can't underrun the output buffer, and (dst + length)
+ * can't overrun the output buffer.  Also, the length cannot be 0.
+ *
+ * @winend points to the byte past the end of the output buffer.
+ * This function won't write any data beyond this position.
+ */
+static inline void
+lz_copy(u8 *dst, u32 length, u32 offset, const u8 *winend)
+{
+ const u8 *src = dst - offset;
+#if defined(__x86_64__) || defined(__i386__)
+ /* Copy one 'unsigned long' at a time. On i386 and x86_64 this is
+ * faster than copying one byte at a time, unless the data is
+ * near-random and all the matches have very short lengths. Note that
+ * since this requires unaligned memory accesses, it won't necessarily
+ * be faster on every architecture.
+ *
+ * Also note that we might copy more than the length of the match. For
+ * example, if an 'unsigned long' is 8 bytes and the match is of length
+ * 5, then we'll simply copy 8 bytes. This is okay as long as we don't
+ * write beyond the end of the output buffer, hence the check for
+ * (winend - (dst + length) >= sizeof(unsigned long) - 1). */
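+	/* Additionally, (offset >= sizeof(unsigned long)) is what makes the
+	 * word-at-a-time copy safe for self-overlapping matches: with a
+	 * smaller offset, each word-sized load from 'src' would read bytes
+	 * that this same loop hasn't written yet.  */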
+ if (offset >= sizeof(unsigned long) &&
+ winend - (dst + length) >= sizeof(unsigned long) - 1)
+ {
+		/* Access memory through a packed struct.  This makes the
+		 * compiler generate code that safely performs the unaligned
+		 * memory accesses.  */
+ struct ulong_wrapper {
+ unsigned long v;
+ } _packed_attribute;
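+		/* (A more portable alternative to the packed-struct trick is
+		 * memcpy(&v, src, sizeof(v)) and memcpy(dst, &v, sizeof(v));
+		 * on these targets, modern compilers typically compile either
+		 * form down to a single unaligned load/store.)  */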
+
+ const u8 * const end = dst + length;
+ unsigned long v;
+
+ v = ((struct ulong_wrapper *)src)->v;
+ ((struct ulong_wrapper *)dst)->v = v;
+ dst += sizeof(unsigned long);
+ src += sizeof(unsigned long);
+
+		while (dst < end) {
+			v = ((struct ulong_wrapper *)src)->v;
+			((struct ulong_wrapper *)dst)->v = v;
+			dst += sizeof(unsigned long);
+			src += sizeof(unsigned long);
+		}
+
+ return;
+ }
+#endif
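+	/* Fall back to a byte-at-a-time copy.  Copying forward, one byte at a
+	 * time, produces the correct result even for self-overlapping matches
+	 * such as the offset == 1 run-length case.  */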
+ do {
+ *dst++ = *src++;
+ } while (--length);
+}
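+
+/*
+ * Usage sketch (hypothetical, not part of this patch): a decoder loop might
+ * call lz_copy() like this, where all names other than lz_copy, u8, and u32
+ * are illustrative, not identifiers from this codebase:
+ *
+ *	static void
+ *	output_match(u8 **out_next_p, const u8 *out_end, u32 length, u32 offset)
+ *	{
+ *		// Caller has validated: length != 0, (*out_next_p - offset)
+ *		// doesn't underrun the output buffer, and
+ *		// (*out_next_p + length) <= out_end.
+ *		lz_copy(*out_next_p, length, offset, out_end);
+ *		*out_next_p += length;
+ *	}
+ */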
+
+#endif /* _WIMLIB_DECOMPRESS_COMMON_H */