/*
* Copyright (C) 2012, 2013 Eric Biggers
*
- * This file is part of wimlib, a library for working with WIM files.
+ * This file is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your option) any
+ * later version.
*
- * wimlib is free software; you can redistribute it and/or modify it under the
- * terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 3 of the License, or (at your option)
- * any later version.
- *
- * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
- * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
*
- * You should have received a copy of the GNU General Public License
- * along with wimlib; if not, see http://www.gnu.org/licenses/.
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this file; if not, see http://www.gnu.org/licenses/.
*/
#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#include "wimlib/endianness.h"
#include "wimlib/lzx.h"
+#include "wimlib/unaligned.h"
#include "wimlib/util.h"
#ifdef __SSE2__
#  include <emmintrin.h>
#endif
}
static void
-do_translate_target(sle32 *target, s32 input_pos)
+do_translate_target(void *target, s32 input_pos)
{
	s32 abs_offset, rel_offset;

-	/* XXX: This assumes unaligned memory accesses are okay. */
-	rel_offset = le32_to_cpu(*target);
+	rel_offset = le32_to_cpu(load_le32_unaligned(target));
	if (rel_offset >= -input_pos && rel_offset < LZX_WIM_MAGIC_FILESIZE) {
		if (rel_offset < LZX_WIM_MAGIC_FILESIZE - input_pos) {
			/* "good translation" */
			abs_offset = rel_offset + input_pos;
		} else {
			/* "compensating translation" */
			abs_offset = rel_offset - LZX_WIM_MAGIC_FILESIZE;
		}
-		*target = cpu_to_le32(abs_offset);
+		store_le32_unaligned(cpu_to_le32(abs_offset), target);
	}
}
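
/* Worked example with illustrative numbers (LZX_WIM_MAGIC_FILESIZE is
 * 12000000): for input_pos == 4096 and a stored rel_offset of 256, we have
 * 256 < 12000000 - 4096, so the "good translation" path stores
 * abs_offset = 256 + 4096 = 4352.  For rel_offset == 11999000, we have
 * 11999000 >= 12000000 - 4096, so the "compensating translation" path
 * stores abs_offset = 11999000 - 12000000 = -1000. */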
static void
-undo_translate_target(sle32 *target, s32 input_pos)
+undo_translate_target(void *target, s32 input_pos)
{
	s32 abs_offset, rel_offset;

-	/* XXX: This assumes unaligned memory accesses are okay. */
-	abs_offset = le32_to_cpu(*target);
+	abs_offset = le32_to_cpu(load_le32_unaligned(target));
	if (abs_offset >= 0) {
		if (abs_offset < LZX_WIM_MAGIC_FILESIZE) {
			/* "good translation" */
			rel_offset = abs_offset - input_pos;
-			*target = cpu_to_le32(rel_offset);
+			store_le32_unaligned(cpu_to_le32(rel_offset), target);
		}
	} else {
		if (abs_offset >= -input_pos) {
			/* "compensating translation" */
			rel_offset = abs_offset + LZX_WIM_MAGIC_FILESIZE;
-			*target = cpu_to_le32(rel_offset);
+			store_le32_unaligned(cpu_to_le32(rel_offset), target);
		}
	}
}
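
/* undo_translate_target() inverts do_translate_target() for the same
 * input_pos.  Continuing the example above: abs_offset == 4352 is
 * nonnegative and below LZX_WIM_MAGIC_FILESIZE, so the "good translation"
 * branch recovers rel_offset = 4352 - 4096 = 256; abs_offset == -1000 is
 * negative but >= -4096, so the "compensating translation" branch recovers
 * rel_offset = -1000 + 12000000 = 11999000. */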

static
#ifndef __SSE2__
inline  /* Although inlining the 'process_target' function still speeds up the
	   SSE2 case, it bloats the binary more. */
#endif
void
-lzx_e8_filter(u8 *data, u32 size, void (*process_target)(sle32 *, s32))
+lzx_e8_filter(u8 *data, u32 size, void (*process_target)(void *, s32))
{
#ifdef __SSE2__
	/* SSE2 vectorized implementation for x86_64.  This speeds up LZX
	 * decompression by about 5-8% overall.  (The improvement is smaller in
	 * the degenerate case of many 0xe8 bytes, also known as "e8 0day".) */
				/* Do (or undo) the e8 translation. */
				u8 *p8 = (u8 *)p128 + bit;
-				(*process_target)((sle32 *)(p8 + 1),
+				(*process_target)(p8 + 1,
						  p8 - data);

				/* Don't start an e8 translation in the
				 * next 4 bytes. */
	u8 *p8_end = data + size - 10;
	do {
		if (*p8 == 0xe8) {
-			(*process_target)((sle32 *)(p8 + 1), p8 - data);
+			(*process_target)(p8 + 1, p8 - data);
			p8 += 5;
		} else {
			p8++;