*
* Inline functions for unaligned memory accesses.
*
+ * Author: Eric Biggers
+ * Year: 2014, 2015
+ *
* The author dedicates this file to the public domain.
* You can do whatever you want with this file.
*/
#define load_word_unaligned load_machine_word_t_unaligned
#define store_word_unaligned store_machine_word_t_unaligned
-static inline void
-copy_word_unaligned(const void *src, void *dst)
-{
- store_word_unaligned(load_word_unaligned(src), dst);
-}
-
-static inline machine_word_t
-repeat_byte(u8 b)
-{
- machine_word_t v;
-
- BUILD_BUG_ON(WORDSIZE != 4 && WORDSIZE != 8);
-
- v = b;
- v |= v << 8;
- v |= v << 16;
- v |= v << ((WORDSIZE == 8) ? 32 : 0);
- return v;
-}
-
static inline u16
-get_unaligned_u16_le(const void *p)
+get_unaligned_le16(const void *p)
{
	u16 v;
	const u8 *p8 = p;
	if (UNALIGNED_ACCESS_IS_FAST)
		v = le16_to_cpu(load_le16_unaligned(p));
	else
		v = ((u16)p8[1] << 8) | p8[0];
	return v;
}
static inline u32
-get_unaligned_u32_le(const void *p)
+get_unaligned_le32(const void *p)
{
	u32 v;
	const u8 *p8 = p;
	if (UNALIGNED_ACCESS_IS_FAST)
		v = le32_to_cpu(load_le32_unaligned(p));
	else
		v = ((u32)p8[3] << 24) | ((u32)p8[2] << 16) | ((u32)p8[1] << 8) | p8[0];
	return v;
}
static inline void
-put_unaligned_u16_le(u16 v, void *p)
+put_unaligned_le16(u16 v, void *p)
{
	if (UNALIGNED_ACCESS_IS_FAST) {
		store_le16_unaligned(cpu_to_le16(v), p);
	} else {
		u8 *p8 = p;
		p8[0] = (u8)(v >> 0);
		p8[1] = (u8)(v >> 8);
	}
}
static inline void
-put_unaligned_u32_le(u32 v, void *p)
+put_unaligned_le32(u32 v, void *p)
{
	if (UNALIGNED_ACCESS_IS_FAST) {
		store_le32_unaligned(cpu_to_le32(v), p);
	} else {
		u8 *p8 = p;
		p8[0] = (u8)(v >> 0);
		p8[1] = (u8)(v >> 8);
		p8[2] = (u8)(v >> 16);
		p8[3] = (u8)(v >> 24);
	}
}
+/*
+ * Given a 32-bit value that was loaded with the platform's native endianness,
+ * return a 32-bit value whose high-order 8 bits are 0 and whose low-order 24
+ * bits contain the first 3 bytes at the memory location from which the input
+ * 32-bit value was loaded, arranged as octets in a platform-dependent order.
+ */
+static inline u32
+loaded_u32_to_u24(u32 v)
+{
+ if (CPU_IS_LITTLE_ENDIAN)
+ return v & 0xFFFFFF;
+ else
+ return v >> 8;
+}
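+/*
+ * Worked example (illustration only, not part of this change): if the bytes
+ * at the source location are 0x01 0x02 0x03 0x04, a little-endian CPU loads
+ * the value 0x04030201 and this function returns 0x030201, while a big-endian
+ * CPU loads 0x01020304 and it returns 0x010203.  Either way the same 3 bytes
+ * end up in the low 24 bits; only their octet order differs, which is fine for
+ * callers that just need a stable 24-bit key (e.g. for hashing).
+ */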
+
+/*
+ * Load the next 3 bytes from the memory location @p into the 24 low-order bits
+ * of a 32-bit value. The order in which the 3 bytes will be arranged as octets
+ * in the 24 bits is platform-dependent. At least LOAD_U24_REQUIRED_NBYTES
+ * bytes must be available at @p; note that this may be more than 3.
+ */
+static inline u32
+load_u24_unaligned(const u8 *p)
+{
+#if UNALIGNED_ACCESS_IS_FAST
+# define LOAD_U24_REQUIRED_NBYTES 4
+ return loaded_u32_to_u24(load_u32_unaligned(p));
+#else
+# define LOAD_U24_REQUIRED_NBYTES 3
+# if CPU_IS_BIG_ENDIAN
+ return ((u32)p[2] << 0) | ((u32)p[1] << 8) | ((u32)p[0] << 16);
+# else
+ return ((u32)p[0] << 0) | ((u32)p[1] << 8) | ((u32)p[2] << 16);
+# endif
+#endif
+}
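+/*
+ * Hypothetical usage sketch (illustration only, not part of this change):
+ * a caller that hashes 3-byte sequences, such as a match-finder, must keep at
+ * least LOAD_U24_REQUIRED_NBYTES bytes readable at @p because the fast path
+ * loads a full 32-bit word.  The helper name and multiplier below are made up
+ * for the example:
+ *
+ *	static inline u32
+ *	example_hash_3_bytes(const u8 *p, unsigned hash_order)
+ *	{
+ *		return (load_u24_unaligned(p) * 0x9E3779B1) >> (32 - hash_order);
+ *	}
+ */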
+
+
#endif /* _WIMLIB_UNALIGNED_H */