/*
 * unaligned.h - inline functions for unaligned memory accesses
 *
 * The following copying information applies to this specific source code file:
 *
 * Written in 2014-2015 by Eric Biggers <ebiggers3@gmail.com>
 *
 * To the extent possible under law, the author(s) have dedicated all copyright
 * and related and neighboring rights to this software to the public domain
 * worldwide via the Creative Commons Zero 1.0 Universal Public Domain
 * Dedication (the "CC0").
 *
 * This software is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the CC0 for more details.
 *
 * You should have received a copy of the CC0 along with this software; if not
 * see <http://creativecommons.org/publicdomain/zero/1.0/>.
 */
#ifndef _WIMLIB_UNALIGNED_H
type v; \
} _packed_attribute; \
\
-static inline type \
+static forceinline type \
load_##type##_unaligned(const void *p) \
{ \
return ((const struct type##_unaligned *)p)->v; \
} \
\
-static inline void \
+static forceinline void \
store_##type##_unaligned(type val, void *p) \
{ \
((struct type##_unaligned *)p)->v = val; \
/* Convenience aliases for unaligned loads/stores of the machine word type
 * (the machine_word_t variants generated above). */
#define load_word_unaligned load_machine_word_t_unaligned
#define store_word_unaligned store_machine_word_t_unaligned
-static inline u16
-get_unaligned_le16(const void *p)
+static forceinline u16
+get_unaligned_le16(const u8 *p)
{
- u16 v;
-
- if (UNALIGNED_ACCESS_IS_FAST) {
- v = le16_to_cpu(load_le16_unaligned(p));
- } else {
- const u8 *p8 = p;
- v = 0;
- v |= (u16)p8[0] << 0;
- v |= (u16)p8[1] << 8;
- }
- return v;
+ if (UNALIGNED_ACCESS_IS_FAST)
+ return le16_to_cpu(load_le16_unaligned(p));
+ else
+ return ((u16)p[1] << 8) | p[0];
}
-static inline u32
-get_unaligned_le32(const void *p)
+static forceinline u32
+get_unaligned_le32(const u8 *p)
{
- u32 v;
-
- if (UNALIGNED_ACCESS_IS_FAST) {
- v = le32_to_cpu(load_le32_unaligned(p));
- } else {
- const u8 *p8 = p;
- v = 0;
- v |= (u32)p8[0] << 0;
- v |= (u32)p8[1] << 8;
- v |= (u32)p8[2] << 16;
- v |= (u32)p8[3] << 24;
- }
- return v;
+ if (UNALIGNED_ACCESS_IS_FAST)
+ return le32_to_cpu(load_le32_unaligned(p));
+ else
+ return ((u32)p[3] << 24) | ((u32)p[2] << 16) |
+ ((u32)p[1] << 8) | p[0];
}
-static inline void
-put_unaligned_le16(u16 v, void *p)
+static forceinline void
+put_unaligned_le16(u16 v, u8 *p)
{
if (UNALIGNED_ACCESS_IS_FAST) {
store_le16_unaligned(cpu_to_le16(v), p);
} else {
- u8 *p8 = p;
- p8[0] = (v >> 0) & 0xFF;
- p8[1] = (v >> 8) & 0xFF;
+ p[0] = (u8)(v >> 0);
+ p[1] = (u8)(v >> 8);
}
}
-static inline void
-put_unaligned_le32(u32 v, void *p)
+static forceinline void
+put_unaligned_le32(u32 v, u8 *p)
{
if (UNALIGNED_ACCESS_IS_FAST) {
store_le32_unaligned(cpu_to_le32(v), p);
} else {
- u8 *p8 = p;
- p8[0] = (v >> 0) & 0xFF;
- p8[1] = (v >> 8) & 0xFF;
- p8[2] = (v >> 16) & 0xFF;
- p8[3] = (v >> 24) & 0xFF;
+ p[0] = (u8)(v >> 0);
+ p[1] = (u8)(v >> 8);
+ p[2] = (u8)(v >> 16);
+ p[3] = (u8)(v >> 24);
}
}
* bits contain the first 3 bytes, arranged in octets in a platform-dependent
* order, at the memory location from which the input 32-bit value was loaded.
*/
-static inline u32
+static forceinline u32
loaded_u32_to_u24(u32 v)
{
if (CPU_IS_LITTLE_ENDIAN)
* in the 24 bits is platform-dependent. At least LOAD_U24_REQUIRED_NBYTES
* bytes must be available at @p; note that this may be more than 3.
*/
-static inline u32
+static forceinline u32
load_u24_unaligned(const u8 *p)
{
#if UNALIGNED_ACCESS_IS_FAST