/*
 * unaligned.h - inline functions for unaligned memory accesses
 *
 * The author dedicates this file to the public domain.
 * You can do whatever you want with this file.
 */
13 #ifndef _WIMLIB_UNALIGNED_H
14 #define _WIMLIB_UNALIGNED_H
16 #include "wimlib/compiler.h"
17 #include "wimlib/endianness.h"
18 #include "wimlib/types.h"
/*
 * DEFINE_UNALIGNED_TYPE(type) - define load_<type>_unaligned() and
 * store_<type>_unaligned() accessors for @type.
 *
 * The accessors go through a packed single-member struct, which tells the
 * compiler the pointer may be misaligned so it emits whatever instruction
 * sequence is safe on the target architecture (a plain load/store where
 * unaligned access is supported, byte-wise access otherwise).
 */
#define DEFINE_UNALIGNED_TYPE(type)				\
								\
struct type##_unaligned {					\
	type v;							\
} _packed_attribute;						\
								\
static inline type						\
load_##type##_unaligned(const void *p)				\
{								\
	return ((const struct type##_unaligned *)p)->v;		\
}								\
								\
static inline void						\
store_##type##_unaligned(type val, void *p)			\
{								\
	((struct type##_unaligned *)p)->v = val;		\
}
/* Unaligned accessors for the fixed-width, endian-specific, and
 * machine-word types used throughout the library. */
DEFINE_UNALIGNED_TYPE(u16);
DEFINE_UNALIGNED_TYPE(u32);
DEFINE_UNALIGNED_TYPE(u64);
DEFINE_UNALIGNED_TYPE(le16);
DEFINE_UNALIGNED_TYPE(le32);
DEFINE_UNALIGNED_TYPE(le64);
DEFINE_UNALIGNED_TYPE(be16);
DEFINE_UNALIGNED_TYPE(be32);
DEFINE_UNALIGNED_TYPE(be64);
DEFINE_UNALIGNED_TYPE(size_t);
DEFINE_UNALIGNED_TYPE(machine_word_t);

/* Shorthand for the native machine-word accessors. */
#define load_word_unaligned	load_machine_word_t_unaligned
#define store_word_unaligned	store_machine_word_t_unaligned
53 get_unaligned_u16_le(const void *p)
57 if (UNALIGNED_ACCESS_IS_FAST) {
58 v = le16_to_cpu(load_le16_unaligned(p));
69 get_unaligned_u32_le(const void *p)
73 if (UNALIGNED_ACCESS_IS_FAST) {
74 v = le32_to_cpu(load_le32_unaligned(p));
80 v |= (u32)p8[2] << 16;
81 v |= (u32)p8[3] << 24;
87 put_unaligned_u16_le(u16 v, void *p)
89 if (UNALIGNED_ACCESS_IS_FAST) {
90 store_le16_unaligned(cpu_to_le16(v), p);
93 p8[0] = (v >> 0) & 0xFF;
94 p8[1] = (v >> 8) & 0xFF;
99 put_unaligned_u32_le(u32 v, void *p)
101 if (UNALIGNED_ACCESS_IS_FAST) {
102 store_le32_unaligned(cpu_to_le32(v), p);
105 p8[0] = (v >> 0) & 0xFF;
106 p8[1] = (v >> 8) & 0xFF;
107 p8[2] = (v >> 16) & 0xFF;
108 p8[3] = (v >> 24) & 0xFF;
113 * Given a 32-bit value that was loaded with the platform's native endianness,
114 * return a 32-bit value whose high-order 8 bits are 0 and whose low-order 24
115 * bits contain the first 3 bytes, arranged in octets in a platform-dependent
116 * order, at the memory location from which the input 32-bit value was loaded.
119 loaded_u32_to_u24(u32 v)
121 if (CPU_IS_LITTLE_ENDIAN)
128 * Load the next 3 bytes from the memory location @p into the 24 low-order bits
129 * of a 32-bit value. The order in which the 3 bytes will be arranged as octets
130 * in the 24 bits is platform-dependent. At least LOAD_U24_REQUIRED_NBYTES
131 * bytes must be available at @p; note that this may be more than 3.
134 load_u24_unaligned(const u8 *p)
136 #if UNALIGNED_ACCESS_IS_FAST
137 # define LOAD_U24_REQUIRED_NBYTES 4
138 return loaded_u32_to_u24(load_u32_unaligned(p));
140 # define LOAD_U24_REQUIRED_NBYTES 3
141 return ((u32)p[0] << 0) | ((u32)p[1] << 8) | ((u32)p[2] << 16);
146 #endif /* _WIMLIB_UNALIGNED_H */