/*
 * unaligned.h
 *
 * Inline functions for unaligned memory accesses.
 *
 * Author:      Eric Biggers
 * Year:        2014, 2015
 *
 * The author dedicates this file to the public domain.
 * You can do whatever you want with this file.
 */

#ifndef _WIMLIB_UNALIGNED_H
#define _WIMLIB_UNALIGNED_H

#include "wimlib/compiler.h"
#include "wimlib/endianness.h"
#include "wimlib/types.h"

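/*
 * DEFINE_UNALIGNED_TYPE(type) defines a single-member packed wrapper struct
 * for 'type', along with load_<type>_unaligned() and store_<type>_unaligned()
 * helpers.  Because the wrapper struct is packed, the compiler emits whatever
 * instruction sequence is needed to access a 'type' at an arbitrary (possibly
 * misaligned) address on the target architecture.
 */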
#define DEFINE_UNALIGNED_TYPE(type)                             \
struct type##_unaligned {                                       \
        type v;                                                 \
} _packed_attribute;                                            \
                                                                \
static inline type                                              \
load_##type##_unaligned(const void *p)                          \
{                                                               \
        return ((const struct type##_unaligned *)p)->v;         \
}                                                               \
                                                                \
static inline void                                              \
store_##type##_unaligned(type val, void *p)                     \
{                                                               \
        ((struct type##_unaligned *)p)->v = val;                \
}

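/*
 * Unaligned load/store helpers for the fixed-width, little-endian, big-endian,
 * size_t, and machine-word types used throughout wimlib, e.g.
 * load_le32_unaligned() and store_u64_unaligned().
 */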
DEFINE_UNALIGNED_TYPE(u16);
DEFINE_UNALIGNED_TYPE(u32);
DEFINE_UNALIGNED_TYPE(u64);
DEFINE_UNALIGNED_TYPE(le16);
DEFINE_UNALIGNED_TYPE(le32);
DEFINE_UNALIGNED_TYPE(le64);
DEFINE_UNALIGNED_TYPE(be16);
DEFINE_UNALIGNED_TYPE(be32);
DEFINE_UNALIGNED_TYPE(be64);
DEFINE_UNALIGNED_TYPE(size_t);
DEFINE_UNALIGNED_TYPE(machine_word_t);

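/* Shorter names for unaligned accesses of machine-word size. */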
#define load_word_unaligned     load_machine_word_t_unaligned
#define store_word_unaligned    store_machine_word_t_unaligned

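/*
 * Read a 16-bit or 32-bit little-endian integer from a possibly unaligned
 * address, e.g. get_unaligned_u32_le(&buf[3]).  When the platform handles
 * unaligned accesses efficiently (UNALIGNED_ACCESS_IS_FAST), this is a direct
 * unaligned load followed by an endianness conversion; otherwise the value is
 * assembled byte by byte.
 */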
static inline u16
get_unaligned_u16_le(const void *p)
{
        u16 v;

        if (UNALIGNED_ACCESS_IS_FAST) {
                v = le16_to_cpu(load_le16_unaligned(p));
        } else {
                const u8 *p8 = p;
                v = 0;
                v |= (u16)p8[0] << 0;
                v |= (u16)p8[1] << 8;
        }
        return v;
}

static inline u32
get_unaligned_u32_le(const void *p)
{
        u32 v;

        if (UNALIGNED_ACCESS_IS_FAST) {
                v = le32_to_cpu(load_le32_unaligned(p));
        } else {
                const u8 *p8 = p;
                v = 0;
                v |= (u32)p8[0] << 0;
                v |= (u32)p8[1] << 8;
                v |= (u32)p8[2] << 16;
                v |= (u32)p8[3] << 24;
        }
        return v;
}

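/*
 * Write a 16-bit or 32-bit integer in little-endian format to a possibly
 * unaligned address.  These mirror get_unaligned_u16_le() and
 * get_unaligned_u32_le(): a direct unaligned store plus endianness conversion
 * when unaligned accesses are fast, otherwise a byte-by-byte store.
 */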
static inline void
put_unaligned_u16_le(u16 v, void *p)
{
        if (UNALIGNED_ACCESS_IS_FAST) {
                store_le16_unaligned(cpu_to_le16(v), p);
        } else {
                u8 *p8 = p;
                p8[0] = (v >> 0) & 0xFF;
                p8[1] = (v >> 8) & 0xFF;
        }
}

static inline void
put_unaligned_u32_le(u32 v, void *p)
{
        if (UNALIGNED_ACCESS_IS_FAST) {
                store_le32_unaligned(cpu_to_le32(v), p);
        } else {
                u8 *p8 = p;
                p8[0] = (v >> 0) & 0xFF;
                p8[1] = (v >> 8) & 0xFF;
                p8[2] = (v >> 16) & 0xFF;
                p8[3] = (v >> 24) & 0xFF;
        }
}

#endif /* _WIMLIB_UNALIGNED_H */