/* Misc. cleanups */
/* [wimlib] / include / wimlib / unaligned.h */
1 /*
2  * unaligned.h
3  *
4  * Inline functions for unaligned memory accesses.
5  *
6  * The author dedicates this file to the public domain.
7  * You can do whatever you want with this file.
8  */
9
10 #ifndef _WIMLIB_UNALIGNED_H
11 #define _WIMLIB_UNALIGNED_H
12
13 #include "wimlib/compiler.h"
14 #include "wimlib/endianness.h"
15 #include "wimlib/types.h"
16
/*
 * DEFINE_UNALIGNED_TYPE(type) - generate unaligned accessors for 'type'.
 *
 * Expands to three definitions:
 *   - struct type##_unaligned: a packed single-member wrapper.  The
 *     _packed_attribute forces alignment 1, so the compiler emits code
 *     that is safe for arbitrarily aligned addresses.
 *   - load_##type##_unaligned(p): read a 'type' from the (possibly
 *     unaligned) address p.
 *   - store_##type##_unaligned(val, p): write 'val' to the (possibly
 *     unaligned) address p.
 *
 * Accessing through the packed struct (rather than a plain pointer cast)
 * keeps the access well-defined even on architectures that trap on
 * misaligned loads/stores.
 */
#define DEFINE_UNALIGNED_TYPE(type)                             \
struct type##_unaligned {                                       \
        type v;                                                 \
} _packed_attribute;                                            \
                                                                \
static inline type                                              \
load_##type##_unaligned(const void *p)                          \
{                                                               \
        return ((const struct type##_unaligned *)p)->v;         \
}                                                               \
                                                                \
static inline void                                              \
store_##type##_unaligned(type val, void *p)                     \
{                                                               \
        ((struct type##_unaligned *)p)->v = val;                \
}
33
/*
 * Instantiate unaligned load/store helpers for the integer types used
 * throughout wimlib: native-endian (u16/u32/u64), little-endian
 * (le16/le32/le64), big-endian (be16/be32/be64), plus size_t and the
 * native machine word type.
 */
DEFINE_UNALIGNED_TYPE(u16);
DEFINE_UNALIGNED_TYPE(u32);
DEFINE_UNALIGNED_TYPE(u64);
DEFINE_UNALIGNED_TYPE(le16);
DEFINE_UNALIGNED_TYPE(le32);
DEFINE_UNALIGNED_TYPE(le64);
DEFINE_UNALIGNED_TYPE(be16);
DEFINE_UNALIGNED_TYPE(be32);
DEFINE_UNALIGNED_TYPE(be64);
DEFINE_UNALIGNED_TYPE(size_t);
DEFINE_UNALIGNED_TYPE(machine_word_t);

/* Convenience aliases: a "word" is the native machine word type. */
#define load_word_unaligned     load_machine_word_t_unaligned
#define store_word_unaligned    store_machine_word_t_unaligned
48
49 static inline u16
50 get_unaligned_u16_le(const void *p)
51 {
52         u16 v;
53
54         if (UNALIGNED_ACCESS_IS_FAST) {
55                 v = le16_to_cpu(load_le16_unaligned(p));
56         } else {
57                 const u8 *p8 = p;
58                 v = 0;
59                 v |= (u16)p8[0] << 0;
60                 v |= (u16)p8[1] << 8;
61         }
62         return v;
63 }
64
65 static inline u32
66 get_unaligned_u32_le(const void *p)
67 {
68         u32 v;
69
70         if (UNALIGNED_ACCESS_IS_FAST) {
71                 v = le32_to_cpu(load_le32_unaligned(p));
72         } else {
73                 const u8 *p8 = p;
74                 v = 0;
75                 v |= (u32)p8[0] << 0;
76                 v |= (u32)p8[1] << 8;
77                 v |= (u32)p8[2] << 16;
78                 v |= (u32)p8[3] << 24;
79         }
80         return v;
81 }
82
83 static inline void
84 put_unaligned_u16_le(u16 v, void *p)
85 {
86         if (UNALIGNED_ACCESS_IS_FAST) {
87                 store_le16_unaligned(cpu_to_le16(v), p);
88         } else {
89                 u8 *p8 = p;
90                 p8[0] = (v >> 0) & 0xFF;
91                 p8[1] = (v >> 8) & 0xFF;
92         }
93 }
94
95 static inline void
96 put_unaligned_u32_le(u32 v, void *p)
97 {
98         if (UNALIGNED_ACCESS_IS_FAST) {
99                 store_le32_unaligned(cpu_to_le32(v), p);
100         } else {
101                 u8 *p8 = p;
102                 p8[0] = (v >> 0) & 0xFF;
103                 p8[1] = (v >> 8) & 0xFF;
104                 p8[2] = (v >> 16) & 0xFF;
105                 p8[3] = (v >> 24) & 0xFF;
106         }
107 }
108
109 #endif /* _WIMLIB_UNALIGNED_H */