/*
 * unaligned.h
 *
 * Inline functions for unaligned memory accesses.
 *
 * Author:      Eric Biggers
 * Year:        2014, 2015
 *
 * The author dedicates this file to the public domain.
 * You can do whatever you want with this file.
 */

#ifndef _WIMLIB_UNALIGNED_H
#define _WIMLIB_UNALIGNED_H

#include "wimlib/compiler.h"
#include "wimlib/endianness.h"
#include "wimlib/types.h"

#define DEFINE_UNALIGNED_TYPE(type)                             \
struct type##_unaligned {                                       \
        type v;                                                 \
} _packed_attribute;                                            \
                                                                \
static inline type                                              \
load_##type##_unaligned(const void *p)                          \
{                                                               \
        return ((const struct type##_unaligned *)p)->v;         \
}                                                               \
                                                                \
static inline void                                              \
store_##type##_unaligned(type val, void *p)                     \
{                                                               \
        ((struct type##_unaligned *)p)->v = val;                \
}
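
/*
 * For example, DEFINE_UNALIGNED_TYPE(u32) (instantiated below) defines a
 * packed wrapper 'struct u32_unaligned' plus load_u32_unaligned() and
 * store_u32_unaligned(), which read and write a u32 at an arbitrarily aligned
 * address; the compiler emits whatever access sequence the target requires.
 * A rough usage sketch (the pointer arithmetic here is purely illustrative):
 *
 *      u32 x = load_u32_unaligned(buf + 1);    // 'buf' is any readable buffer
 *      store_u32_unaligned(x, buf + 5);        // with enough valid bytes
 */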

DEFINE_UNALIGNED_TYPE(u16);
DEFINE_UNALIGNED_TYPE(u32);
DEFINE_UNALIGNED_TYPE(u64);
DEFINE_UNALIGNED_TYPE(le16);
DEFINE_UNALIGNED_TYPE(le32);
DEFINE_UNALIGNED_TYPE(le64);
DEFINE_UNALIGNED_TYPE(be16);
DEFINE_UNALIGNED_TYPE(be32);
DEFINE_UNALIGNED_TYPE(be64);
DEFINE_UNALIGNED_TYPE(size_t);
DEFINE_UNALIGNED_TYPE(machine_word_t);

#define load_word_unaligned     load_machine_word_t_unaligned
#define store_word_unaligned    store_machine_word_t_unaligned

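/*
 * The word-sized accessors above lend themselves to bulk data paths that move
 * one machine word at a time.  A rough sketch (assuming 'src', 'dst', and
 * 'count' are a hypothetical source pointer, destination pointer, and byte
 * count; a real loop would still copy any remaining tail bytes separately):
 *
 *      while (count >= sizeof(machine_word_t)) {
 *              store_word_unaligned(load_word_unaligned(src), dst);
 *              src += sizeof(machine_word_t);
 *              dst += sizeof(machine_word_t);
 *              count -= sizeof(machine_word_t);
 *      }
 */
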
static inline u16
get_unaligned_le16(const u8 *p)
{
        if (UNALIGNED_ACCESS_IS_FAST)
                return le16_to_cpu(load_le16_unaligned(p));
        else
                return ((u16)p[1] << 8) | p[0];
}

static inline u32
get_unaligned_le32(const u8 *p)
{
        if (UNALIGNED_ACCESS_IS_FAST)
                return le32_to_cpu(load_le32_unaligned(p));
        else
                return ((u32)p[3] << 24) | ((u32)p[2] << 16) |
                        ((u32)p[1] << 8) | p[0];
}

static inline void
put_unaligned_le16(u16 v, u8 *p)
{
        if (UNALIGNED_ACCESS_IS_FAST) {
                store_le16_unaligned(cpu_to_le16(v), p);
        } else {
                p[0] = (u8)(v >> 0);
                p[1] = (u8)(v >> 8);
        }
}

static inline void
put_unaligned_le32(u32 v, u8 *p)
{
        if (UNALIGNED_ACCESS_IS_FAST) {
                store_le32_unaligned(cpu_to_le32(v), p);
        } else {
                p[0] = (u8)(v >> 0);
                p[1] = (u8)(v >> 8);
                p[2] = (u8)(v >> 16);
                p[3] = (u8)(v >> 24);
        }
}
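
/*
 * Byte-order example for the helpers above: put_unaligned_le32(0x11223344, p)
 * always produces p[0]=0x44, p[1]=0x33, p[2]=0x22, p[3]=0x11, regardless of
 * the host's endianness or of how p is aligned, and get_unaligned_le32(p)
 * reads the value back.  (The value 0x11223344 is just an illustration.)
 */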

/*
 * Given a 32-bit value that was loaded with the platform's native endianness,
 * return a 32-bit value whose high-order 8 bits are 0 and whose low-order 24
 * bits contain the first 3 bytes at the memory location from which the input
 * 32-bit value was loaded, arranged as octets in a platform-dependent order.
 */
static inline u32
loaded_u32_to_u24(u32 v)
{
        if (CPU_IS_LITTLE_ENDIAN)
                return v & 0xFFFFFF;
        else
                return v >> 8;
}
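
/*
 * Worked example: if the bytes at the load address were AA BB CC DD, a
 * little-endian CPU loads v == 0xDDCCBBAA and (v & 0xFFFFFF) == 0x00CCBBAA,
 * while a big-endian CPU loads v == 0xAABBCCDD and (v >> 8) == 0x00AABBCC.
 * Both results contain exactly the first 3 bytes AA BB CC, just in different
 * octet positions, which is why the arrangement must be treated as
 * platform-dependent.
 */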

/*
 * Load the next 3 bytes from the memory location @p into the 24 low-order bits
 * of a 32-bit value.  The order in which the 3 bytes will be arranged as octets
 * in the 24 bits is platform-dependent.  At least LOAD_U24_REQUIRED_NBYTES
 * bytes must be available at @p; note that this may be more than 3.
 */
static inline u32
load_u24_unaligned(const u8 *p)
{
#if UNALIGNED_ACCESS_IS_FAST
#  define LOAD_U24_REQUIRED_NBYTES 4
        return loaded_u32_to_u24(load_u32_unaligned(p));
#else
#  define LOAD_U24_REQUIRED_NBYTES 3
#  if CPU_IS_BIG_ENDIAN
        return ((u32)p[2] << 0) | ((u32)p[1] << 8) | ((u32)p[0] << 16);
#  else
        return ((u32)p[0] << 0) | ((u32)p[1] << 8) | ((u32)p[2] << 16);
#  endif
#endif
}
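
/*
 * Usage sketch: a caller that hashes the next 3 bytes of a buffer, e.g. a
 * hypothetical match-finder doing
 *
 *      hash = compute_hash(load_u24_unaligned(in_next));
 *
 * must ensure that at least LOAD_U24_REQUIRED_NBYTES bytes remain readable at
 * 'in_next', since the fast path reads a full 32-bit word.  Because the octet
 * arrangement is platform-dependent, the result should only be fed into
 * computations (such as hashing) that do not depend on a specific byte order.
 */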

#endif /* _WIMLIB_UNALIGNED_H */