1 ///////////////////////////////////////////////////////////////////////////////
4 /// \brief Reading and writing integers from and to buffers
6 // Author: Lasse Collin
8 // This file has been put into the public domain.
9 // You can do whatever you want with this file.
11 ///////////////////////////////////////////////////////////////////////////////
#ifndef LZMA_INTEGER_H
#define LZMA_INTEGER_H

// memcpy() is used for type-safe unaligned loads and stores.
#include <string.h>
// On big endian, we need byte swapping. These macros may be used outside
// this file, so don't put these inside HAVE_FAST_UNALIGNED_ACCESS.
//
// NOTE: The #else branch is required; without it both the byte-swapping
// and identity versions would be defined at the same time, which is a
// macro redefinition error.
#ifdef WORDS_BIGENDIAN
#	define integer_le_16(n) bswap_16(n)
#	define integer_le_32(n) bswap_32(n)
#	define integer_le_64(n) bswap_64(n)
#else
#	define integer_le_16(n) (n)
#	define integer_le_32(n) (n)
#	define integer_le_64(n) (n)
#endif
30 // I'm aware of AC_CHECK_ALIGNED_ACCESS_REQUIRED from Autoconf archive, but
31 // it's not useful here. We don't care if unaligned access is supported,
32 // we care if it is fast. Some systems can emulate unaligned access in
33 // software, which is horribly slow; we want to use byte-by-byte access on
34 // such systems but the Autoconf test would detect such a system as
35 // supporting unaligned access.
37 // NOTE: HAVE_FAST_UNALIGNED_ACCESS indicates only support for 16-bit and
38 // 32-bit integer loads and stores. 64-bit integers may or may not work.
39 // That's why 64-bit functions are commented out.
41 // TODO: Big endian PowerPC supports byte swapping load and store instructions
42 // that also allow unaligned access. Inline assembler could be OK for that.
44 // Performance of these functions isn't that important until LZMA3, but it
45 // doesn't hurt to have these ready already.
46 #ifdef HAVE_FAST_UNALIGNED_ACCESS
/// Read a 16-bit little endian integer from buf.
///
/// memcpy() is used instead of a pointer cast: dereferencing buf through
/// a uint16_t pointer breaks the strict aliasing rules and may perform a
/// misaligned access (both undefined behavior). Compilers compile this
/// memcpy() into a single load on targets where unaligned access is fast.
static inline uint16_t
integer_read_16(const uint8_t buf[static 2])
{
	uint16_t ret;
	memcpy(&ret, buf, sizeof(ret));
	return integer_le_16(ret);
}
/// Read a 32-bit little endian integer from buf.
///
/// memcpy() is used instead of a pointer cast to avoid strict aliasing
/// violations and misaligned access (both undefined behavior); it still
/// compiles to a single load on fast-unaligned-access targets.
static inline uint32_t
integer_read_32(const uint8_t buf[static 4])
{
	uint32_t ret;
	memcpy(&ret, buf, sizeof(ret));
	return integer_le_32(ret);
}
/// Read a 64-bit little endian integer from buf.
///
/// NOTE: HAVE_FAST_UNALIGNED_ACCESS only guarantees fast 16-bit and
/// 32-bit access; 64-bit may be slower on some targets. memcpy() avoids
/// the strict aliasing and alignment undefined behavior of a pointer cast.
static inline uint64_t
integer_read_64(const uint8_t buf[static 8])
{
	uint64_t ret;
	memcpy(&ret, buf, sizeof(ret));
	return integer_le_64(ret);
}
/// Write a 16-bit integer to buf in little endian byte order.
///
/// memcpy() instead of a pointer-cast store: writing through a casted
/// uint16_t pointer breaks strict aliasing and may be misaligned
/// (undefined behavior). Compilers emit a single store from this.
static inline void
integer_write_16(uint8_t buf[static 2], uint16_t num)
{
	const uint16_t tmp = integer_le_16(num);
	memcpy(buf, &tmp, sizeof(tmp));
}
/// Write a 32-bit integer to buf in little endian byte order.
///
/// memcpy() avoids the strict aliasing and alignment undefined behavior
/// of a casted pointer store; it compiles to a single store here.
static inline void
integer_write_32(uint8_t buf[static 4], uint32_t num)
{
	const uint32_t tmp = integer_le_32(num);
	memcpy(buf, &tmp, sizeof(tmp));
}
/// Write a 64-bit integer to buf in little endian byte order.
///
/// NOTE: 64-bit unaligned access may be slow even when
/// HAVE_FAST_UNALIGNED_ACCESS is set (it only covers 16/32-bit).
/// memcpy() avoids strict aliasing and alignment undefined behavior.
static inline void
integer_write_64(uint8_t buf[static 8], uint64_t num)
{
	const uint64_t tmp = integer_le_64(num);
	memcpy(buf, &tmp, sizeof(tmp));
}
/// Read a 16-bit little endian integer from buf, one byte at a time.
/// This form works on any alignment and any byte order.
static inline uint16_t
integer_read_16(const uint8_t buf[static 2])
{
	uint16_t num = buf[0];
	num |= (uint16_t)(buf[1] << 8);
	return num;
}
/// Read a 32-bit little endian integer from buf, one byte at a time.
/// Works on any alignment and any byte order.
static inline uint32_t
integer_read_32(const uint8_t buf[static 4])
{
	uint32_t num = 0;

	// Least significant byte first (little endian).
	for (unsigned int i = 0; i < 4; ++i)
		num |= (uint32_t)(buf[i]) << (8 * i);

	return num;
}
/// Read a 64-bit little endian integer from buf, one byte at a time.
/// Works on any alignment and any byte order.
static inline uint64_t
integer_read_64(const uint8_t buf[static 8])
{
	uint64_t num = 0;

	// Least significant byte first (little endian).
	for (unsigned int i = 0; i < 8; ++i)
		num |= (uint64_t)(buf[i]) << (8 * i);

	return num;
}
/// Write a 16-bit integer to buf in little endian byte order, one byte
/// at a time. Works on any alignment and any byte order.
static inline void
integer_write_16(uint8_t buf[static 2], uint16_t num)
{
	// Least significant byte first (little endian).
	for (unsigned int i = 0; i < 2; ++i)
		buf[i] = (uint8_t)(num >> (8 * i));
}
/// Write a 32-bit integer to buf in little endian byte order, one byte
/// at a time. Works on any alignment and any byte order.
static inline void
integer_write_32(uint8_t buf[static 4], uint32_t num)
{
	// Least significant byte first (little endian).
	for (unsigned int i = 0; i < 4; ++i)
		buf[i] = (uint8_t)(num >> (8 * i));
}
/// Write a 64-bit integer to buf in little endian byte order, one byte
/// at a time. Works on any alignment and any byte order.
static inline void
integer_write_64(uint8_t buf[static 8], uint64_t num)
{
	// Least significant byte first (little endian).
	for (unsigned int i = 0; i < 8; ++i)
		buf[i] = (uint8_t)(num >> (8 * i));
}