1 ///////////////////////////////////////////////////////////////////////////////
4 /// \brief Reading and writing integers from and to buffers
6 // This code has been put into the public domain.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12 ///////////////////////////////////////////////////////////////////////////////
#ifndef LZMA_INTEGER_H
#define LZMA_INTEGER_H

// memcpy() is used below for type-safe unaligned loads and stores.
#include <string.h>
17 // On big endian, we need byte swapping. These macros may be used outside
18 // this file, so don't put these inside HAVE_FAST_UNALIGNED_ACCESS.
#ifdef WORDS_BIGENDIAN
	// bswap_16/32/64 are expected to be provided by the build system
	// headers (e.g. <byteswap.h> or a compatibility fallback) —
	// they are not defined in this file.
#	define integer_le_16(n) bswap_16(n)
#	define integer_le_32(n) bswap_32(n)
#	define integer_le_64(n) bswap_64(n)
#else
	// Little endian host: the on-disk (little endian) byte order
	// already matches the native order, so these are no-ops.
#	define integer_le_16(n) (n)
#	define integer_le_32(n) (n)
#	define integer_le_64(n) (n)
#endif
31 // I'm aware of AC_CHECK_ALIGNED_ACCESS_REQUIRED from Autoconf archive, but
32 // it's not useful here. We don't care if unaligned access is supported,
33 // we care if it is fast. Some systems can emulate unaligned access in
34 // software, which is horribly slow; we want to use byte-by-byte access on
35 // such systems but the Autoconf test would detect such a system as
36 // supporting unaligned access.
38 // NOTE: HAVE_FAST_UNALIGNED_ACCESS indicates only support for 16-bit and
39 // 32-bit integer loads and stores. 64-bit integers may or may not work.
40 // That's why 64-bit functions are commented out.
42 // TODO: Big endian PowerPC supports byte swapping load and store instructions
43 // that also allow unaligned access. Inline assembler could be OK for that.
45 // Performance of these functions isn't that important until LZMA3, but it
46 // doesn't hurt to have these ready already.
47 #ifdef HAVE_FAST_UNALIGNED_ACCESS
49 static inline uint16_t
50 integer_read_16(const uint8_t buf[static 2])
52 uint16_t ret = *(const uint16_t *)(buf);
53 return integer_le_16(ret);
57 static inline uint32_t
58 integer_read_32(const uint8_t buf[static 4])
60 uint32_t ret = *(const uint32_t *)(buf);
61 return integer_le_32(ret);
66 static inline uint64_t
67 integer_read_64(const uint8_t buf[static 8])
69 uint64_t ret = *(const uint64_t *)(buf);
70 return integer_le_64(ret);
76 integer_write_16(uint8_t buf[static 2], uint16_t num)
78 *(uint16_t *)(buf) = integer_le_16(num);
83 integer_write_32(uint8_t buf[static 4], uint32_t num)
85 *(uint32_t *)(buf) = integer_le_32(num);
91 integer_write_64(uint8_t buf[static 8], uint64_t num)
93 *(uint64_t *)(buf) = integer_le_64(num);
/// \brief Reads a 16-bit little endian integer one byte at a time
///
/// Byte-by-byte access works on any alignment and any host endianness.
static inline uint16_t
integer_read_16(const uint8_t buf[static 2])
{
	// buf[0] is the least significant byte.
	return (uint16_t)(buf[0] | ((uint16_t)buf[1] << 8));
}
/// \brief Reads a 32-bit little endian integer one byte at a time
///
/// Byte-by-byte access works on any alignment and any host endianness.
static inline uint32_t
integer_read_32(const uint8_t buf[static 4])
{
	// Fold in the bytes from the most significant (buf[3]) down to
	// the least significant (buf[0]).
	uint32_t ret = 0;
	for (int i = 3; i >= 0; --i)
		ret = (ret << 8) | buf[i];

	return ret;
}
/// \brief Reads a 64-bit little endian integer one byte at a time
///
/// Byte-by-byte access works on any alignment and any host endianness.
static inline uint64_t
integer_read_64(const uint8_t buf[static 8])
{
	// Fold in the bytes from the most significant (buf[7]) down to
	// the least significant (buf[0]).
	uint64_t ret = 0;
	for (int i = 7; i >= 0; --i)
		ret = (ret << 8) | buf[i];

	return ret;
}
/// \brief Writes a 16-bit integer as little endian one byte at a time
static inline void
integer_write_16(uint8_t buf[static 2], uint16_t num)
{
	// Least significant byte first (little endian on-disk order).
	buf[0] = (uint8_t)(num & 0xFF);
	buf[1] = (uint8_t)((num >> 8) & 0xFF);
}
/// \brief Writes a 32-bit integer as little endian one byte at a time
static inline void
integer_write_32(uint8_t buf[static 4], uint32_t num)
{
	// Least significant byte first (little endian on-disk order).
	for (unsigned i = 0; i < 4; ++i)
		buf[i] = (uint8_t)(num >> (8 * i));
}
/// \brief Writes a 64-bit integer as little endian one byte at a time
static inline void
integer_write_64(uint8_t buf[static 8], uint64_t num)
{
	// Least significant byte first (little endian on-disk order).
	for (unsigned i = 0; i < 8; ++i)
		buf[i] = (uint8_t)(num >> (8 * i));
}