2 * Speed-optimized CRC64 using slicing-by-four algorithm
3 * Instruction set: i386
6 * This code has been put into the public domain by its authors:
7 * Igor Pavlov <http://7-zip.org/>
8 * Lasse Collin <lasse.collin@tukaani.org>
10 * This code needs lzma_crc64_table, which can be created using the following C code:
13 uint64_t lzma_crc64_table[4][256];
18 static const uint64_t poly64 = UINT64_C(0xC96C5795D7870F42);
20 for (size_t s = 0; s < 4; ++s) {
21 for (size_t b = 0; b < 256; ++b) {
22 uint64_t r = s == 0 ? b : lzma_crc64_table[s - 1][b];
24 for (size_t i = 0; i < 8; ++i) {
if (r & 1)
	r = (r >> 1) ^ poly64;
else
	r >>= 1;
31 lzma_crc64_table[s][b] = r;
36 * The prototype of the CRC64 function:
37 * extern uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc);
/*
 * NOTE(review): this chunk is an incomplete extraction of the function.
 * Labels, the register saves/restores (%ebx, %esi, %edi and %ebp are all
 * modified below, yet no push/pop is visible), every branch instruction,
 * the byte-extraction (movzbl/shift) instructions and the alignment
 * directives are missing.  The comments added below describe only what
 * the surviving lines establish; anything inferred is marked as such.
 */
42 .type lzma_crc64, @function

/*
 * Register roles, per the surviving fragment of the original role table:
 *   %esi       buf
 *   %edi       size, later (per the comment further down) buf + size - 4
 *   %edx:%eax  the 64-bit CRC value (MSB:LSB)
 *   %ebx       base address of lzma_crc64_table
 *   %ebp       table index (scaled by 8 in the addressing modes below,
 *              matching the table's uint64_t entries)
 */
51 * %edi size or buf + size
52 * %ebx lzma_crc64_table

/* Load the cdecl arguments from the stack.  The 64-bit crc argument is
 * split into two 32-bit halves.  The first argument sitting at 0x14(%esp)
 * implies 0x10 bytes of return address plus saved registers above %esp
 * here -- presumably four pushes; TODO confirm, the pushes are not
 * visible in this chunk. */
60 movl 0x14(%esp), %esi /* buf */
61 movl 0x18(%esp), %edi /* size */
62 movl 0x1C(%esp), %eax /* crc LSB */
63 movl 0x20(%esp), %edx /* crc MSB */

/* Standard i386 GOT-based PIC sequence: %ebx is adjusted to the GOT base
 * and the address of lzma_crc64_table is loaded from the GOT.  The
 * instruction that first puts the .L_PIC return address into %ebx
 * (typically a call/pop pair) and the .L_PIC label itself are missing
 * from this chunk. */
66 * Store the address of lzma_crc64_table to %ebx. This is needed to
67 * get position-independent code (PIC).
72 addl $_GLOBAL_OFFSET_TABLE_+[.-.L_PIC], %ebx
73 movl lzma_crc64_table@GOT(%ebx), %ebx

/* CRC64 works on the bit-inverted value between entry and exit; the
 * notl instructions themselves did not survive the extraction. */
75 /* Complement the initial value. */

81 * Check if there is enough input to use slicing-by-four.
82 * We need eight bytes, because the loop pre-reads four bytes.

/* Byte-at-a-time loop that advances buf until it is 4-byte aligned.
 * Its label, the byte load into %ecx/%ebp, the shifts, and the loop
 * branch are all missing; only the 64-bit table lookup survives. */
87 /* Check if we have reached alignment of four bytes. */

91 /* Calculate CRC of the next input byte. */
/* Each lzma_crc64_table entry is a uint64_t: low dword at offset 0,
 * high dword at offset 4 -- hence the *8 index scale. */
97 xorl (%ebx, %ebp, 8), %eax
99 xorl 4(%ebx, %ebp, 8), %edx

105 * If we get here, there's at least eight bytes of aligned input
106 * available. Make %edi multiple of four bytes. Store the possible
107 * remainder over the "size" variable in the argument stack.

109 movl %edi, 0x18(%esp)
/* An instruction rounding %edi down to a multiple of four (e.g.
 * andl $-4) presumably sits between these two lines -- missing here.
 * After the subl, 0x18(%esp) would hold size % 4; TODO confirm. */
111 subl %edi, 0x18(%esp)

114 * Let %edi be buf + size - 4 while running the main loop. This way
115 * we can compare for equality to determine when to exit the loop.

120 /* Read in the first four aligned bytes. */

/* Main slicing-by-four loop body.  The pre-read input dword is split
 * into four bytes (the movzbl/shift instructions are missing from this
 * chunk) and each byte indexes one 256-entry slice of lzma_crc64_table.
 * Slice stride is 256 * sizeof(uint64_t) = 0x800 bytes, so the
 * displacements select: 0x1800 = table[3], 0x1000 = table[2],
 * 0x0800 = table[1], 0x0000 = table[0].  The first pair uses movl, not
 * xorl, so it (re)initializes %edx:%eax from the table[3] entry; the
 * previous CRC value must already have been folded into the input dword
 * by instructions not visible here -- TODO confirm. */
126 movl 0x1800(%ebx, %ebp, 8), %eax
128 movl 0x1804(%ebx, %ebp, 8), %edx
130 xorl 0x1000(%ebx, %ebp, 8), %eax
131 xorl 0x1004(%ebx, %ebp, 8), %edx
134 xorl 0x0800(%ebx, %ebp, 8), %eax
135 xorl 0x0804(%ebx, %ebp, 8), %edx
138 xorl (%ebx, %ebp, 8), %eax
139 xorl 4(%ebx, %ebp, 8), %edx

141 /* Check for end of aligned input. */

145 * Copy the next input byte to %ecx. It is slightly faster to
146 * read it here than at the top of the loop.

/* Loop tail: the last pre-read dword is processed with the same
 * four-slice lookup sequence as the loop body above (the byte
 * extraction instructions are again missing). */
152 * Process the remaining four bytes, which we have already
157 movl 0x1800(%ebx, %ebp, 8), %eax
159 movl 0x1804(%ebx, %ebp, 8), %edx
161 xorl 0x1000(%ebx, %ebp, 8), %eax
162 xorl 0x1004(%ebx, %ebp, 8), %edx
165 xorl 0x0800(%ebx, %ebp, 8), %eax
166 xorl 0x0804(%ebx, %ebp, 8), %edx
169 xorl (%ebx, %ebp, 8), %eax
170 xorl 4(%ebx, %ebp, 8), %edx

/* Reload the 0-3 byte remainder that was stored over the "size"
 * argument slot earlier (line with the subl above). */
172 /* Copy the number of remaining bytes to %edi. */
173 movl 0x18(%esp), %edi

176 /* Check for end of input. */

/* Byte-at-a-time loop for the trailing unaligned bytes; same 64-bit
 * single-table lookup as the alignment loop near the top. */
180 /* Calculate CRC of the next input byte. */
186 xorl (%ebx, %ebp, 8), %eax
188 xorl 4(%ebx, %ebp, 8), %edx

/* The uint64_t return value travels in %edx:%eax per the i386 cdecl
 * convention.  The final notl pair, register pops and ret are missing
 * from this chunk. */
193 /* Complement the final value. */

203 .size lzma_crc64, .-lzma_crc64
206 * This is needed to support non-executable stack. It's ugly to
207 * use __linux__ here, but I don't know a way to detect when
208 * we are using GNU assembler.
210 #if defined(__ELF__) && defined(__linux__)
211 .section .note.GNU-stack,"",@progbits