1 ///////////////////////////////////////////////////////////////////////////////
4 /// \brief LZ in window
6 // Copyright (C) 1999-2006 Igor Pavlov
7 // Copyright (C) 2007 Lasse Collin
9 // This library is free software; you can redistribute it and/or
10 // modify it under the terms of the GNU Lesser General Public
11 // License as published by the Free Software Foundation; either
12 // version 2.1 of the License, or (at your option) any later version.
14 // This library is distributed in the hope that it will be useful,
15 // but WITHOUT ANY WARRANTY; without even the implied warranty of
16 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 // Lesser General Public License for more details.
19 ///////////////////////////////////////////////////////////////////////////////
21 #include "lz_encoder_private.h"
/// The cyclic buffer must hold one slot more than the history size;
/// this value is needed in two places, so compute it with one macro
/// instead of duplicating the "+ 1".
#define get_cyclic_buffer_size(hist_size) ((hist_size) + 1)
47 /// Calculate certain match finder properties and validate the calculated
48 /// values. This is as its own function, because *num_items is needed to
49 /// calculate memory requirements in common/memory.c.
// NOTE(review): The embedded line numbers are discontinuous (49 -> 51,
// 53 -> 55, ...): the return-type line, opening brace, the switch's case
// labels, the declarations of hs and sons, and the closing brace are not
// present in this excerpt.
51 lzma_lz_encoder_hash_properties(lzma_match_finder match_finder,
52 uint32_t history_size, uint32_t *restrict hash_mask,
53 uint32_t *restrict hash_size_sum, uint32_t *restrict num_items)
55 	uint32_t fix_hash_size;
// Map each match finder to the amount of fixed hash space it needs.
// The case labels themselves fall on missing lines.
58 	switch (match_finder) {
61 		fix_hash_size = LZMA_HC3_FIX_HASH_SIZE;
67 		fix_hash_size = LZMA_HC4_FIX_HASH_SIZE;
73 		fix_hash_size = LZMA_BT2_FIX_HASH_SIZE;
79 		fix_hash_size = LZMA_BT3_FIX_HASH_SIZE;
85 		fix_hash_size = LZMA_BT4_FIX_HASH_SIZE;
// hs is the variable hash size; BT2 uses a fixed table, the others derive
// it from the history size (the mask-building lines 105-111 are missing).
96 	if (match_finder == LZMA_BT2) {
97 		// NOTE: hash_mask is not used by the BT2 match finder,
98 		// but it is initialized just in case.
99 		hs = LZMA_BT2_HASH_SIZE;
104 		hs = history_size - 1;
// Cap the variable hash size at 2^24 entries except for the 4-byte-hash
// match finders (the body of the inner if is on missing lines).
// NOTE(review): this function compares against LZMA_BT2 above but
// LZMA_MF_HC4 / LZMA_MF_BT4 here — the enum naming looks inconsistent;
// verify which spelling the lzma_match_finder enum actually declares.
112 	if (hs > (UINT32_C(1) << 24)) {
113 		if (match_finder == LZMA_MF_HC4
114 				|| match_finder == LZMA_MF_BT4)
// Outputs: total hash table size, and total uint32_t item count
// (hash tables plus the sons arrays of the cyclic buffer).
// sons is set per match finder on lines missing from this excerpt.
124 	*hash_size_sum = hs + fix_hash_size;
126 	*num_items = *hash_size_sum
127 			+ get_cyclic_buffer_size(history_size) * sons;
// Initialize/reset the LZ encoder: validate parameters, (re)allocate the
// sliding window buffer and the match finder hash, and set up the match
// finder function pointers. On any failure it tears everything down via
// lzma_lz_encoder_end() and returns an error code.
// NOTE(review): the embedded line numbers are discontinuous (e.g.
// 145 -> 152, 186 -> 198), so the return-type line, several case labels,
// closing braces, and other statements are missing from this excerpt.
134 lzma_lz_encoder_reset(lzma_lz_encoder *lz, lzma_allocator *allocator,
135 		bool (*process)(lzma_coder *coder, uint8_t *restrict out,
136 		size_t *restrict out_pos, size_t out_size),
137 		lzma_vli uncompressed_size,
138 		size_t history_size, size_t additional_buffer_before,
139 		size_t match_max_len, size_t additional_buffer_after,
140 		lzma_match_finder match_finder, uint32_t match_finder_cycles,
141 		const uint8_t *preset_dictionary,
142 		size_t preset_dictionary_size)
144 	lz->sequence = SEQ_START;
145 	lz->uncompressed_size = uncompressed_size;
// NOTE(review): lines 146-151 are missing; presumably further member
// initialization — confirm against the full source.
152 	// Validate history size.
153 	if (history_size < LZMA_DICTIONARY_SIZE_MIN
154 			|| history_size > LZMA_DICTIONARY_SIZE_MAX) {
155 		lzma_lz_encoder_end(lz, allocator);
156 		return LZMA_HEADER_ERROR;
// These asserts back the normalization logic: positions must stay safely
// below MAX_VAL_FOR_NORMALIZE (see lzma_lz_encoder_normalize below).
159 	assert(history_size <= MAX_VAL_FOR_NORMALIZE - 256);
160 	assert(LZMA_DICTIONARY_SIZE_MAX <= MAX_VAL_FOR_NORMALIZE - 256);
162 	// Calculate the size of the history buffer to allocate.
163 	// TODO: Get a reason for magic constant of 256.
164 	const size_t size_reserv = (history_size + additional_buffer_before
165 			+ match_max_len + additional_buffer_after) / 2 + 256;
167 	lz->keep_size_before = history_size + additional_buffer_before;
168 	lz->keep_size_after = match_max_len + additional_buffer_after;
// NOTE(review): the continuation of this expression (line 171, presumably
// "+ size_reserv;") is missing from this excerpt — size_reserv is unused
// otherwise.
170 	const size_t buffer_size = lz->keep_size_before + lz->keep_size_after
173 	// Allocate history buffer if its size has changed.
174 	if (buffer_size != lz->size) {
175 		lzma_free(lz->buffer, allocator);
176 		lz->buffer = lzma_alloc(buffer_size, allocator);
177 		if (lz->buffer == NULL) {
178 			lzma_lz_encoder_end(lz, allocator);
179 			return LZMA_MEM_ERROR;
183 		// Allocation successful. Store the new size.
184 		lz->size = buffer_size;
// NOTE(review): lines 187-197 are missing; presumably resetting of the
// in-window position variables (read_pos, write_pos, etc.).
186 	// Reset in window variables.
198 	// Validate match_finder, set function pointers and a few match
199 	// finder specific variables.
// The case labels of this switch fall on missing lines; only the case
// bodies are visible. HC finders get a smaller default cut_value than
// the BT finders.
200 	switch (match_finder) {
203 		lz->get_matches = &lzma_hc3_get_matches;
204 		lz->skip = &lzma_hc3_skip;
205 		lz->cut_value = 8 + (match_max_len >> 2);
210 		lz->get_matches = &lzma_hc4_get_matches;
211 		lz->skip = &lzma_hc4_skip;
212 		lz->cut_value = 8 + (match_max_len >> 2);
217 		lz->get_matches = &lzma_bt2_get_matches;
218 		lz->skip = &lzma_bt2_skip;
219 		lz->cut_value = 16 + (match_max_len >> 1);
224 		lz->get_matches = &lzma_bt3_get_matches;
225 		lz->skip = &lzma_bt3_skip;
226 		lz->cut_value = 16 + (match_max_len >> 1);
231 		lz->get_matches = &lzma_bt4_get_matches;
232 		lz->skip = &lzma_bt4_skip;
233 		lz->cut_value = 16 + (match_max_len >> 1);
// Default case: unknown match finder is a header error.
237 		lzma_lz_encoder_end(lz, allocator);
238 		return LZMA_HEADER_ERROR;
241 	// Check if we have been requested to use a non-default cut_value.
242 	if (match_finder_cycles > 0)
243 		lz->cut_value = match_finder_cycles;
245 	lz->match_max_len = match_max_len;
246 	lz->cyclic_buffer_size = get_cyclic_buffer_size(history_size);
// NOTE(review): line 249 is missing; presumably "uint32_t num_items;"
// since &num_items is passed below.
248 	uint32_t hash_size_sum;
250 	if (lzma_lz_encoder_hash_properties(match_finder, history_size,
251 			&lz->hash_mask, &hash_size_sum, &num_items)) {
252 		lzma_lz_encoder_end(lz, allocator);
253 		return LZMA_HEADER_ERROR;
// Reallocate the hash/son array only when the item count changed.
256 	if (num_items != lz->num_items) {
257 #if UINT32_MAX >= SIZE_MAX / 4
258 		// Check for integer overflow. (Huge dictionaries are not
259 		// possible on 32-bit CPU.)
260 		if (num_items > SIZE_MAX / sizeof(uint32_t)) {
261 			lzma_lz_encoder_end(lz, allocator);
262 			return LZMA_MEM_ERROR;
266 		const size_t size_in_bytes
267 				= (size_t)(num_items) * sizeof(uint32_t);
269 		lzma_free(lz->hash, allocator);
270 		lz->hash = lzma_alloc(size_in_bytes, allocator);
271 		if (lz->hash == NULL) {
272 			lzma_lz_encoder_end(lz, allocator);
273 			return LZMA_MEM_ERROR;
276 		lz->num_items = num_items;
// The son (binary tree / hash chain) array lives in the same allocation,
// right after the hash tables.
279 	lz->son = lz->hash + hash_size_sum;
281 	// Reset the hash table to empty hash values.
283 	uint32_t *restrict items = lz->hash;
285 	for (uint32_t i = 0; i < hash_size_sum; ++i)
286 		items[i] = EMPTY_HASH_VALUE;
289 	lz->cyclic_buffer_pos = 0;
291 	// Because zero is used as empty hash value, make the first byte
292 	// appear at buffer[1 - offset].
// NOTE(review): the assignment implementing the comment above (lines
// 293-294) is missing from this excerpt.
295 	// If we are using a preset dictionary, read it now.
296 	// TODO: This isn't implemented yet so return LZMA_HEADER_ERROR.
297 	if (preset_dictionary != NULL && preset_dictionary_size > 0) {
298 		lzma_lz_encoder_end(lz, allocator);
299 		return LZMA_HEADER_ERROR;
302 	// Set the process function pointer.
303 	lz->process = process;
// Free the allocations owned by the encoder: the hash/son item array and
// the sliding window buffer. This is also used as the failure-path
// cleanup in lzma_lz_encoder_reset(), where either pointer may still be
// NULL — so lzma_free() is presumably NULL-safe.
// NOTE(review): lines 311 and 313-315 are missing; presumably the opening
// brace and resetting lz->hash / lz->num_items between the two frees.
310 lzma_lz_encoder_end(lzma_lz_encoder *lz, lzma_allocator *allocator)
312 	lzma_free(lz->hash, allocator);
316 	lzma_free(lz->buffer, allocator);
324 /// \brief Moves the data in the input window to free space for new data
326 /// lz->buffer is a sliding input window, which keeps lz->keep_size_before
327 /// bytes of input history available all the time. Now and then we need to
328 /// "slide" the buffer to make space for the new data to the end of the
329 /// buffer. At the same time, data older than keep_size_before is dropped.
332 move_window(lzma_lz_encoder *lz)
334 	// buffer[move_offset] will become buffer[0].
335 	assert(lz->read_pos > lz->keep_size_after);
336 	size_t move_offset = lz->read_pos - lz->keep_size_before;
// NOTE(review): lines 340-342 are missing; per the comment below they
// presumably adjust move_offset by the one extra byte — confirm against
// the full source.
338 	// We need one additional byte, since move_pos() moves on 1 byte.
339 	// TODO: Clean up? At least document more.
343 	assert(lz->write_pos > move_offset);
344 	const size_t move_size = lz->write_pos - move_offset;
346 	assert(move_offset + move_size <= lz->size);
// Source and destination overlap, hence memmove() instead of memcpy().
348 	memmove(lz->buffer, lz->buffer + move_offset, move_size);
// All buffer-relative positions shift down by move_offset; lz->offset
// accumulates the total slide so absolute positions stay consistent.
350 	lz->offset += move_offset;
351 	lz->read_pos -= move_offset;
352 	lz->read_limit -= move_offset;
353 	lz->write_pos -= move_offset;
359 /// \brief Tries to fill the input window (lz->buffer)
361 /// If we are the last encoder in the chain, our input data is in in[].
362 /// Otherwise we call the next filter in the chain to process in[] and
363 /// write its output to lz->buffer.
365 /// This function must not be called once it has returned LZMA_STREAM_END.
// NOTE(review): the embedded line numbers are discontinuous, so the
// return-type line, some declarations (ret, in_used), several braces and
// switch labels, and the final return are missing from this excerpt.
368 fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in,
369 		size_t *in_pos, size_t in_size, lzma_action action)
371 	assert(coder->lz.read_pos <= coder->lz.write_pos);
373 	// Move the sliding window if needed.
374 	if (coder->lz.read_pos >= coder->lz.size - coder->lz.keep_size_after)
375 		move_window(&coder->lz);
379 	if (coder->next.code == NULL) {
380 		// Not using a filter, simply memcpy() as much as possible.
381 		in_used = bufcpy(in, in_pos, in_size, coder->lz.buffer,
382 				&coder->lz.write_pos, coder->lz.size);
// A raw copy never signals end of stream by itself; any non-RUN action
// with all input consumed counts as the end of this round.
384 		if (action != LZMA_RUN && *in_pos == in_size)
385 			ret = LZMA_STREAM_END;
// else branch (brace lines missing): run the next filter in the chain,
// writing its output directly into the window.
390 		const size_t in_start = *in_pos;
391 		ret = coder->next.code(coder->next.coder, allocator,
393 				coder->lz.buffer, &coder->lz.write_pos,
394 				coder->lz.size, action);
395 		in_used = *in_pos - in_start;
// Track the remaining known Uncompressed Size, if any.
398 	assert(coder->lz.uncompressed_size >= in_used);
399 	if (coder->lz.uncompressed_size != LZMA_VLI_VALUE_UNKNOWN)
400 		coder->lz.uncompressed_size -= in_used;
402 	// If end of stream has been reached or flushing completed, we allow
403 	// the encoder to process all the input (that is, read_pos is allowed
404 	// to reach write_pos). Otherwise we keep keep_size_after bytes
405 	// available as prebuffer.
406 	if (ret == LZMA_STREAM_END) {
407 		assert(*in_pos == in_size);
408 		coder->lz.read_limit = coder->lz.write_pos;
// A switch on action follows (lines 409-411 missing); SYNC_FLUSH enters
// flush mode, FINISH (label on a missing line) enters finish mode, and
// anything else is a programming error.
412 		case LZMA_SYNC_FLUSH:
413 			coder->lz.sequence = SEQ_FLUSH;
417 			coder->lz.sequence = SEQ_FINISH;
422 			ret = LZMA_PROG_ERROR;
426 	} else if (coder->lz.write_pos > coder->lz.keep_size_after) {
427 		// This needs to be done conditionally, because if we got
428 		// only little new input, there may be too little input
429 		// to do any encoding yet.
430 		coder->lz.read_limit = coder->lz.write_pos
431 				- coder->lz.keep_size_after;
434 	// Switch to finishing mode if we have got all the input data.
435 	// lzma_lz_encode() won't return LZMA_STREAM_END until LZMA_FINISH
438 	// NOTE: When LZMA is used together with other filters, it is possible
439 	// that coder->lz.sequence gets set to SEQ_FINISH before the next
440 	// encoder has returned LZMA_STREAM_END. This is somewhat ugly, but
441 	// works correctly, because the next encoder cannot have any more
442 	// output left to be produced. If it had, then our known Uncompressed
443 	// Size would be invalid, which would mean that we have a bad bug.
444 	if (ret == LZMA_OK && coder->lz.uncompressed_size == 0)
445 		coder->lz.sequence = SEQ_FINISH;
447 	// Restart the match finder after finished LZMA_SYNC_FLUSH.
448 	if (coder->lz.pending > 0
449 			&& coder->lz.read_pos < coder->lz.read_limit) {
450 		// Match finder may update coder->pending and expects it to
451 		// start from zero, so use a temporary variable.
452 		const size_t pending = coder->lz.pending;
453 		coder->lz.pending = 0;
455 		// Rewind read_pos so that the match finder can hash
456 		// the pending bytes.
457 		assert(coder->lz.read_pos >= pending);
458 		coder->lz.read_pos -= pending;
459 		coder->lz.skip(&coder->lz, pending);
// Main encode loop: drains the temporary output buffer, handles the
// start/flush-end/finish-end states, then repeatedly fills the window and
// calls the match-finder-driven process() callback until input or output
// space runs out.
// NOTE(review): the embedded line numbers are discontinuous, so the
// return-type line, case labels, several braces/else lines, and the final
// return(s) after the while loop are missing from this excerpt.
467 lzma_lz_encode(lzma_coder *coder, lzma_allocator *allocator,
468 		const uint8_t *restrict in, size_t *restrict in_pos,
470 		uint8_t *restrict out, size_t *restrict out_pos,
471 		size_t out_size, lzma_action action)
473 	// Flush the temporary output buffer, which may be used when the
474 	// encoder runs out of space in the primary output buffer (the out,
475 	// *out_pos, and out_size variables).
476 	if (coder->lz.temp_size > 0) {
477 		const size_t out_avail = out_size - *out_pos;
478 		if (out_avail < coder->lz.temp_size) {
479 			// Cannot copy everything. Copy as much as possible
480 			// and move the data in lz.temp to the beginning of
482 			memcpy(out + *out_pos, coder->lz.temp, out_avail);
483 			*out_pos += out_avail;
// The remaining temp data overlaps itself, hence memmove().
484 			memmove(coder->lz.temp, coder->lz.temp + out_avail,
485 				coder->lz.temp_size - out_avail);
486 			coder->lz.temp_size -= out_avail;
490 		// We can copy everything from coder->lz.temp to out.
491 		memcpy(out + *out_pos, coder->lz.temp, coder->lz.temp_size);
492 		*out_pos += coder->lz.temp_size;
493 		coder->lz.temp_size = 0;
// State machine; the case labels (SEQ_START, SEQ_FLUSH_END, SEQ_END, ...)
// fall on missing lines.
496 	switch (coder->lz.sequence) {
498 		assert(coder->lz.read_pos == coder->lz.write_pos);
500 		// If there is no new input data and LZMA_SYNC_FLUSH is used
501 		// immediately after previous LZMA_SYNC_FLUSH finished or
502 		// at the very beginning of the input stream, we return
503 		// LZMA_STREAM_END immediately. Writing a flush marker
504 		// to the very beginning of the stream or right after previous
505 		// flush marker is not allowed by the LZMA stream format.
506 		if (*in_pos == in_size && action == LZMA_SYNC_FLUSH)
507 			return LZMA_STREAM_END;
509 		coder->lz.sequence = SEQ_RUN;
513 		// During an earlier call to this function, flushing was
514 		// otherwise finished except some data was left pending
515 		// in coder->lz.buffer. Now we have copied all that data
516 		// to the output buffer and can return LZMA_STREAM_END.
517 		coder->lz.sequence = SEQ_START;
518 		assert(action == LZMA_SYNC_FLUSH);
519 		return LZMA_STREAM_END;
522 		// This is like the above flushing case, but for finishing
525 		// NOTE: action is not necessarily LZMA_FINISH; it can
526 		// be LZMA_RUN or LZMA_SYNC_FLUSH too in case it is used
527 		// at the end of the stream with known Uncompressed Size.
528 		return action != LZMA_RUN ? LZMA_STREAM_END : LZMA_OK;
534 	while (*out_pos < out_size
535 			&& (*in_pos < in_size || action != LZMA_RUN)) {
536 		// Read more data to coder->lz.buffer if needed.
537 		if (coder->lz.sequence == SEQ_RUN
538 				&& coder->lz.read_pos >= coder->lz.read_limit)
539 			return_if_error(fill_window(coder, allocator,
540 					in, in_pos, in_size, action));
// process() returns true when the current sequence has been finished.
543 		if (coder->lz.process(coder, out, out_pos, out_size)) {
544 			if (coder->lz.sequence == SEQ_FLUSH) {
545 				assert(action == LZMA_SYNC_FLUSH);
546 				if (coder->lz.temp_size == 0) {
547 					// Flushing was finished successfully.
548 					coder->lz.sequence = SEQ_START;
550 					// Flushing was otherwise finished,
551 					// except that some data was left
552 					// into coder->lz.buffer.
553 					coder->lz.sequence = SEQ_FLUSH_END;
// else branch (brace lines missing): finishing rather than flushing.
556 				// NOTE: action may be LZMA_RUN here in case
557 				// Uncompressed Size is known and we have
558 				// processed all the data already.
559 				assert(coder->lz.sequence == SEQ_FINISH);
560 				coder->lz.sequence = SEQ_END;
563 			return action != LZMA_RUN && coder->lz.temp_size == 0
564 					? LZMA_STREAM_END : LZMA_OK;
572 /// \brief Normalizes hash values
574 /// lzma_lz_normalize is called when lz->pos hits MAX_VAL_FOR_NORMALIZE,
575 /// which currently happens once every 2 GiB of input data (to be exact,
576 /// after the first 2 GiB it happens once every 2 GiB minus dictionary_size
577 /// bytes). lz->pos is incremented by lzma_lz_move_pos().
579 /// lz->hash contains big amount of offsets relative to lz->buffer.
580 /// The offsets are stored as uint32_t, which is the only reasonable
581 /// datatype for these offsets; uint64_t would waste far too much RAM
582 /// and uint16_t would limit the dictionary to 64 KiB (far too small).
584 /// When compressing files over 2 GiB, lz->buffer needs to be moved forward
585 /// to avoid integer overflows. We scan the lz->hash array and fix every
586 /// value to match the updated lz->buffer.
// NOTE(review): the return-type line (587), opening brace, and closing
// brace are missing from this excerpt.
588 lzma_lz_encoder_normalize(lzma_lz_encoder *lz)
// Every stored offset older than subvalue is outside the dictionary.
590 	const uint32_t subvalue = lz->read_pos - lz->cyclic_buffer_size;
591 	assert(subvalue <= INT32_MAX);
// Iterate over the whole hash + son allocation (num_items entries).
594 	const uint32_t num_items = lz->num_items;
595 	uint32_t *restrict items = lz->hash;
597 	for (uint32_t i = 0; i < num_items; ++i) {
598 		// If the distance is greater than the dictionary
599 		// size, we can simply mark the item as empty.
600 		if (items[i] <= subvalue)
601 			items[i] = EMPTY_HASH_VALUE;
// NOTE(review): line 602 is missing; in context this should be the
// `else` of the preceding if — confirm against the full source.
603 			items[i] -= subvalue;
607 	// Update offset to match the new locations.
608 	lz->offset -= subvalue;