VirtualBox

source: vbox/trunk/src/libs/liblzma-5.6.4/common/stream_decoder_mt.c@ 109042

Last change on this file since 109042 was 108905, checked in by vboxsync, 4 weeks ago

liblzma-5.6.4: Applied and adjusted our liblzma changes to 5.6.4. jiraref:VBP-1613

  • Property svn:eol-style set to LF
  • Property svn:keywords set to Author Date Id Revision
File size: 63.0 KB
Line 
1// SPDX-License-Identifier: 0BSD
2
3///////////////////////////////////////////////////////////////////////////////
4//
5/// \file stream_decoder_mt.c
6/// \brief Multithreaded .xz Stream decoder
7//
8// Authors: Sebastian Andrzej Siewior
9// Lasse Collin
10//
11///////////////////////////////////////////////////////////////////////////////
12
13#include "common.h"
14#include "block_decoder.h"
15#include "stream_decoder.h"
16#include "index.h"
17#include "outqueue.h"
18
19
/// Per-worker-thread state machine. The current state is stored in
/// worker_thread.state and is protected by that thread's own mutex.
typedef enum {
	/// Waiting for work.
	/// Main thread may change this to THR_RUN or THR_EXIT.
	THR_IDLE,

	/// Decoding is in progress.
	/// Main thread may change this to THR_STOP or THR_EXIT.
	/// The worker thread may change this to THR_IDLE.
	THR_RUN,

	/// The main thread wants the thread to stop whatever it was doing
	/// but not exit. Main thread may change this to THR_EXIT.
	/// The worker thread may change this to THR_IDLE.
	THR_STOP,

	/// The main thread wants the thread to exit. The worker thread
	/// frees its resources and terminates.
	THR_EXIT,

} worker_state;
39
40
/// Controls whether a worker thread publishes its progress to its
/// lzma_outbuf while the Block is still being decoded (see the
/// partial_update member of struct worker_thread).
typedef enum {
	/// Partial updates (storing of worker thread progress
	/// to lzma_outbuf) are disabled.
	PARTIAL_DISABLED,

	/// Main thread requests partial updates to be enabled but
	/// no partial update has been done by the worker thread yet.
	///
	/// Changing from PARTIAL_DISABLED to PARTIAL_START requires
	/// use of the worker-thread mutex. Other transitions don't
	/// need a mutex.
	PARTIAL_START,

	/// Partial updates are enabled and the worker thread has done
	/// at least one partial update.
	PARTIAL_ENABLED,

} partial_update_mode;
59
60
/// Data for one worker thread. One instance decodes one Block at a time.
struct worker_thread {
	/// Worker state is protected with our mutex.
	worker_state state;

	/// Input buffer that will contain the whole Block except Block Header.
	uint8_t *in;

	/// Amount of memory allocated for "in"
	size_t in_size;

	/// Number of bytes written to "in" by the main thread
	size_t in_filled;

	/// Number of bytes consumed from "in" by the worker thread.
	size_t in_pos;

	/// Amount of uncompressed data that has been decoded. This local
	/// copy is needed because updating outbuf->pos requires locking
	/// the main mutex (coder->mutex).
	size_t out_pos;

	/// Pointer to the main structure is needed to (1) lock the main
	/// mutex (coder->mutex) when updating outbuf->pos and (2) when
	/// putting this thread back to the stack of free threads.
	struct lzma_stream_coder *coder;

	/// The allocator is set by the main thread. Since a copy of the
	/// pointer is kept here, the application must not change the
	/// allocator before calling lzma_end().
	const lzma_allocator *allocator;

	/// Output queue buffer to which the uncompressed data is written.
	lzma_outbuf *outbuf;

	/// Amount of compressed data that has already been decompressed.
	/// This is updated from in_pos when our mutex is locked.
	/// This is size_t, not uint64_t, because per-thread progress
	/// is limited to sizes of allocated buffers.
	size_t progress_in;

	/// Like progress_in but for uncompressed data.
	size_t progress_out;

	/// Updating outbuf->pos requires locking the main mutex
	/// (coder->mutex). Since the main thread will only read output
	/// from the oldest outbuf in the queue, only the worker thread
	/// that is associated with the oldest outbuf needs to update its
	/// outbuf->pos. This avoids useless mutex contention that would
	/// happen if all worker threads were frequently locking the main
	/// mutex to update their outbuf->pos.
	///
	/// Only when partial_update is something else than PARTIAL_DISABLED,
	/// this worker thread will update outbuf->pos after each call to
	/// the Block decoder.
	partial_update_mode partial_update;

	/// Block decoder
	lzma_next_coder block_decoder;

	/// Thread-specific Block options are needed because the Block
	/// decoder modifies the struct given to it at initialization.
	lzma_block block_options;

	/// Filter chain memory usage
	uint64_t mem_filters;

	/// Next structure in the stack of free worker threads.
	struct worker_thread *next;

	/// Protects "state"; used together with "cond" so the main thread
	/// can wake this thread when there is new input or a state change.
	mythread_mutex mutex;
	mythread_cond cond;

	/// The ID of this thread is used to join the thread
	/// when it's not needed anymore.
	mythread thread_id;
};
137
138
/// Main coder structure shared between the main thread and the worker
/// threads. Members marked "Use mutex" must only be accessed with
/// coder->mutex held.
struct lzma_stream_coder {
	/// Position in the decoding state machine of the main thread.
	enum {
		SEQ_STREAM_HEADER,
		SEQ_BLOCK_HEADER,
		SEQ_BLOCK_INIT,
		SEQ_BLOCK_THR_INIT,
		SEQ_BLOCK_THR_RUN,
		SEQ_BLOCK_DIRECT_INIT,
		SEQ_BLOCK_DIRECT_RUN,
		SEQ_INDEX_WAIT_OUTPUT,
		SEQ_INDEX_DECODE,
		SEQ_STREAM_FOOTER,
		SEQ_STREAM_PADDING,
		SEQ_ERROR,
	} sequence;

	/// Block decoder
	lzma_next_coder block_decoder;

	/// Every Block Header will be decoded into this structure.
	/// This is also used to initialize a Block decoder when in
	/// direct mode. In threaded mode, a thread-specific copy will
	/// be made for decoder initialization because the Block decoder
	/// will modify the structure given to it.
	lzma_block block_options;

	/// Buffer to hold a filter chain for Block Header decoding and
	/// initialization. These are freed after successful Block decoder
	/// initialization or at stream_decoder_mt_end(). The thread-specific
	/// copy of block_options won't hold a pointer to filters[] after
	/// initialization.
	lzma_filter filters[LZMA_FILTERS_MAX + 1];

	/// Stream Flags from Stream Header
	lzma_stream_flags stream_flags;

	/// Index is hashed so that it can be compared to the sizes of Blocks
	/// with O(1) memory usage.
	lzma_index_hash *index_hash;


	/// Maximum wait time if cannot use all the input and cannot
	/// fill the output buffer. This is in milliseconds.
	uint32_t timeout;


	/// Error code from a worker thread.
	///
	/// \note Use mutex.
	lzma_ret thread_error;

	/// Error code to return after pending output has been copied out. If
	/// set in read_output_and_wait(), this is a mirror of thread_error.
	/// If set in stream_decode_mt() then it's, for example, error that
	/// occurred when decoding Block Header.
	lzma_ret pending_error;

	/// Number of threads that will be created at maximum.
	uint32_t threads_max;

	/// Number of thread structures that have been initialized from
	/// "threads", and thus the number of worker threads actually
	/// created so far.
	uint32_t threads_initialized;

	/// Array of allocated thread-specific structures. When no threads
	/// are in use (direct mode) this is NULL. In threaded mode this
	/// points to an array of threads_max number of worker_thread structs.
	struct worker_thread *threads;

	/// Stack of free threads. When a thread finishes, it puts itself
	/// back into this stack. This starts as empty because threads
	/// are created only when actually needed.
	///
	/// \note Use mutex.
	struct worker_thread *threads_free;

	/// The most recent worker thread to which the main thread writes
	/// the new input from the application.
	struct worker_thread *thr;

	/// Output buffer queue for decompressed data from the worker threads
	///
	/// \note Use mutex with operations that need it.
	lzma_outq outq;

	mythread_mutex mutex;
	mythread_cond cond;


	/// Memory usage that will not be exceeded in multi-threaded mode.
	/// Single-threaded mode can exceed this even by a large amount.
	uint64_t memlimit_threading;

	/// Memory usage limit that should never be exceeded.
	/// LZMA_MEMLIMIT_ERROR will be returned if decoding isn't possible
	/// even in single-threaded mode without exceeding this limit.
	uint64_t memlimit_stop;

	/// Amount of memory in use by the direct mode decoder
	/// (coder->block_decoder). In threaded mode this is 0.
	uint64_t mem_direct_mode;

	/// Amount of memory needed by the running worker threads.
	/// This doesn't include the memory needed by the output buffer.
	///
	/// \note Use mutex.
	uint64_t mem_in_use;

	/// Amount of memory used by the idle (cached) threads.
	///
	/// \note Use mutex.
	uint64_t mem_cached;


	/// Amount of memory needed for the filter chain of the next Block.
	uint64_t mem_next_filters;

	/// Amount of memory needed for the thread-specific input buffer
	/// for the next Block.
	uint64_t mem_next_in;

	/// Amount of memory actually needed to decode the next Block
	/// in threaded mode. This is
	/// mem_next_filters + mem_next_in + memory needed for lzma_outbuf.
	uint64_t mem_next_block;


	/// Amount of compressed data in Stream Header + Blocks that have
	/// already been finished.
	///
	/// \note Use mutex.
	uint64_t progress_in;

	/// Amount of uncompressed data in Blocks that have already
	/// been finished.
	///
	/// \note Use mutex.
	uint64_t progress_out;


	/// If true, LZMA_NO_CHECK is returned if the Stream has
	/// no integrity check.
	bool tell_no_check;

	/// If true, LZMA_UNSUPPORTED_CHECK is returned if the Stream has
	/// an integrity check that isn't supported by this liblzma build.
	bool tell_unsupported_check;

	/// If true, LZMA_GET_CHECK is returned after decoding Stream Header.
	bool tell_any_check;

	/// If true, we will tell the Block decoder to skip calculating
	/// and verifying the integrity check.
	bool ignore_check;

	/// If true, we will decode concatenated Streams that possibly have
	/// Stream Padding between or after them. LZMA_STREAM_END is returned
	/// once the application isn't giving us any new input (LZMA_FINISH),
	/// and we aren't in the middle of a Stream, and possible
	/// Stream Padding is a multiple of four bytes.
	bool concatenated;

	/// If true, we will return any errors immediately instead of first
	/// producing all output before the location of the error.
	bool fail_fast;


	/// When decoding concatenated Streams, this is true as long as we
	/// are decoding the first Stream. This is needed to avoid misleading
	/// LZMA_FORMAT_ERROR in case the later Streams don't have valid magic
	/// bytes.
	bool first_stream;

	/// This is used to track if the previous call to stream_decode_mt()
	/// had output space (*out_pos < out_size) and managed to fill the
	/// output buffer (*out_pos == out_size). This may be set to true
	/// in read_output_and_wait(). This is read and then reset to false
	/// at the beginning of stream_decode_mt().
	///
	/// This is needed to support applications that call lzma_code() in
	/// such a way that more input is provided only when lzma_code()
	/// didn't fill the output buffer completely. Basically, this makes
	/// it easier to convert such applications from single-threaded
	/// decoder to multi-threaded decoder.
	bool out_was_filled;

	/// Write position in buffer[] and position in Stream Padding
	size_t pos;

	/// Buffer to hold Stream Header, Block Header, and Stream Footer.
	/// Block Header has biggest maximum size.
	uint8_t buffer[LZMA_BLOCK_HEADER_SIZE_MAX];
};
333
334
335/// Enables updating of outbuf->pos. This is a callback function that is
336/// used with lzma_outq_enable_partial_output().
337static void
338worker_enable_partial_update(void *thr_ptr)
339{
340 struct worker_thread *thr = thr_ptr;
341
342 mythread_sync(thr->mutex) {
343 thr->partial_update = PARTIAL_START;
344 mythread_cond_signal(&thr->cond);
345 }
346}
347
348
/// Things to do at THR_STOP or when finishing a Block.
/// This is called with thr->coder->mutex locked (both callers wrap the
/// call in mythread_sync(thr->coder->mutex)).
static void
worker_stop(struct worker_thread *thr)
{
	// Update memory usage counters. Note that thr->in isn't
	// necessarily freed at this point: on the Block-finish path the
	// caller freed it just before calling us, but on the THR_STOP
	// path the buffer may still be allocated and is freed later
	// (at THR_EXIT, or presumably when the thread is reused —
	// the reuse code isn't visible in this file chunk).
	// Zeroing in_size keeps the accounting consistent either way.
	thr->coder->mem_in_use -= thr->in_size;
	thr->in_size = 0;

	// The filter-chain memory stays allocated with this thread's
	// Block decoder, so it moves from "in use" to "cached".
	thr->coder->mem_in_use -= thr->mem_filters;
	thr->coder->mem_cached += thr->mem_filters;

	// Put this thread to the stack of free threads.
	thr->next = thr->coder->threads_free;
	thr->coder->threads_free = thr;

	// Wake up the main thread in case it is waiting for a free
	// thread or for memory to be released.
	mythread_cond_signal(&thr->coder->cond);
	return;
}
368
369
/// Worker thread main function. It loops decoding one Block at a time:
/// it waits in THR_IDLE for work, decodes input in small chunks while in
/// THR_RUN, and reacts to THR_STOP/THR_EXIT requests from the main thread
/// between chunks. In the VBox build the IPRT thread signature is used;
/// the thread handle argument is unused here.
static MYTHREAD_RET_TYPE
#ifndef VBOX
worker_decoder(void *thr_ptr)
#else
worker_decoder(RTTHREAD hThread, void *thr_ptr)
#endif
{
	struct worker_thread *thr = thr_ptr;
	size_t in_filled;
	partial_update_mode partial_update;
	lzma_ret ret;

next_loop_lock:
	// Entered with thr->mutex unlocked.
	mythread_mutex_lock(&thr->mutex);
next_loop_unlocked:
	// Entered with thr->mutex locked.

	if (thr->state == THR_IDLE) {
		mythread_cond_wait(&thr->cond, &thr->mutex);
		goto next_loop_unlocked;
	}

	if (thr->state == THR_EXIT) {
		// Free this thread's resources and terminate.
		mythread_mutex_unlock(&thr->mutex);

		lzma_free(thr->in, thr->allocator);
		lzma_next_end(&thr->block_decoder, thr->allocator);

		mythread_mutex_destroy(&thr->mutex);
		mythread_cond_destroy(&thr->cond);

		return MYTHREAD_RET_VALUE;
	}

	if (thr->state == THR_STOP) {
		// Abandon the current Block and go back to the stack
		// of free threads.
		thr->state = THR_IDLE;
		mythread_mutex_unlock(&thr->mutex);

		mythread_sync(thr->coder->mutex) {
			worker_stop(thr);
		}

		goto next_loop_lock;
	}

	assert(thr->state == THR_RUN);

	// Update progress info for get_progress().
	thr->progress_in = thr->in_pos;
	thr->progress_out = thr->out_pos;

	// If we don't have any new input, wait for a signal from the main
	// thread except if partial output has just been enabled. In that
	// case we will do one normal run so that the partial output info
	// gets passed to the main thread. The call to block_decoder.code()
	// is useless but harmless as it can occur only once per Block.
	in_filled = thr->in_filled;
	partial_update = thr->partial_update;

	if (in_filled == thr->in_pos && partial_update != PARTIAL_START) {
		mythread_cond_wait(&thr->cond, &thr->mutex);
		goto next_loop_unlocked;
	}

	mythread_mutex_unlock(&thr->mutex);

	// Pass the input in small chunks to the Block decoder.
	// This way we react reasonably fast if we are told to stop/exit,
	// and (when partial update is enabled) we tell about our progress
	// to the main thread frequently enough.
	const size_t chunk_size = 16384;
	if ((in_filled - thr->in_pos) > chunk_size)
		in_filled = thr->in_pos + chunk_size;

	ret = thr->block_decoder.code(
			thr->block_decoder.coder, thr->allocator,
			thr->in, &thr->in_pos, in_filled,
			thr->outbuf->buf, &thr->out_pos,
			thr->outbuf->allocated, LZMA_RUN);

	if (ret == LZMA_OK) {
		if (partial_update != PARTIAL_DISABLED) {
			// The main thread uses thr->mutex to change from
			// PARTIAL_DISABLED to PARTIAL_START. The main thread
			// doesn't care about this variable after that so we
			// can safely change it here to PARTIAL_ENABLED
			// without a mutex.
			thr->partial_update = PARTIAL_ENABLED;

			// The main thread is reading decompressed data
			// from thr->outbuf. Tell the main thread about
			// our progress.
			//
			// NOTE: It's possible that we consumed input without
			// producing any new output so it's possible that
			// only in_pos has changed. In case of PARTIAL_START
			// it is possible that neither in_pos nor out_pos has
			// changed.
			mythread_sync(thr->coder->mutex) {
				thr->outbuf->pos = thr->out_pos;
				thr->outbuf->decoder_in_pos = thr->in_pos;
				mythread_cond_signal(&thr->coder->cond);
			}
		}

		goto next_loop_lock;
	}

	// Either we finished successfully (LZMA_STREAM_END) or an error
	// occurred. Both cases are handled almost identically. The error
	// case requires updating thr->coder->thread_error.
	//
	// The sizes are in the Block Header and the Block decoder
	// checks that they match, thus we know these:
	assert(ret != LZMA_STREAM_END || thr->in_pos == thr->in_size);
	assert(ret != LZMA_STREAM_END
			|| thr->out_pos == thr->block_options.uncompressed_size);

	// Free the input buffer. Don't update in_size as we need
	// it later to update thr->coder->mem_in_use.
	lzma_free(thr->in, thr->allocator);
	thr->in = NULL;

	// Go idle unless the main thread has already requested exit,
	// which must not be overwritten.
	mythread_sync(thr->mutex) {
		if (thr->state != THR_EXIT)
			thr->state = THR_IDLE;
	}

	mythread_sync(thr->coder->mutex) {
		// Move our progress info to the main thread.
		thr->coder->progress_in += thr->in_pos;
		thr->coder->progress_out += thr->out_pos;
		thr->progress_in = 0;
		thr->progress_out = 0;

		// Mark the outbuf as finished.
		thr->outbuf->pos = thr->out_pos;
		thr->outbuf->decoder_in_pos = thr->in_pos;
		thr->outbuf->finished = true;
		thr->outbuf->finish_ret = ret;
		thr->outbuf = NULL;

		// If an error occurred, tell it to the main thread.
		if (ret != LZMA_STREAM_END
				&& thr->coder->thread_error == LZMA_OK)
			thr->coder->thread_error = ret;

		worker_stop(thr);
	}

	goto next_loop_lock;
}
522
523
524/// Tells the worker threads to exit and waits for them to terminate.
525static void
526threads_end(struct lzma_stream_coder *coder, const lzma_allocator *allocator)
527{
528 for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
529 mythread_sync(coder->threads[i].mutex) {
530 coder->threads[i].state = THR_EXIT;
531 mythread_cond_signal(&coder->threads[i].cond);
532 }
533 }
534
535 for (uint32_t i = 0; i < coder->threads_initialized; ++i)
536 mythread_join(coder->threads[i].thread_id);
537
538 lzma_free(coder->threads, allocator);
539 coder->threads_initialized = 0;
540 coder->threads = NULL;
541 coder->threads_free = NULL;
542
543 // The threads don't update these when they exit. Do it here.
544 coder->mem_in_use = 0;
545 coder->mem_cached = 0;
546
547 return;
548}
549
550
551static void
552threads_stop(struct lzma_stream_coder *coder)
553{
554 for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
555 mythread_sync(coder->threads[i].mutex) {
556 // The state must be changed conditionally because
557 // THR_IDLE -> THR_STOP is not a valid state change.
558 if (coder->threads[i].state != THR_IDLE) {
559 coder->threads[i].state = THR_STOP;
560 mythread_cond_signal(&coder->threads[i].cond);
561 }
562 }
563 }
564
565 return;
566}
567
568
569/// Initialize a new worker_thread structure and create a new thread.
570static lzma_ret
571initialize_new_thread(struct lzma_stream_coder *coder,
572 const lzma_allocator *allocator)
573{
574 // Allocate the coder->threads array if needed. It's done here instead
575 // of when initializing the decoder because we don't need this if we
576 // use the direct mode (we may even free coder->threads in the middle
577 // of the file if we switch from threaded to direct mode).
578 if (coder->threads == NULL) {
579 coder->threads = lzma_alloc(
580 coder->threads_max * sizeof(struct worker_thread),
581 allocator);
582
583 if (coder->threads == NULL)
584 return LZMA_MEM_ERROR;
585 }
586
587 // Pick a free structure.
588 assert(coder->threads_initialized < coder->threads_max);
589 struct worker_thread *thr
590 = &coder->threads[coder->threads_initialized];
591
592 if (mythread_mutex_init(&thr->mutex))
593 goto error_mutex;
594
595 if (mythread_cond_init(&thr->cond))
596 goto error_cond;
597
598 thr->state = THR_IDLE;
599 thr->in = NULL;
600 thr->in_size = 0;
601 thr->allocator = allocator;
602 thr->coder = coder;
603 thr->outbuf = NULL;
604 thr->block_decoder = LZMA_NEXT_CODER_INIT;
605 thr->mem_filters = 0;
606
607 if (mythread_create(&thr->thread_id, worker_decoder, thr))
608 goto error_thread;
609
610 ++coder->threads_initialized;
611 coder->thr = thr;
612
613 return LZMA_OK;
614
615error_thread:
616 mythread_cond_destroy(&thr->cond);
617
618error_cond:
619 mythread_mutex_destroy(&thr->mutex);
620
621error_mutex:
622 return LZMA_MEM_ERROR;
623}
624
625
626static lzma_ret
627get_thread(struct lzma_stream_coder *coder, const lzma_allocator *allocator)
628{
629 // If there is a free structure on the stack, use it.
630 mythread_sync(coder->mutex) {
631 if (coder->threads_free != NULL) {
632 coder->thr = coder->threads_free;
633 coder->threads_free = coder->threads_free->next;
634
635 // The thread is no longer in the cache so subtract
636 // it from the cached memory usage. Don't add it
637 // to mem_in_use though; the caller will handle it
638 // since it knows how much memory it will actually
639 // use (the filter chain might change).
640 coder->mem_cached -= coder->thr->mem_filters;
641 }
642 }
643
644 if (coder->thr == NULL) {
645 assert(coder->threads_initialized < coder->threads_max);
646
647 // Initialize a new thread.
648 return_if_error(initialize_new_thread(coder, allocator));
649 }
650
651 coder->thr->in_filled = 0;
652 coder->thr->in_pos = 0;
653 coder->thr->out_pos = 0;
654
655 coder->thr->progress_in = 0;
656 coder->thr->progress_out = 0;
657
658 coder->thr->partial_update = PARTIAL_DISABLED;
659
660 return LZMA_OK;
661}
662
663
/// Copy as much decompressed output from the output queue to the
/// application's buffer as is possible without blocking, and optionally
/// wait until more output is available or starting a new Block becomes
/// possible.
///
/// \param coder             The main coder structure
/// \param allocator         lzma_allocator for custom allocator functions
/// \param out               Beginning of the output buffer
/// \param out_pos           out[*out_pos] is where the next byte is
///                          written; updated as output is copied
/// \param out_size          Size of the out buffer
/// \param input_is_possible If non-NULL, it is set to true (and the
///                          function returns) once decoding of the next
///                          Block can be started: enough memory within
///                          memlimit_threading, a free outbuf slot, and
///                          a thread that can be created or reused.
///                          If NULL, the caller only wants output.
/// \param waiting_allowed   If false, never block on coder->cond.
/// \param wait_abs          Absolute timeout; initialized on the first
///                          blocking wait when coder->timeout != 0
/// \param has_blocked       Tracks whether wait_abs has been set yet
///
/// \return LZMA_OK normally, LZMA_TIMED_OUT if the timeout expired, or
///         an error code (from lzma_outq_read() or, with LZMA_FAIL_FAST,
///         directly from coder->thread_error). On any return value other
///         than LZMA_OK/LZMA_TIMED_OUT the worker threads are stopped.
static lzma_ret
read_output_and_wait(struct lzma_stream_coder *coder,
		const lzma_allocator *allocator,
		uint8_t *restrict out, size_t *restrict out_pos,
		size_t out_size,
		bool *input_is_possible,
		bool waiting_allowed,
		mythread_condtime *wait_abs, bool *has_blocked)
{
	lzma_ret ret = LZMA_OK;

	mythread_sync(coder->mutex) {
		do {
			// Get as much output from the queue as is possible
			// without blocking.
			const size_t out_start = *out_pos;
			do {
				ret = lzma_outq_read(&coder->outq, allocator,
						out, out_pos, out_size,
						NULL, NULL);

				// If a Block was finished, tell the worker
				// thread of the next Block (if it is still
				// running) to start telling the main thread
				// when new output is available.
				if (ret == LZMA_STREAM_END)
					lzma_outq_enable_partial_output(
						&coder->outq,
						&worker_enable_partial_update);

				// Loop until a Block wasn't finished.
				// It's important to loop around even if
				// *out_pos == out_size because there could
				// be an empty Block that will return
				// LZMA_STREAM_END without needing any
				// output space.
			} while (ret == LZMA_STREAM_END);

			// Check if lzma_outq_read reported an error from
			// the Block decoder.
			if (ret != LZMA_OK)
				break;

			// If the output buffer is now full but it wasn't full
			// when this function was called, set out_was_filled.
			// This way the next call to stream_decode_mt() knows
			// that some output was produced and no output space
			// remained in the previous call to stream_decode_mt().
			if (*out_pos == out_size && *out_pos != out_start)
				coder->out_was_filled = true;

			// Check if any thread has indicated an error.
			if (coder->thread_error != LZMA_OK) {
				// If LZMA_FAIL_FAST was used, report errors
				// from worker threads immediately.
				if (coder->fail_fast) {
					ret = coder->thread_error;
					break;
				}

				// Otherwise set pending_error. The value we
				// set here will not actually get used other
				// than working as a flag that an error has
				// occurred. This is because in SEQ_ERROR
				// all output before the error will be read
				// first by calling this function, and once we
				// reach the location of the (first) error the
				// error code from the above lzma_outq_read()
				// will be returned to the application.
				//
				// Use LZMA_PROG_ERROR since the value should
				// never leak to the application. It's
				// possible that pending_error has already
				// been set but that doesn't matter: if we get
				// here, pending_error only works as a flag.
				coder->pending_error = LZMA_PROG_ERROR;
			}

			// Check if decoding of the next Block can be started.
			// The memusage of the active threads must be low
			// enough, there must be a free buffer slot in the
			// output queue, and there must be a free thread
			// (that can be either created or an existing one
			// reused).
			//
			// NOTE: This is checked after reading the output
			// above because reading the output can free a slot in
			// the output queue and also reduce active memusage.
			//
			// NOTE: If output queue is empty, then input will
			// always be possible.
			if (input_is_possible != NULL
					&& coder->memlimit_threading
						- coder->mem_in_use
						- coder->outq.mem_in_use
						>= coder->mem_next_block
					&& lzma_outq_has_buf(&coder->outq)
					&& (coder->threads_initialized
							< coder->threads_max
						|| coder->threads_free
							!= NULL)) {
				*input_is_possible = true;
				break;
			}

			// If the caller doesn't want us to block, return now.
			if (!waiting_allowed)
				break;

			// This check is needed only when input_is_possible
			// is NULL. We must return if we aren't waiting for
			// input to become possible and there is no more
			// output coming from the queue.
			if (lzma_outq_is_empty(&coder->outq)) {
				assert(input_is_possible == NULL);
				break;
			}

			// If there is more data available from the queue,
			// our out buffer must be full and we need to return
			// so that the application can provide more output
			// space.
			//
			// NOTE: In general lzma_outq_is_readable() can return
			// true also when there are no more bytes available.
			// This can happen when a Block has finished without
			// providing any new output. We know that this is not
			// the case because in the beginning of this loop we
			// tried to read as much as possible even when we had
			// no output space left and the mutex has been locked
			// all the time (so worker threads cannot have changed
			// anything). Thus there must be actual pending output
			// in the queue.
			if (lzma_outq_is_readable(&coder->outq)) {
				assert(*out_pos == out_size);
				break;
			}

			// If the application stops providing more input
			// in the middle of a Block, there will eventually
			// be one worker thread left that is stuck waiting for
			// more input (that might never arrive) and a matching
			// outbuf which the worker thread cannot finish due
			// to lack of input. We must detect this situation,
			// otherwise we would end up waiting indefinitely
			// (if no timeout is in use) or keep returning
			// LZMA_TIMED_OUT while making no progress. Thus, the
			// application would never get LZMA_BUF_ERROR from
			// lzma_code() which would tell the application that
			// no more progress is possible. No LZMA_BUF_ERROR
			// means that, for example, truncated .xz files could
			// cause an infinite loop.
			//
			// A worker thread doing partial updates will
			// store not only the output position in outbuf->pos
			// but also the matching input position in
			// outbuf->decoder_in_pos. Here we check if that
			// input position matches the amount of input that
			// the worker thread has been given (in_filled).
			// If so, we must return and not wait as no more
			// output will be coming without first getting more
			// input to the worker thread. If the application
			// keeps calling lzma_code() without providing more
			// input, it will eventually get LZMA_BUF_ERROR.
			//
			// NOTE: We can read partial_update and in_filled
			// without thr->mutex as only the main thread
			// modifies these variables. decoder_in_pos requires
			// coder->mutex which we are already holding.
			if (coder->thr != NULL && coder->thr->partial_update
					!= PARTIAL_DISABLED) {
				// There is exactly one outbuf in the queue.
				assert(coder->thr->outbuf == coder->outq.head);
				assert(coder->thr->outbuf == coder->outq.tail);

				if (coder->thr->outbuf->decoder_in_pos
						== coder->thr->in_filled)
					break;
			}

			// Wait for input or output to become possible.
			if (coder->timeout != 0) {
				// See the comment in stream_encoder_mt.c
				// about why mythread_condtime_set() is used
				// like this.
				//
				// FIXME?
				// In contrast to the encoder, this calls
				// _condtime_set while the mutex is locked.
				if (!*has_blocked) {
					*has_blocked = true;
					mythread_condtime_set(wait_abs,
							&coder->cond,
							coder->timeout);
				}

				if (mythread_cond_timedwait(&coder->cond,
						&coder->mutex,
						wait_abs) != 0) {
					ret = LZMA_TIMED_OUT;
					break;
				}
			} else {
				mythread_cond_wait(&coder->cond,
						&coder->mutex);
			}
		} while (ret == LZMA_OK);
	}

	// If we are returning an error, then the application cannot get
	// more output from us and thus keeping the threads running is
	// useless and waste of CPU time.
	if (ret != LZMA_OK && ret != LZMA_TIMED_OUT)
		threads_stop(coder);

	return ret;
}
881
882
883static lzma_ret
884decode_block_header(struct lzma_stream_coder *coder,
885 const lzma_allocator *allocator, const uint8_t *restrict in,
886 size_t *restrict in_pos, size_t in_size)
887{
888 if (*in_pos >= in_size)
889 return LZMA_OK;
890
891 if (coder->pos == 0) {
892 // Detect if it's Index.
893 if (in[*in_pos] == INDEX_INDICATOR)
894 return LZMA_INDEX_DETECTED;
895
896 // Calculate the size of the Block Header. Note that
897 // Block Header decoder wants to see this byte too
898 // so don't advance *in_pos.
899 coder->block_options.header_size
900 = lzma_block_header_size_decode(
901 in[*in_pos]);
902 }
903
904 // Copy the Block Header to the internal buffer.
905 lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
906 coder->block_options.header_size);
907
908 // Return if we didn't get the whole Block Header yet.
909 if (coder->pos < coder->block_options.header_size)
910 return LZMA_OK;
911
912 coder->pos = 0;
913
914 // Version 1 is needed to support the .ignore_check option.
915 coder->block_options.version = 1;
916
917 // Block Header decoder will initialize all members of this array
918 // so we don't need to do it here.
919 coder->block_options.filters = coder->filters;
920
921 // Decode the Block Header.
922 return_if_error(lzma_block_header_decode(&coder->block_options,
923 allocator, coder->buffer));
924
925 // If LZMA_IGNORE_CHECK was used, this flag needs to be set.
926 // It has to be set after lzma_block_header_decode() because
927 // it always resets this to false.
928 coder->block_options.ignore_check = coder->ignore_check;
929
930 // coder->block_options is ready now.
931 return LZMA_STREAM_END;
932}
933
934
935/// Get the size of the Compressed Data + Block Padding + Check.
936static size_t
937comp_blk_size(const struct lzma_stream_coder *coder)
938{
939 return vli_ceil4(coder->block_options.compressed_size)
940 + lzma_check_size(coder->stream_flags.check);
941}
942
943
944/// Returns true if the size (compressed or uncompressed) is such that
945/// threaded decompression cannot be used. Sizes that are too big compared
946/// to SIZE_MAX must be rejected to avoid integer overflows and truncations
947/// when lzma_vli is assigned to a size_t.
948static bool
949is_direct_mode_needed(lzma_vli size)
950{
951 return size == LZMA_VLI_UNKNOWN || size > SIZE_MAX / 3;
952}
953
954
955static lzma_ret
956stream_decoder_reset(struct lzma_stream_coder *coder,
957 const lzma_allocator *allocator)
958{
959 // Initialize the Index hash used to verify the Index.
960 coder->index_hash = lzma_index_hash_init(coder->index_hash, allocator);
961 if (coder->index_hash == NULL)
962 return LZMA_MEM_ERROR;
963
964 // Reset the rest of the variables.
965 coder->sequence = SEQ_STREAM_HEADER;
966 coder->pos = 0;
967
968 return LZMA_OK;
969}
970
971
972static lzma_ret
973stream_decode_mt(void *coder_ptr, const lzma_allocator *allocator,
974 const uint8_t *restrict in, size_t *restrict in_pos,
975 size_t in_size,
976 uint8_t *restrict out, size_t *restrict out_pos,
977 size_t out_size, lzma_action action)
978{
979 struct lzma_stream_coder *coder = coder_ptr;
980
981 mythread_condtime wait_abs;
982 bool has_blocked = false;
983
984 // Determine if in SEQ_BLOCK_HEADER and SEQ_BLOCK_THR_RUN we should
985 // tell read_output_and_wait() to wait until it can fill the output
986 // buffer (or a timeout occurs). Two conditions must be met:
987 //
988 // (1) If the caller provided no new input. The reason for this
989 // can be, for example, the end of the file or that there is
990 // a pause in the input stream and more input is available
991 // a little later. In this situation we should wait for output
992 // because otherwise we would end up in a busy-waiting loop where
993 // we make no progress and the application just calls us again
994 // without providing any new input. This would then result in
995 // LZMA_BUF_ERROR even though more output would be available
996 // once the worker threads decode more data.
997 //
998 // (2) Even if (1) is true, we will not wait if the previous call to
999 // this function managed to produce some output and the output
1000 // buffer became full. This is for compatibility with applications
1001 // that call lzma_code() in such a way that new input is provided
1002 // only when the output buffer didn't become full. Without this
1003 // trick such applications would have bad performance (bad
1004 // parallelization due to decoder not getting input fast enough).
1005 //
1006 // NOTE: Such loops might require that timeout is disabled (0)
1007 // if they assume that output-not-full implies that all input has
1008 // been consumed. If and only if timeout is enabled, we may return
1009 // when output isn't full *and* not all input has been consumed.
1010 //
1011 // However, if LZMA_FINISH is used, the above is ignored and we always
1012 // wait (timeout can still cause us to return) because we know that
1013 // we won't get any more input. This matters if the input file is
1014 // truncated and we are doing single-shot decoding, that is,
1015 // timeout = 0 and LZMA_FINISH is used on the first call to
1016 // lzma_code() and the output buffer is known to be big enough
1017 // to hold all uncompressed data:
1018 //
1019 // - If LZMA_FINISH wasn't handled specially, we could return
1020 // LZMA_OK before providing all output that is possible with the
1021 // truncated input. The rest would be available if lzma_code() was
1022 // called again but then it's not single-shot decoding anymore.
1023 //
1024 // - By handling LZMA_FINISH specially here, the first call will
1025 // produce all the output, matching the behavior of the
1026 // single-threaded decoder.
1027 //
1028 // So it's a very specific corner case but also easy to avoid. Note
1029 // that this special handling of LZMA_FINISH has no effect for
1030 // single-shot decoding when the input file is valid (not truncated);
1031 // premature LZMA_OK wouldn't be possible as long as timeout = 0.
1032 const bool waiting_allowed = action == LZMA_FINISH
1033 || (*in_pos == in_size && !coder->out_was_filled);
1034 coder->out_was_filled = false;
1035
1036 while (true)
1037 switch (coder->sequence) {
1038 case SEQ_STREAM_HEADER: {
1039 // Copy the Stream Header to the internal buffer.
1040 const size_t in_old = *in_pos;
1041 lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
1042 LZMA_STREAM_HEADER_SIZE);
1043 coder->progress_in += *in_pos - in_old;
1044
1045 // Return if we didn't get the whole Stream Header yet.
1046 if (coder->pos < LZMA_STREAM_HEADER_SIZE)
1047 return LZMA_OK;
1048
1049 coder->pos = 0;
1050
1051 // Decode the Stream Header.
1052 const lzma_ret ret = lzma_stream_header_decode(
1053 &coder->stream_flags, coder->buffer);
1054 if (ret != LZMA_OK)
1055 return ret == LZMA_FORMAT_ERROR && !coder->first_stream
1056 ? LZMA_DATA_ERROR : ret;
1057
1058 // If we are decoding concatenated Streams, and the later
1059 // Streams have invalid Header Magic Bytes, we give
1060 // LZMA_DATA_ERROR instead of LZMA_FORMAT_ERROR.
1061 coder->first_stream = false;
1062
1063 // Copy the type of the Check so that Block Header and Block
1064 // decoders see it.
1065 coder->block_options.check = coder->stream_flags.check;
1066
1067 // Even if we return LZMA_*_CHECK below, we want
1068 // to continue from Block Header decoding.
1069 coder->sequence = SEQ_BLOCK_HEADER;
1070
1071 // Detect if there's no integrity check or if it is
1072 // unsupported if those were requested by the application.
1073 if (coder->tell_no_check && coder->stream_flags.check
1074 == LZMA_CHECK_NONE)
1075 return LZMA_NO_CHECK;
1076
1077 if (coder->tell_unsupported_check
1078 && !lzma_check_is_supported(
1079 coder->stream_flags.check))
1080 return LZMA_UNSUPPORTED_CHECK;
1081
1082 if (coder->tell_any_check)
1083 return LZMA_GET_CHECK;
1084 }
1085
1086 // Fall through
1087
1088 case SEQ_BLOCK_HEADER: {
1089 const size_t in_old = *in_pos;
1090 const lzma_ret ret = decode_block_header(coder, allocator,
1091 in, in_pos, in_size);
1092 coder->progress_in += *in_pos - in_old;
1093
1094 if (ret == LZMA_OK) {
1095 // We didn't decode the whole Block Header yet.
1096 //
1097 // Read output from the queue before returning. This
1098 // is important because it is possible that the
1099 // application doesn't have any new input available
1100 // immediately. If we didn't try to copy output from
1101 // the output queue here, lzma_code() could end up
1102 // returning LZMA_BUF_ERROR even though queued output
1103 // is available.
1104 //
1105 // If the lzma_code() call provided at least one input
1106 // byte, only copy as much data from the output queue
1107 // as is available immediately. This way the
1108 // application will be able to provide more input
1109 // without a delay.
1110 //
1111 // On the other hand, if lzma_code() was called with
1112 // an empty input buffer(*), treat it specially: try
1113 // to fill the output buffer even if it requires
1114 // waiting for the worker threads to provide output
1115 // (timeout, if specified, can still cause us to
1116 // return).
1117 //
1118 // - This way the application will be able to get all
1119 // data that can be decoded from the input provided
1120 // so far.
1121 //
1122 // - We avoid both premature LZMA_BUF_ERROR and
1123 // busy-waiting where the application repeatedly
1124 // calls lzma_code() which immediately returns
1125 // LZMA_OK without providing new data.
1126 //
1127 // - If the queue becomes empty, we won't wait
1128 // anything and will return LZMA_OK immediately
1129 // (coder->timeout is completely ignored).
1130 //
1131 // (*) See the comment at the beginning of this
1132 // function how waiting_allowed is determined
1133 // and why there is an exception to the rule
1134 // of "called with an empty input buffer".
1135 assert(*in_pos == in_size);
1136
1137 // If LZMA_FINISH was used we know that we won't get
1138 // more input, so the file must be truncated if we
1139 // get here. If worker threads don't detect any
1140 // errors, eventually there will be no more output
1141 // while we keep returning LZMA_OK which gets
1142 // converted to LZMA_BUF_ERROR in lzma_code().
1143 //
1144 // If fail-fast is enabled then we will return
1145 // immediately using LZMA_DATA_ERROR instead of
1146 // LZMA_OK or LZMA_BUF_ERROR. Rationale for the
1147 // error code:
1148 //
1149 // - Worker threads may have a large amount of
1150 // not-yet-decoded input data and we don't
1151 // know for sure if all data is valid. Bad
1152 // data there would result in LZMA_DATA_ERROR
1153 // when fail-fast isn't used.
1154 //
1155 // - Immediate LZMA_BUF_ERROR would be a bit weird
1156 // considering the older liblzma code. lzma_code()
1157 // even has an assertion to prevent coders from
1158 // returning LZMA_BUF_ERROR directly.
1159 //
1160 // The downside of this is that with fail-fast apps
1161 // cannot always distinguish between corrupt and
1162 // truncated files.
1163 if (action == LZMA_FINISH && coder->fail_fast) {
1164 // We won't produce any more output. Stop
1165 // the unfinished worker threads so they
1166 // won't waste CPU time.
1167 threads_stop(coder);
1168 return LZMA_DATA_ERROR;
1169 }
1170
1171 // read_output_and_wait() will call threads_stop()
1172 // if needed so with that we can use return_if_error.
1173 return_if_error(read_output_and_wait(coder, allocator,
1174 out, out_pos, out_size,
1175 NULL, waiting_allowed,
1176 &wait_abs, &has_blocked));
1177
1178 if (coder->pending_error != LZMA_OK) {
1179 coder->sequence = SEQ_ERROR;
1180 break;
1181 }
1182
1183 return LZMA_OK;
1184 }
1185
1186 if (ret == LZMA_INDEX_DETECTED) {
1187 coder->sequence = SEQ_INDEX_WAIT_OUTPUT;
1188 break;
1189 }
1190
1191 // See if an error occurred.
1192 if (ret != LZMA_STREAM_END) {
1193 // NOTE: Here and in all other places where
1194 // pending_error is set, it may overwrite the value
1195 // (LZMA_PROG_ERROR) set by read_output_and_wait().
1196 // That function might overwrite value set here too.
1197 // These are fine because when read_output_and_wait()
1198 // sets pending_error, it actually works as a flag
1199 // variable only ("some error has occurred") and the
1200 // actual value of pending_error is not used in
1201 // SEQ_ERROR. In such cases SEQ_ERROR will eventually
1202 // get the correct error code from the return value of
1203 // a later read_output_and_wait() call.
1204 coder->pending_error = ret;
1205 coder->sequence = SEQ_ERROR;
1206 break;
1207 }
1208
1209 // Calculate the memory usage of the filters / Block decoder.
1210 coder->mem_next_filters = lzma_raw_decoder_memusage(
1211 coder->filters);
1212
1213 if (coder->mem_next_filters == UINT64_MAX) {
1214 // One or more unknown Filter IDs.
1215 coder->pending_error = LZMA_OPTIONS_ERROR;
1216 coder->sequence = SEQ_ERROR;
1217 break;
1218 }
1219
1220 coder->sequence = SEQ_BLOCK_INIT;
1221 }
1222
1223 // Fall through
1224
1225 case SEQ_BLOCK_INIT: {
1226 // Check if decoding is possible at all with the current
1227 // memlimit_stop which we must never exceed.
1228 //
1229 // This needs to be the first thing in SEQ_BLOCK_INIT
1230 // to make it possible to restart decoding after increasing
1231 // memlimit_stop with lzma_memlimit_set().
1232 if (coder->mem_next_filters > coder->memlimit_stop) {
1233 // Flush pending output before returning
1234 // LZMA_MEMLIMIT_ERROR. If the application doesn't
1235 // want to increase the limit, at least it will get
1236 // all the output possible so far.
1237 return_if_error(read_output_and_wait(coder, allocator,
1238 out, out_pos, out_size,
1239 NULL, true, &wait_abs, &has_blocked));
1240
1241 if (!lzma_outq_is_empty(&coder->outq))
1242 return LZMA_OK;
1243
1244 return LZMA_MEMLIMIT_ERROR;
1245 }
1246
1247 // Check if the size information is available in Block Header.
1248 // If it is, check if the sizes are small enough that we don't
1249 // need to worry *too* much about integer overflows later in
1250 // the code. If these conditions are not met, we must use the
1251 // single-threaded direct mode.
1252 if (is_direct_mode_needed(coder->block_options.compressed_size)
1253 || is_direct_mode_needed(
1254 coder->block_options.uncompressed_size)) {
1255 coder->sequence = SEQ_BLOCK_DIRECT_INIT;
1256 break;
1257 }
1258
1259 // Calculate the amount of memory needed for the input and
1260 // output buffers in threaded mode.
1261 //
1262 // These cannot overflow because we already checked that
1263 // the sizes are small enough using is_direct_mode_needed().
1264 coder->mem_next_in = comp_blk_size(coder);
1265 const uint64_t mem_buffers = coder->mem_next_in
1266 + lzma_outq_outbuf_memusage(
1267 coder->block_options.uncompressed_size);
1268
1269 // Add the amount needed by the filters.
1270 // Avoid integer overflows.
1271 if (UINT64_MAX - mem_buffers < coder->mem_next_filters) {
1272 // Use direct mode if the memusage would overflow.
1273 // This is a theoretical case that shouldn't happen
1274 // in practice unless the input file is weird (broken
1275 // or malicious).
1276 coder->sequence = SEQ_BLOCK_DIRECT_INIT;
1277 break;
1278 }
1279
1280 // Amount of memory needed to decode this Block in
1281 // threaded mode:
1282 coder->mem_next_block = coder->mem_next_filters + mem_buffers;
1283
1284 // If this alone would exceed memlimit_threading, then we must
1285 // use the single-threaded direct mode.
1286 if (coder->mem_next_block > coder->memlimit_threading) {
1287 coder->sequence = SEQ_BLOCK_DIRECT_INIT;
1288 break;
1289 }
1290
1291 // Use the threaded mode. Free the direct mode decoder in
1292 // case it has been initialized.
1293 lzma_next_end(&coder->block_decoder, allocator);
1294 coder->mem_direct_mode = 0;
1295
1296 // Since we already know what the sizes are supposed to be,
1297 // we can already add them to the Index hash. The Block
1298 // decoder will verify the values while decoding.
1299 const lzma_ret ret = lzma_index_hash_append(coder->index_hash,
1300 lzma_block_unpadded_size(
1301 &coder->block_options),
1302 coder->block_options.uncompressed_size);
1303 if (ret != LZMA_OK) {
1304 coder->pending_error = ret;
1305 coder->sequence = SEQ_ERROR;
1306 break;
1307 }
1308
1309 coder->sequence = SEQ_BLOCK_THR_INIT;
1310 }
1311
1312 // Fall through
1313
1314 case SEQ_BLOCK_THR_INIT: {
1315 // We need to wait for a multiple conditions to become true
1316 // until we can initialize the Block decoder and let a worker
1317 // thread decode it:
1318 //
1319 // - Wait for the memory usage of the active threads to drop
1320 // so that starting the decoding of this Block won't make
1321 // us go over memlimit_threading.
1322 //
1323 // - Wait for at least one free output queue slot.
1324 //
1325 // - Wait for a free worker thread.
1326 //
1327 // While we wait, we must copy decompressed data to the out
1328 // buffer and catch possible decoder errors.
1329 //
1330 // read_output_and_wait() does all the above.
1331 bool block_can_start = false;
1332
1333 return_if_error(read_output_and_wait(coder, allocator,
1334 out, out_pos, out_size,
1335 &block_can_start, true,
1336 &wait_abs, &has_blocked));
1337
1338 if (coder->pending_error != LZMA_OK) {
1339 coder->sequence = SEQ_ERROR;
1340 break;
1341 }
1342
1343 if (!block_can_start) {
1344 // It's not a timeout because return_if_error handles
1345 // it already. Output queue cannot be empty either
1346 // because in that case block_can_start would have
1347 // been true. Thus the output buffer must be full and
1348 // the queue isn't empty.
1349 assert(*out_pos == out_size);
1350 assert(!lzma_outq_is_empty(&coder->outq));
1351 return LZMA_OK;
1352 }
1353
1354 // We know that we can start decoding this Block without
1355 // exceeding memlimit_threading. However, to stay below
1356 // memlimit_threading may require freeing some of the
1357 // cached memory.
1358 //
1359 // Get a local copy of variables that require locking the
1360 // mutex. It is fine if the worker threads modify the real
1361 // values after we read these as those changes can only be
1362 // towards more favorable conditions (less memory in use,
1363 // more in cache).
1364 //
1365 // These are initialized to silence warnings.
1366#ifndef VBOX
1367 uint64_t mem_in_use;
1368 uint64_t mem_cached;
1369#else
1370 uint64_t mem_in_use = 0;
1371 uint64_t mem_cached = 0;
1372#endif
1373 struct worker_thread *thr = NULL;
1374
1375 mythread_sync(coder->mutex) {
1376 mem_in_use = coder->mem_in_use;
1377 mem_cached = coder->mem_cached;
1378 thr = coder->threads_free;
1379 }
1380
1381 // The maximum amount of memory that can be held by other
1382 // threads and cached buffers while allowing us to start
1383 // decoding the next Block.
1384 const uint64_t mem_max = coder->memlimit_threading
1385 - coder->mem_next_block;
1386
1387 // If the existing allocations are so large that starting
1388 // to decode this Block might exceed memlimit_threads,
1389 // try to free memory from the output queue cache first.
1390 //
1391 // NOTE: This math assumes the worst case. It's possible
1392 // that the limit wouldn't be exceeded if the existing cached
1393 // allocations are reused.
1394 if (mem_in_use + mem_cached + coder->outq.mem_allocated
1395 > mem_max) {
1396 // Clear the outq cache except leave one buffer in
1397 // the cache if its size is correct. That way we
1398 // don't free and almost immediately reallocate
1399 // an identical buffer.
1400 lzma_outq_clear_cache2(&coder->outq, allocator,
1401 coder->block_options.uncompressed_size);
1402 }
1403
1404 // If there is at least one worker_thread in the cache and
1405 // the existing allocations are so large that starting to
1406 // decode this Block might exceed memlimit_threads, free
1407 // memory by freeing cached Block decoders.
1408 //
1409 // NOTE: The comparison is different here than above.
1410 // Here we don't care about cached buffers in outq anymore
1411 // and only look at memory actually in use. This is because
1412 // if there is something in outq cache, it's a single buffer
1413 // that can be used as is. We ensured this in the above
1414 // if-block.
1415 uint64_t mem_freed = 0;
1416 if (thr != NULL && mem_in_use + mem_cached
1417 + coder->outq.mem_in_use > mem_max) {
1418 // Don't free the first Block decoder if its memory
1419 // usage isn't greater than what this Block will need.
1420 // Typically the same filter chain is used for all
1421 // Blocks so this way the allocations can be reused
1422 // when get_thread() picks the first worker_thread
1423 // from the cache.
1424 if (thr->mem_filters <= coder->mem_next_filters)
1425 thr = thr->next;
1426
1427 while (thr != NULL) {
1428 lzma_next_end(&thr->block_decoder, allocator);
1429 mem_freed += thr->mem_filters;
1430 thr->mem_filters = 0;
1431 thr = thr->next;
1432 }
1433 }
1434
1435 // Update the memory usage counters. Note that coder->mem_*
1436 // may have changed since we read them so we must subtract
1437 // or add the changes.
1438 mythread_sync(coder->mutex) {
1439 coder->mem_cached -= mem_freed;
1440
1441 // Memory needed for the filters and the input buffer.
1442 // The output queue takes care of its own counter so
1443 // we don't touch it here.
1444 //
1445 // NOTE: After this, coder->mem_in_use +
1446 // coder->mem_cached might count the same thing twice.
1447 // If so, this will get corrected in get_thread() when
1448 // a worker_thread is picked from coder->free_threads
1449 // and its memory usage is subtracted from mem_cached.
1450 coder->mem_in_use += coder->mem_next_in
1451 + coder->mem_next_filters;
1452 }
1453
1454 // Allocate memory for the output buffer in the output queue.
1455 lzma_ret ret = lzma_outq_prealloc_buf(
1456 &coder->outq, allocator,
1457 coder->block_options.uncompressed_size);
1458 if (ret != LZMA_OK) {
1459 threads_stop(coder);
1460 return ret;
1461 }
1462
1463 // Set up coder->thr.
1464 ret = get_thread(coder, allocator);
1465 if (ret != LZMA_OK) {
1466 threads_stop(coder);
1467 return ret;
1468 }
1469
1470 // The new Block decoder memory usage is already counted in
1471 // coder->mem_in_use. Store it in the thread too.
1472 coder->thr->mem_filters = coder->mem_next_filters;
1473
1474 // Initialize the Block decoder.
1475 coder->thr->block_options = coder->block_options;
1476 ret = lzma_block_decoder_init(
1477 &coder->thr->block_decoder, allocator,
1478 &coder->thr->block_options);
1479
1480 // Free the allocated filter options since they are needed
1481 // only to initialize the Block decoder.
1482 lzma_filters_free(coder->filters, allocator);
1483 coder->thr->block_options.filters = NULL;
1484
1485 // Check if memory usage calculation and Block encoder
1486 // initialization succeeded.
1487 if (ret != LZMA_OK) {
1488 coder->pending_error = ret;
1489 coder->sequence = SEQ_ERROR;
1490 break;
1491 }
1492
1493 // Allocate the input buffer.
1494 coder->thr->in_size = coder->mem_next_in;
1495 coder->thr->in = lzma_alloc(coder->thr->in_size, allocator);
1496 if (coder->thr->in == NULL) {
1497 threads_stop(coder);
1498 return LZMA_MEM_ERROR;
1499 }
1500
1501 // Get the preallocated output buffer.
1502 coder->thr->outbuf = lzma_outq_get_buf(
1503 &coder->outq, coder->thr);
1504
1505 // Start the decoder.
1506 mythread_sync(coder->thr->mutex) {
1507 assert(coder->thr->state == THR_IDLE);
1508 coder->thr->state = THR_RUN;
1509 mythread_cond_signal(&coder->thr->cond);
1510 }
1511
1512 // Enable output from the thread that holds the oldest output
1513 // buffer in the output queue (if such a thread exists).
1514 mythread_sync(coder->mutex) {
1515 lzma_outq_enable_partial_output(&coder->outq,
1516 &worker_enable_partial_update);
1517 }
1518
1519 coder->sequence = SEQ_BLOCK_THR_RUN;
1520 }
1521
1522 // Fall through
1523
1524 case SEQ_BLOCK_THR_RUN: {
1525 if (action == LZMA_FINISH && coder->fail_fast) {
1526 // We know that we won't get more input and that
1527 // the caller wants fail-fast behavior. If we see
1528 // that we don't have enough input to finish this
1529 // Block, return LZMA_DATA_ERROR immediately.
1530 // See SEQ_BLOCK_HEADER for the error code rationale.
1531 const size_t in_avail = in_size - *in_pos;
1532 const size_t in_needed = coder->thr->in_size
1533 - coder->thr->in_filled;
1534 if (in_avail < in_needed) {
1535 threads_stop(coder);
1536 return LZMA_DATA_ERROR;
1537 }
1538 }
1539
1540 // Copy input to the worker thread.
1541 size_t cur_in_filled = coder->thr->in_filled;
1542 lzma_bufcpy(in, in_pos, in_size, coder->thr->in,
1543 &cur_in_filled, coder->thr->in_size);
1544
1545 // Tell the thread how much we copied.
1546 mythread_sync(coder->thr->mutex) {
1547 coder->thr->in_filled = cur_in_filled;
1548
1549 // NOTE: Most of the time we are copying input faster
1550 // than the thread can decode so most of the time
1551 // calling mythread_cond_signal() is useless but
1552 // we cannot make it conditional because thr->in_pos
1553 // is updated without a mutex. And the overhead should
1554 // be very much negligible anyway.
1555 mythread_cond_signal(&coder->thr->cond);
1556 }
1557
1558 // Read output from the output queue. Just like in
1559 // SEQ_BLOCK_HEADER, we wait to fill the output buffer
1560 // only if waiting_allowed was set to true in the beginning
1561 // of this function (see the comment there).
1562 return_if_error(read_output_and_wait(coder, allocator,
1563 out, out_pos, out_size,
1564 NULL, waiting_allowed,
1565 &wait_abs, &has_blocked));
1566
1567 if (coder->pending_error != LZMA_OK) {
1568 coder->sequence = SEQ_ERROR;
1569 break;
1570 }
1571
1572 // Return if the input didn't contain the whole Block.
1573 if (coder->thr->in_filled < coder->thr->in_size) {
1574 assert(*in_pos == in_size);
1575 return LZMA_OK;
1576 }
1577
1578 // The whole Block has been copied to the thread-specific
1579 // buffer. Continue from the next Block Header or Index.
1580 coder->thr = NULL;
1581 coder->sequence = SEQ_BLOCK_HEADER;
1582 break;
1583 }
1584
1585 case SEQ_BLOCK_DIRECT_INIT: {
1586 // Wait for the threads to finish and that all decoded data
1587 // has been copied to the output. That is, wait until the
1588 // output queue becomes empty.
1589 //
1590 // NOTE: No need to check for coder->pending_error as
1591 // we aren't consuming any input until the queue is empty
1592 // and if there is a pending error, read_output_and_wait()
1593 // will eventually return it before the queue is empty.
1594 return_if_error(read_output_and_wait(coder, allocator,
1595 out, out_pos, out_size,
1596 NULL, true, &wait_abs, &has_blocked));
1597 if (!lzma_outq_is_empty(&coder->outq))
1598 return LZMA_OK;
1599
1600 // Free the cached output buffers.
1601 lzma_outq_clear_cache(&coder->outq, allocator);
1602
1603 // Get rid of the worker threads, including the coder->threads
1604 // array.
1605 threads_end(coder, allocator);
1606
1607 // Initialize the Block decoder.
1608 const lzma_ret ret = lzma_block_decoder_init(
1609 &coder->block_decoder, allocator,
1610 &coder->block_options);
1611
1612 // Free the allocated filter options since they are needed
1613 // only to initialize the Block decoder.
1614 lzma_filters_free(coder->filters, allocator);
1615 coder->block_options.filters = NULL;
1616
1617 // Check if Block decoder initialization succeeded.
1618 if (ret != LZMA_OK)
1619 return ret;
1620
1621 // Make the memory usage visible to _memconfig().
1622 coder->mem_direct_mode = coder->mem_next_filters;
1623
1624 coder->sequence = SEQ_BLOCK_DIRECT_RUN;
1625 }
1626
1627 // Fall through
1628
1629 case SEQ_BLOCK_DIRECT_RUN: {
1630 const size_t in_old = *in_pos;
1631 const size_t out_old = *out_pos;
1632 const lzma_ret ret = coder->block_decoder.code(
1633 coder->block_decoder.coder, allocator,
1634 in, in_pos, in_size, out, out_pos, out_size,
1635 action);
1636 coder->progress_in += *in_pos - in_old;
1637 coder->progress_out += *out_pos - out_old;
1638
1639 if (ret != LZMA_STREAM_END)
1640 return ret;
1641
1642 // Block decoded successfully. Add the new size pair to
1643 // the Index hash.
1644 return_if_error(lzma_index_hash_append(coder->index_hash,
1645 lzma_block_unpadded_size(
1646 &coder->block_options),
1647 coder->block_options.uncompressed_size));
1648
1649 coder->sequence = SEQ_BLOCK_HEADER;
1650 break;
1651 }
1652
1653 case SEQ_INDEX_WAIT_OUTPUT:
1654 // Flush the output from all worker threads so that we can
1655 // decode the Index without thinking about threading.
1656 return_if_error(read_output_and_wait(coder, allocator,
1657 out, out_pos, out_size,
1658 NULL, true, &wait_abs, &has_blocked));
1659
1660 if (!lzma_outq_is_empty(&coder->outq))
1661 return LZMA_OK;
1662
1663 coder->sequence = SEQ_INDEX_DECODE;
1664
1665 // Fall through
1666
1667 case SEQ_INDEX_DECODE: {
1668 // If we don't have any input, don't call
1669 // lzma_index_hash_decode() since it would return
1670 // LZMA_BUF_ERROR, which we must not do here.
1671 if (*in_pos >= in_size)
1672 return LZMA_OK;
1673
1674 // Decode the Index and compare it to the hash calculated
1675 // from the sizes of the Blocks (if any).
1676 const size_t in_old = *in_pos;
1677 const lzma_ret ret = lzma_index_hash_decode(coder->index_hash,
1678 in, in_pos, in_size);
1679 coder->progress_in += *in_pos - in_old;
1680 if (ret != LZMA_STREAM_END)
1681 return ret;
1682
1683 coder->sequence = SEQ_STREAM_FOOTER;
1684 }
1685
1686 // Fall through
1687
1688 case SEQ_STREAM_FOOTER: {
1689 // Copy the Stream Footer to the internal buffer.
1690 const size_t in_old = *in_pos;
1691 lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
1692 LZMA_STREAM_HEADER_SIZE);
1693 coder->progress_in += *in_pos - in_old;
1694
1695 // Return if we didn't get the whole Stream Footer yet.
1696 if (coder->pos < LZMA_STREAM_HEADER_SIZE)
1697 return LZMA_OK;
1698
1699 coder->pos = 0;
1700
1701 // Decode the Stream Footer. The decoder gives
1702 // LZMA_FORMAT_ERROR if the magic bytes don't match,
1703 // so convert that return code to LZMA_DATA_ERROR.
1704 lzma_stream_flags footer_flags;
1705 const lzma_ret ret = lzma_stream_footer_decode(
1706 &footer_flags, coder->buffer);
1707 if (ret != LZMA_OK)
1708 return ret == LZMA_FORMAT_ERROR
1709 ? LZMA_DATA_ERROR : ret;
1710
1711 // Check that Index Size stored in the Stream Footer matches
1712 // the real size of the Index field.
1713 if (lzma_index_hash_size(coder->index_hash)
1714 != footer_flags.backward_size)
1715 return LZMA_DATA_ERROR;
1716
1717 // Compare that the Stream Flags fields are identical in
1718 // both Stream Header and Stream Footer.
1719 return_if_error(lzma_stream_flags_compare(
1720 &coder->stream_flags, &footer_flags));
1721
1722 if (!coder->concatenated)
1723 return LZMA_STREAM_END;
1724
1725 coder->sequence = SEQ_STREAM_PADDING;
1726 }
1727
1728 // Fall through
1729
1730 case SEQ_STREAM_PADDING:
1731 assert(coder->concatenated);
1732
1733 // Skip over possible Stream Padding.
1734 while (true) {
1735 if (*in_pos >= in_size) {
1736 // Unless LZMA_FINISH was used, we cannot
1737 // know if there's more input coming later.
1738 if (action != LZMA_FINISH)
1739 return LZMA_OK;
1740
1741 // Stream Padding must be a multiple of
1742 // four bytes.
1743 return coder->pos == 0
1744 ? LZMA_STREAM_END
1745 : LZMA_DATA_ERROR;
1746 }
1747
1748 // If the byte is not zero, it probably indicates
1749 // beginning of a new Stream (or the file is corrupt).
1750 if (in[*in_pos] != 0x00)
1751 break;
1752
1753 ++*in_pos;
1754 ++coder->progress_in;
1755 coder->pos = (coder->pos + 1) & 3;
1756 }
1757
1758 // Stream Padding must be a multiple of four bytes (empty
1759 // Stream Padding is OK).
1760 if (coder->pos != 0) {
1761 ++*in_pos;
1762 ++coder->progress_in;
1763 return LZMA_DATA_ERROR;
1764 }
1765
1766 // Prepare to decode the next Stream.
1767 return_if_error(stream_decoder_reset(coder, allocator));
1768 break;
1769
1770 case SEQ_ERROR:
1771 if (!coder->fail_fast) {
1772 // Let the application get all data before the point
1773 // where the error was detected. This matches the
1774 // behavior of single-threaded use.
1775 //
1776 // FIXME? Some errors (LZMA_MEM_ERROR) don't get here,
1777 // they are returned immediately. Thus in rare cases
1778 // the output will be less than in the single-threaded
1779 // mode. Maybe this doesn't matter much in practice.
1780 return_if_error(read_output_and_wait(coder, allocator,
1781 out, out_pos, out_size,
1782 NULL, true, &wait_abs, &has_blocked));
1783
1784 // We get here only if the error happened in the main
1785 // thread, for example, unsupported Block Header.
1786 if (!lzma_outq_is_empty(&coder->outq))
1787 return LZMA_OK;
1788 }
1789
1790 // We only get here if no errors were detected by the worker
1791 // threads. Errors from worker threads would have already been
1792 // returned by the call to read_output_and_wait() above.
1793 return coder->pending_error;
1794
1795 default:
1796 assert(0);
1797 return LZMA_PROG_ERROR;
1798 }
1799
1800 // Never reached
1801}
1802
1803
1804static void
1805stream_decoder_mt_end(void *coder_ptr, const lzma_allocator *allocator)
1806{
1807 struct lzma_stream_coder *coder = coder_ptr;
1808
1809 threads_end(coder, allocator);
1810 lzma_outq_end(&coder->outq, allocator);
1811
1812 lzma_next_end(&coder->block_decoder, allocator);
1813 lzma_filters_free(coder->filters, allocator);
1814 lzma_index_hash_end(coder->index_hash, allocator);
1815
1816 lzma_free(coder, allocator);
1817 return;
1818}
1819
1820
1821static lzma_check
1822stream_decoder_mt_get_check(const void *coder_ptr)
1823{
1824 const struct lzma_stream_coder *coder = coder_ptr;
1825 return coder->stream_flags.check;
1826}
1827
1828
1829static lzma_ret
1830stream_decoder_mt_memconfig(void *coder_ptr, uint64_t *memusage,
1831 uint64_t *old_memlimit, uint64_t new_memlimit)
1832{
1833 // NOTE: This function gets/sets memlimit_stop. For now,
1834 // memlimit_threading cannot be modified after initialization.
1835 //
1836 // *memusage will include cached memory too. Excluding cached memory
1837 // would be misleading and it wouldn't help the applications to
1838 // know how much memory is actually needed to decompress the file
1839 // because the higher the number of threads and the memlimits are
1840 // the more memory the decoder may use.
1841 //
1842 // Setting a new limit includes the cached memory too and too low
1843 // limits will be rejected. Alternative could be to free the cached
1844 // memory immediately if that helps to bring the limit down but
1845 // the current way is the simplest. It's unlikely that limit needs
1846 // to be lowered in the middle of a file anyway; the typical reason
1847 // to want a new limit is to increase after LZMA_MEMLIMIT_ERROR
1848 // and even such use isn't common.
1849 struct lzma_stream_coder *coder = coder_ptr;
1850
1851 mythread_sync(coder->mutex) {
1852 *memusage = coder->mem_direct_mode
1853 + coder->mem_in_use
1854 + coder->mem_cached
1855 + coder->outq.mem_allocated;
1856 }
1857
1858 // If no filter chains are allocated, *memusage may be zero.
1859 // Always return at least LZMA_MEMUSAGE_BASE.
1860 if (*memusage < LZMA_MEMUSAGE_BASE)
1861 *memusage = LZMA_MEMUSAGE_BASE;
1862
1863 *old_memlimit = coder->memlimit_stop;
1864
1865 if (new_memlimit != 0) {
1866 if (new_memlimit < *memusage)
1867 return LZMA_MEMLIMIT_ERROR;
1868
1869 coder->memlimit_stop = new_memlimit;
1870 }
1871
1872 return LZMA_OK;
1873}
1874
1875
1876static void
1877stream_decoder_mt_get_progress(void *coder_ptr,
1878 uint64_t *progress_in, uint64_t *progress_out)
1879{
1880 struct lzma_stream_coder *coder = coder_ptr;
1881
1882 // Lock coder->mutex to prevent finishing threads from moving their
1883 // progress info from the worker_thread structure to lzma_stream_coder.
1884 mythread_sync(coder->mutex) {
1885 *progress_in = coder->progress_in;
1886 *progress_out = coder->progress_out;
1887
1888 for (size_t i = 0; i < coder->threads_initialized; ++i) {
1889 mythread_sync(coder->threads[i].mutex) {
1890 *progress_in += coder->threads[i].progress_in;
1891 *progress_out += coder->threads[i]
1892 .progress_out;
1893 }
1894 }
1895 }
1896
1897 return;
1898}
1899
1900
/// \brief      Initialize (or reinitialize) the multithreaded Stream decoder
///
/// Validates the options, allocates the coder and its synchronization
/// primitives on first use, and resets all per-Stream state. On reuse,
/// the old filter chain and worker threads are freed so that memory
/// accounting restarts from zero.
static lzma_ret
stream_decoder_mt_init(lzma_next_coder *next, const lzma_allocator *allocator,
		const lzma_mt *options)
{
	struct lzma_stream_coder *coder;

	// Reject a zero or out-of-range thread count before allocating
	// anything.
	if (options->threads == 0 || options->threads > LZMA_THREADS_MAX)
		return LZMA_OPTIONS_ERROR;

	// Reject flags this decoder doesn't know about.
	if (options->flags & ~LZMA_SUPPORTED_FLAGS)
		return LZMA_OPTIONS_ERROR;

	lzma_next_coder_init(&stream_decoder_mt_init, next, allocator);

	coder = next->coder;
	if (!coder) {
		// First-time initialization: allocate the coder and set up
		// the mutex and condition variable, unwinding the partial
		// allocations if either primitive fails to initialize.
		coder = lzma_alloc(sizeof(struct lzma_stream_coder), allocator);
		if (coder == NULL)
			return LZMA_MEM_ERROR;

		next->coder = coder;

		if (mythread_mutex_init(&coder->mutex)) {
			lzma_free(coder, allocator);
			return LZMA_MEM_ERROR;
		}

		if (mythread_cond_init(&coder->cond)) {
			mythread_mutex_destroy(&coder->mutex);
			lzma_free(coder, allocator);
			return LZMA_MEM_ERROR;
		}

		// Hook up the function pointers of the public interface.
		next->code = &stream_decode_mt;
		next->end = &stream_decoder_mt_end;
		next->get_check = &stream_decoder_mt_get_check;
		next->memconfig = &stream_decoder_mt_memconfig;
		next->get_progress = &stream_decoder_mt_get_progress;

		// Mark the members that the cleanup code inspects so that
		// stream_decoder_mt_end() is safe even if a later step of
		// this initialization fails.
		coder->filters[0].id = LZMA_VLI_UNKNOWN;
		memzero(&coder->outq, sizeof(coder->outq));

		coder->block_decoder = LZMA_NEXT_CODER_INIT;
		coder->mem_direct_mode = 0;

		coder->index_hash = NULL;
		coder->threads = NULL;
		coder->threads_free = NULL;
		coder->threads_initialized = 0;
	}

	// Cleanup old filter chain if one remains after unfinished decoding
	// of a previous Stream.
	lzma_filters_free(coder->filters, allocator);

	// By allocating threads from scratch we can start memory-usage
	// accounting from scratch, too. Changes in filter and block sizes may
	// affect number of threads.
	//
	// FIXME? Reusing should be easy but unlike the single-threaded
	// decoder, with some types of input file combinations reusing
	// could leave quite a lot of memory allocated but unused (first
	// file could allocate a lot, the next files could use fewer
	// threads and some of the allocations from the first file would not
	// get freed unless memlimit_threading forces us to clear caches).
	//
	// NOTE: The direct mode decoder isn't freed here if one exists.
	// It will be reused or freed as needed in the main loop.
	threads_end(coder, allocator);

	// All memusage counters start at 0 (including mem_direct_mode).
	// The little extra that is needed for the structs in this file
	// get accounted well enough by the filter chain memory usage
	// which adds LZMA_MEMUSAGE_BASE for each chain. However,
	// stream_decoder_mt_memconfig() has to handle this specially so that
	// it will never return less than LZMA_MEMUSAGE_BASE as memory usage.
	coder->mem_in_use = 0;
	coder->mem_cached = 0;
	coder->mem_next_block = 0;

	coder->progress_in = 0;
	coder->progress_out = 0;

	coder->sequence = SEQ_STREAM_HEADER;
	coder->thread_error = LZMA_OK;
	coder->pending_error = LZMA_OK;
	coder->thr = NULL;

	coder->timeout = options->timeout;

	// Both limits are clamped to at least 1 byte, and the threading
	// limit is capped at the stop limit: it would make no sense to
	// allow multithreaded decoding to use more memory than the
	// amount at which decoding stops entirely.
	coder->memlimit_threading = my_max(1, options->memlimit_threading);
	coder->memlimit_stop = my_max(1, options->memlimit_stop);
	if (coder->memlimit_threading > coder->memlimit_stop)
		coder->memlimit_threading = coder->memlimit_stop;

	// Translate the flag bits into booleans for faster access.
	coder->tell_no_check = (options->flags & LZMA_TELL_NO_CHECK) != 0;
	coder->tell_unsupported_check
			= (options->flags & LZMA_TELL_UNSUPPORTED_CHECK) != 0;
	coder->tell_any_check = (options->flags & LZMA_TELL_ANY_CHECK) != 0;
	coder->ignore_check = (options->flags & LZMA_IGNORE_CHECK) != 0;
	coder->concatenated = (options->flags & LZMA_CONCATENATED) != 0;
	coder->fail_fast = (options->flags & LZMA_FAIL_FAST) != 0;

	coder->first_stream = true;
	coder->out_was_filled = false;
	coder->pos = 0;

	coder->threads_max = options->threads;

	// The output queue is sized for one buffer per possible thread.
	return_if_error(lzma_outq_init(&coder->outq, allocator,
			coder->threads_max));

	return stream_decoder_reset(coder, allocator);
}
2015
2016
/// \brief      Public entry point of the multithreaded .xz Stream decoder
///
/// Initializes \a strm for multithreaded decoding with the given
/// \a options. NOTE: lzma_next_strm_init is a macro that returns from
/// this function early if initialization fails.
extern LZMA_API(lzma_ret)
lzma_stream_decoder_mt(lzma_stream *strm, const lzma_mt *options)
{
	lzma_next_strm_init(stream_decoder_mt_init, strm, options);

	// Only LZMA_RUN and LZMA_FINISH are meaningful for a decoder.
	strm->internal->supported_actions[LZMA_RUN] = true;
	strm->internal->supported_actions[LZMA_FINISH] = true;

	return LZMA_OK;
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette