VirtualBox

source: vbox/trunk/src/libs/liblzma-5.8.1/common/stream_decoder_mt.c@ 108911

Last change on this file since 108911 was 108911, checked in by vboxsync, 4 weeks ago

libs/liblzma: Applied and adjusted our liblzma changes to 5.8.1 and export to OSE. jiraref:VBP-1635

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
  • Property sync-process set to export
File size: 63.7 KB
Line 
1// SPDX-License-Identifier: 0BSD
2
3///////////////////////////////////////////////////////////////////////////////
4//
5/// \file stream_decoder_mt.c
6/// \brief Multithreaded .xz Stream decoder
7//
8// Authors: Sebastian Andrzej Siewior
9// Lasse Collin
10//
11///////////////////////////////////////////////////////////////////////////////
12
13#include "common.h"
14#include "block_decoder.h"
15#include "stream_decoder.h"
16#include "index.h"
17#include "outqueue.h"
18
19
/// States of a worker thread. The state is stored in worker_thread.state
/// and is read and written only while holding that thread's mutex.
/// Only the transitions listed below are made.
typedef enum {
	/// Waiting for work.
	/// Main thread may change this to THR_RUN or THR_EXIT.
	THR_IDLE,

	/// Decoding is in progress.
	/// Main thread may change this to THR_IDLE or THR_EXIT.
	/// The worker thread may change this to THR_IDLE.
	THR_RUN,

	/// The main thread wants the thread to exit.
	THR_EXIT,

} worker_state;
34
35
/// Controls whether a worker thread publishes its progress (outbuf->pos
/// and outbuf->decoder_in_pos) to the main thread after each call to the
/// Block decoder. Stored in worker_thread.partial_update.
typedef enum {
	/// Partial updates (storing of worker thread progress
	/// to lzma_outbuf) are disabled.
	PARTIAL_DISABLED,

	/// Main thread requests partial updates to be enabled but
	/// no partial update has been done by the worker thread yet.
	///
	/// Changing from PARTIAL_DISABLED to PARTIAL_START requires
	/// use of the worker-thread mutex. Other transitions don't
	/// need a mutex.
	PARTIAL_START,

	/// Partial updates are enabled and the worker thread has done
	/// at least one partial update.
	PARTIAL_ENABLED,

} partial_update_mode;
54
55
/// Per-thread state of one worker thread. Each worker decodes one whole
/// Block at a time: the main thread copies compressed data into "in" and
/// the worker writes decompressed data into "outbuf".
struct worker_thread {
	/// Worker state is protected with our mutex.
	worker_state state;

	/// Input buffer that will contain the whole Block except Block Header.
	uint8_t *in;

	/// Amount of memory allocated for "in"
	size_t in_size;

	/// Number of bytes written to "in" by the main thread
	size_t in_filled;

	/// Number of bytes consumed from "in" by the worker thread.
	size_t in_pos;

	/// Amount of uncompressed data that has been decoded. This local
	/// copy is needed because updating outbuf->pos requires locking
	/// the main mutex (coder->mutex).
	size_t out_pos;

	/// Pointer to the main structure is needed to (1) lock the main
	/// mutex (coder->mutex) when updating outbuf->pos and (2) when
	/// putting this thread back to the stack of free threads.
	struct lzma_stream_coder *coder;

	/// The allocator is set by the main thread. Since a copy of the
	/// pointer is kept here, the application must not change the
	/// allocator before calling lzma_end().
	const lzma_allocator *allocator;

	/// Output queue buffer to which the uncompressed data is written.
	lzma_outbuf *outbuf;

	/// Amount of compressed data that has already been decompressed.
	/// This is updated from in_pos when our mutex is locked.
	/// This is size_t, not uint64_t, because per-thread progress
	/// is limited to sizes of allocated buffers.
	size_t progress_in;

	/// Like progress_in but for uncompressed data.
	size_t progress_out;

	/// Updating outbuf->pos requires locking the main mutex
	/// (coder->mutex). Since the main thread will only read output
	/// from the oldest outbuf in the queue, only the worker thread
	/// that is associated with the oldest outbuf needs to update its
	/// outbuf->pos. This avoids useless mutex contention that would
	/// happen if all worker threads were frequently locking the main
	/// mutex to update their outbuf->pos.
	///
	/// Only when partial_update is something else than PARTIAL_DISABLED,
	/// this worker thread will update outbuf->pos after each call to
	/// the Block decoder.
	partial_update_mode partial_update;

	/// Block decoder
	lzma_next_coder block_decoder;

	/// Thread-specific Block options are needed because the Block
	/// decoder modifies the struct given to it at initialization.
	lzma_block block_options;

	/// Filter chain memory usage
	uint64_t mem_filters;

	/// Next structure in the stack of free worker threads.
	struct worker_thread *next;

	/// Protects "state" (and is used with "cond" to wake this thread).
	mythread_mutex mutex;

	/// Signalled by the main thread when there is new work, new input,
	/// or a state change for this thread.
	mythread_cond cond;

	/// The ID of this thread is used to join the thread
	/// when it's not needed anymore.
	mythread thread_id;
};
132
133
/// Main coder structure shared between the main thread and the workers.
/// Members marked with "\note Use mutex" are protected by "mutex" below.
struct lzma_stream_coder {
	/// Which part of the .xz Stream is decoded next by the main thread.
	enum {
		SEQ_STREAM_HEADER,
		SEQ_BLOCK_HEADER,
		SEQ_BLOCK_INIT,
		SEQ_BLOCK_THR_INIT,
		SEQ_BLOCK_THR_RUN,
		SEQ_BLOCK_DIRECT_INIT,
		SEQ_BLOCK_DIRECT_RUN,
		SEQ_INDEX_WAIT_OUTPUT,
		SEQ_INDEX_DECODE,
		SEQ_STREAM_FOOTER,
		SEQ_STREAM_PADDING,
		SEQ_ERROR,
	} sequence;

	/// Block decoder
	lzma_next_coder block_decoder;

	/// Every Block Header will be decoded into this structure.
	/// This is also used to initialize a Block decoder when in
	/// direct mode. In threaded mode, a thread-specific copy will
	/// be made for decoder initialization because the Block decoder
	/// will modify the structure given to it.
	lzma_block block_options;

	/// Buffer to hold a filter chain for Block Header decoding and
	/// initialization. These are freed after successful Block decoder
	/// initialization or at stream_decoder_mt_end(). The thread-specific
	/// copy of block_options won't hold a pointer to filters[] after
	/// initialization.
	lzma_filter filters[LZMA_FILTERS_MAX + 1];

	/// Stream Flags from Stream Header
	lzma_stream_flags stream_flags;

	/// Index is hashed so that it can be compared to the sizes of Blocks
	/// with O(1) memory usage.
	lzma_index_hash *index_hash;


	/// Maximum wait time if cannot use all the input and cannot
	/// fill the output buffer. This is in milliseconds.
	uint32_t timeout;


	/// Error code from a worker thread.
	///
	/// \note Use mutex.
	lzma_ret thread_error;

	/// Error code to return after pending output has been copied out. If
	/// set in read_output_and_wait(), this is a mirror of thread_error.
	/// If set in stream_decode_mt() then it's, for example, error that
	/// occurred when decoding Block Header.
	lzma_ret pending_error;

	/// Number of threads that will be created at maximum.
	uint32_t threads_max;

	/// Number of thread structures that have been initialized from
	/// "threads", and thus the number of worker threads actually
	/// created so far.
	uint32_t threads_initialized;

	/// Array of allocated thread-specific structures. When no threads
	/// are in use (direct mode) this is NULL. In threaded mode this
	/// points to an array of threads_max number of worker_thread structs.
	struct worker_thread *threads;

	/// Stack of free threads. When a thread finishes, it puts itself
	/// back into this stack. This starts as empty because threads
	/// are created only when actually needed.
	///
	/// \note Use mutex.
	struct worker_thread *threads_free;

	/// The most recent worker thread to which the main thread writes
	/// the new input from the application.
	struct worker_thread *thr;

	/// Output buffer queue for decompressed data from the worker threads
	///
	/// \note Use mutex with operations that need it.
	lzma_outq outq;

	/// Main mutex: protects the members above that are marked with
	/// "\note Use mutex" and the positions in the output queue buffers.
	mythread_mutex mutex;

	/// Signalled (with "mutex" held) by the worker threads when they
	/// report progress or finish a Block, so that the main thread can
	/// wake up in read_output_and_wait().
	mythread_cond cond;


	/// Memory usage that will not be exceeded in multi-threaded mode.
	/// Single-threaded mode can exceed this even by a large amount.
	uint64_t memlimit_threading;

	/// Memory usage limit that should never be exceeded.
	/// LZMA_MEMLIMIT_ERROR will be returned if decoding isn't possible
	/// even in single-threaded mode without exceeding this limit.
	uint64_t memlimit_stop;

	/// Amount of memory in use by the direct mode decoder
	/// (coder->block_decoder). In threaded mode this is 0.
	uint64_t mem_direct_mode;

	/// Amount of memory needed by the running worker threads.
	/// This doesn't include the memory needed by the output buffer.
	///
	/// \note Use mutex.
	uint64_t mem_in_use;

	/// Amount of memory used by the idle (cached) threads.
	///
	/// \note Use mutex.
	uint64_t mem_cached;


	/// Amount of memory needed for the filter chain of the next Block.
	uint64_t mem_next_filters;

	/// Amount of memory needed for the thread-specific input buffer
	/// for the next Block.
	uint64_t mem_next_in;

	/// Amount of memory actually needed to decode the next Block
	/// in threaded mode. This is
	/// mem_next_filters + mem_next_in + memory needed for lzma_outbuf.
	uint64_t mem_next_block;


	/// Amount of compressed data in Stream Header + Blocks that have
	/// already been finished.
	///
	/// \note Use mutex.
	uint64_t progress_in;

	/// Amount of uncompressed data in Blocks that have already
	/// been finished.
	///
	/// \note Use mutex.
	uint64_t progress_out;


	/// If true, LZMA_NO_CHECK is returned if the Stream has
	/// no integrity check.
	bool tell_no_check;

	/// If true, LZMA_UNSUPPORTED_CHECK is returned if the Stream has
	/// an integrity check that isn't supported by this liblzma build.
	bool tell_unsupported_check;

	/// If true, LZMA_GET_CHECK is returned after decoding Stream Header.
	bool tell_any_check;

	/// If true, we will tell the Block decoder to skip calculating
	/// and verifying the integrity check.
	bool ignore_check;

	/// If true, we will decode concatenated Streams that possibly have
	/// Stream Padding between or after them. LZMA_STREAM_END is returned
	/// once the application isn't giving us any new input (LZMA_FINISH),
	/// and we aren't in the middle of a Stream, and possible
	/// Stream Padding is a multiple of four bytes.
	bool concatenated;

	/// If true, we will return any errors immediately instead of first
	/// producing all output before the location of the error.
	bool fail_fast;


	/// When decoding concatenated Streams, this is true as long as we
	/// are decoding the first Stream. This is needed to avoid misleading
	/// LZMA_FORMAT_ERROR in case the later Streams don't have valid magic
	/// bytes.
	bool first_stream;

	/// This is used to track if the previous call to stream_decode_mt()
	/// had output space (*out_pos < out_size) and managed to fill the
	/// output buffer (*out_pos == out_size). This may be set to true
	/// in read_output_and_wait(). This is read and then reset to false
	/// at the beginning of stream_decode_mt().
	///
	/// This is needed to support applications that call lzma_code() in
	/// such a way that more input is provided only when lzma_code()
	/// didn't fill the output buffer completely. Basically, this makes
	/// it easier to convert such applications from single-threaded
	/// decoder to multi-threaded decoder.
	bool out_was_filled;

	/// Write position in buffer[] and position in Stream Padding
	size_t pos;

	/// Buffer to hold Stream Header, Block Header, and Stream Footer.
	/// Block Header has biggest maximum size.
	uint8_t buffer[LZMA_BLOCK_HEADER_SIZE_MAX];
};
328
329
330/// Enables updating of outbuf->pos. This is a callback function that is
331/// used with lzma_outq_enable_partial_output().
332static void
333worker_enable_partial_update(void *thr_ptr)
334{
335 struct worker_thread *thr = thr_ptr;
336
337 mythread_sync(thr->mutex) {
338 thr->partial_update = PARTIAL_START;
339 mythread_cond_signal(&thr->cond);
340 }
341}
342
343
/// Thread function of a worker thread. It decodes one Block at a time:
/// the main thread appends compressed data to thr->in and this thread
/// feeds it to the Block decoder, writing uncompressed data to
/// thr->outbuf. The loop runs until thr->state is set to THR_EXIT.
static MYTHREAD_RET_TYPE
#ifndef VBOX
worker_decoder(void *thr_ptr)
#else
// VBox builds use IPRT threads whose entry point also receives the
// thread handle; hThread is unused here.
worker_decoder(RTTHREAD hThread, void *thr_ptr)
#endif
{
	struct worker_thread *thr = thr_ptr;
	size_t in_filled;
	partial_update_mode partial_update;
	lzma_ret ret;

next_loop_lock:

	mythread_mutex_lock(&thr->mutex);
next_loop_unlocked:

	if (thr->state == THR_IDLE) {
		mythread_cond_wait(&thr->cond, &thr->mutex);
		goto next_loop_unlocked;
	}

	if (thr->state == THR_EXIT) {
		mythread_mutex_unlock(&thr->mutex);

		lzma_free(thr->in, thr->allocator);
		lzma_next_end(&thr->block_decoder, thr->allocator);

		mythread_mutex_destroy(&thr->mutex);
		mythread_cond_destroy(&thr->cond);

		return MYTHREAD_RET_VALUE;
	}

	assert(thr->state == THR_RUN);

	// Update progress info for get_progress().
	thr->progress_in = thr->in_pos;
	thr->progress_out = thr->out_pos;

	// If we don't have any new input, wait for a signal from the main
	// thread except if partial output has just been enabled. In that
	// case we will do one normal run so that the partial output info
	// gets passed to the main thread. The call to block_decoder.code()
	// is useless but harmless as it can occur only once per Block.
	in_filled = thr->in_filled;
	partial_update = thr->partial_update;

	if (in_filled == thr->in_pos && partial_update != PARTIAL_START) {
		mythread_cond_wait(&thr->cond, &thr->mutex);
		goto next_loop_unlocked;
	}

	mythread_mutex_unlock(&thr->mutex);

	// Pass the input in small chunks to the Block decoder.
	// This way we react reasonably fast if we are told to stop/exit,
	// and (when partial update is enabled) we tell about our progress
	// to the main thread frequently enough.
	const size_t chunk_size = 16384;
	if ((in_filled - thr->in_pos) > chunk_size)
		in_filled = thr->in_pos + chunk_size;

	ret = thr->block_decoder.code(
			thr->block_decoder.coder, thr->allocator,
			thr->in, &thr->in_pos, in_filled,
			thr->outbuf->buf, &thr->out_pos,
			thr->outbuf->allocated, LZMA_RUN);

	if (ret == LZMA_OK) {
		if (partial_update != PARTIAL_DISABLED) {
			// The main thread uses thr->mutex to change from
			// PARTIAL_DISABLED to PARTIAL_START. The main thread
			// doesn't care about this variable after that so we
			// can safely change it here to PARTIAL_ENABLED
			// without a mutex.
			thr->partial_update = PARTIAL_ENABLED;

			// The main thread is reading decompressed data
			// from thr->outbuf. Tell the main thread about
			// our progress.
			//
			// NOTE: It's possible that we consumed input without
			// producing any new output so it's possible that
			// only in_pos has changed. In case of PARTIAL_START
			// it is possible that neither in_pos nor out_pos has
			// changed.
			mythread_sync(thr->coder->mutex) {
				thr->outbuf->pos = thr->out_pos;
				thr->outbuf->decoder_in_pos = thr->in_pos;
				mythread_cond_signal(&thr->coder->cond);
			}
		}

		goto next_loop_lock;
	}

	// Either we finished successfully (LZMA_STREAM_END) or an error
	// occurred.
	//
	// The sizes are in the Block Header and the Block decoder
	// checks that they match, thus we know these:
	assert(ret != LZMA_STREAM_END || thr->in_pos == thr->in_size);
	assert(ret != LZMA_STREAM_END
			|| thr->out_pos == thr->block_options.uncompressed_size);

	mythread_sync(thr->mutex) {
		// Block decoder ensures this, but do a sanity check anyway
		// because thr->in_filled < thr->in_size means that the main
		// thread is still writing to thr->in.
		if (ret == LZMA_STREAM_END && thr->in_filled != thr->in_size) {
			assert(0);
			ret = LZMA_PROG_ERROR;
		}

		if (thr->state != THR_EXIT)
			thr->state = THR_IDLE;
	}

	// Free the input buffer. Don't update in_size as we need
	// it later to update thr->coder->mem_in_use.
	//
	// This step is skipped if an error occurred because the main thread
	// might still be writing to thr->in. The memory will be freed after
	// threads_end() sets thr->state = THR_EXIT.
	if (ret == LZMA_STREAM_END) {
		lzma_free(thr->in, thr->allocator);
		thr->in = NULL;
	}

	mythread_sync(thr->coder->mutex) {
		// Move our progress info to the main thread.
		thr->coder->progress_in += thr->in_pos;
		thr->coder->progress_out += thr->out_pos;
		thr->progress_in = 0;
		thr->progress_out = 0;

		// Mark the outbuf as finished.
		thr->outbuf->pos = thr->out_pos;
		thr->outbuf->decoder_in_pos = thr->in_pos;
		thr->outbuf->finished = true;
		thr->outbuf->finish_ret = ret;
		thr->outbuf = NULL;

		// If an error occurred, tell it to the main thread.
		if (ret != LZMA_STREAM_END
				&& thr->coder->thread_error == LZMA_OK)
			thr->coder->thread_error = ret;

		// Return the worker thread to the stack of available
		// threads only if no errors occurred.
		if (ret == LZMA_STREAM_END) {
			// Update memory usage counters.
			thr->coder->mem_in_use -= thr->in_size;
			thr->coder->mem_in_use -= thr->mem_filters;
			thr->coder->mem_cached += thr->mem_filters;

			// Put this thread to the stack of free threads.
			thr->next = thr->coder->threads_free;
			thr->coder->threads_free = thr;
		}

		mythread_cond_signal(&thr->coder->cond);
	}

	goto next_loop_lock;
}
511
512
513/// Tells the worker threads to exit and waits for them to terminate.
514static void
515threads_end(struct lzma_stream_coder *coder, const lzma_allocator *allocator)
516{
517 for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
518 mythread_sync(coder->threads[i].mutex) {
519 coder->threads[i].state = THR_EXIT;
520 mythread_cond_signal(&coder->threads[i].cond);
521 }
522 }
523
524 for (uint32_t i = 0; i < coder->threads_initialized; ++i)
525 mythread_join(coder->threads[i].thread_id);
526
527 lzma_free(coder->threads, allocator);
528 coder->threads_initialized = 0;
529 coder->threads = NULL;
530 coder->threads_free = NULL;
531
532 // The threads don't update these when they exit. Do it here.
533 coder->mem_in_use = 0;
534 coder->mem_cached = 0;
535
536 return;
537}
538
539
540/// Tell worker threads to stop without doing any cleaning up.
541/// The clean up will be done when threads_exit() is called;
542/// it's not possible to reuse the threads after threads_stop().
543///
544/// This is called before returning an unrecoverable error code
545/// to the application. It would be waste of processor time
546/// to keep the threads running in such a situation.
547static void
548threads_stop(struct lzma_stream_coder *coder)
549{
550 for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
551 // The threads that are in the THR_RUN state will stop
552 // when they check the state the next time. There's no
553 // need to signal coder->threads[i].cond.
554 mythread_sync(coder->threads[i].mutex) {
555 coder->threads[i].state = THR_IDLE;
556 }
557 }
558
559 return;
560}
561
562
/// Initialize a new worker_thread structure and create a new thread.
///
/// On success, coder->thr points to the new thread and
/// coder->threads_initialized is incremented.
///
/// \return LZMA_OK on success, LZMA_MEM_ERROR if allocation or
///         thread/mutex/cond creation fails.
static lzma_ret
initialize_new_thread(struct lzma_stream_coder *coder,
		const lzma_allocator *allocator)
{
	// Allocate the coder->threads array if needed. It's done here instead
	// of when initializing the decoder because we don't need this if we
	// use the direct mode (we may even free coder->threads in the middle
	// of the file if we switch from threaded to direct mode).
	if (coder->threads == NULL) {
		coder->threads = lzma_alloc(
			coder->threads_max * sizeof(struct worker_thread),
			allocator);

		if (coder->threads == NULL)
			return LZMA_MEM_ERROR;
	}

	// Pick a free structure.
	assert(coder->threads_initialized < coder->threads_max);
	struct worker_thread *thr
			= &coder->threads[coder->threads_initialized];

	if (mythread_mutex_init(&thr->mutex))
		goto error_mutex;

	if (mythread_cond_init(&thr->cond))
		goto error_cond;

	thr->state = THR_IDLE;
	thr->in = NULL;
	thr->in_size = 0;
	thr->allocator = allocator;
	thr->coder = coder;
	thr->outbuf = NULL;
	thr->block_decoder = LZMA_NEXT_CODER_INIT;
	thr->mem_filters = 0;

	// Start the thread at worker_decoder(). The thread begins in the
	// THR_IDLE state and waits for work.
	if (mythread_create(&thr->thread_id, worker_decoder, thr))
		goto error_thread;

	++coder->threads_initialized;
	coder->thr = thr;

	return LZMA_OK;

// Error unwinding: each label destroys only what was successfully
// created before the failure.
error_thread:
	mythread_cond_destroy(&thr->cond);

error_cond:
	mythread_mutex_destroy(&thr->mutex);

error_mutex:
	return LZMA_MEM_ERROR;
}
618
619
/// Make coder->thr point to a worker thread that can take the next Block:
/// either reuse a thread from the stack of free threads or initialize a
/// new one. The thread's per-Block counters are reset before returning.
///
/// NOTE(review): the "coder->thr == NULL" check below implies that
/// callers must have set coder->thr to NULL before calling this when no
/// cached thread is expected — confirm at the call sites.
static lzma_ret
get_thread(struct lzma_stream_coder *coder, const lzma_allocator *allocator)
{
	// If there is a free structure on the stack, use it.
	mythread_sync(coder->mutex) {
		if (coder->threads_free != NULL) {
			coder->thr = coder->threads_free;
			coder->threads_free = coder->threads_free->next;

			// The thread is no longer in the cache so subtract
			// it from the cached memory usage. Don't add it
			// to mem_in_use though; the caller will handle it
			// since it knows how much memory it will actually
			// use (the filter chain might change).
			coder->mem_cached -= coder->thr->mem_filters;
		}
	}

	if (coder->thr == NULL) {
		assert(coder->threads_initialized < coder->threads_max);

		// Initialize a new thread.
		return_if_error(initialize_new_thread(coder, allocator));
	}

	// Reset the per-Block state for the new Block.
	coder->thr->in_filled = 0;
	coder->thr->in_pos = 0;
	coder->thr->out_pos = 0;

	coder->thr->progress_in = 0;
	coder->thr->progress_out = 0;

	coder->thr->partial_update = PARTIAL_DISABLED;

	return LZMA_OK;
}
656
657
/// Copy as much decompressed output from the output queue to out[] as is
/// possible without blocking. When input_is_possible != NULL, report via
/// it whether decoding of a new Block could be started (enough memory, a
/// free outbuf slot, and a free or creatable thread). When
/// waiting_allowed is true and nothing can progress, wait on coder->cond,
/// honoring the optional coder->timeout (milliseconds).
///
/// \return LZMA_OK, LZMA_TIMED_OUT, or an error from the output queue or
///         a worker thread. On any error other than LZMA_TIMED_OUT the
///         worker threads are told to stop.
static lzma_ret
read_output_and_wait(struct lzma_stream_coder *coder,
		const lzma_allocator *allocator,
		uint8_t *restrict out, size_t *restrict out_pos,
		size_t out_size,
		bool *input_is_possible,
		bool waiting_allowed,
		mythread_condtime *wait_abs, bool *has_blocked)
{
	lzma_ret ret = LZMA_OK;

	mythread_sync(coder->mutex) {
		do {
			// Get as much output from the queue as is possible
			// without blocking.
			const size_t out_start = *out_pos;
			do {
				ret = lzma_outq_read(&coder->outq, allocator,
						out, out_pos, out_size,
						NULL, NULL);

				// If a Block was finished, tell the worker
				// thread of the next Block (if it is still
				// running) to start telling the main thread
				// when new output is available.
				if (ret == LZMA_STREAM_END)
					lzma_outq_enable_partial_output(
						&coder->outq,
						&worker_enable_partial_update);

				// Loop until a Block wasn't finished.
				// It's important to loop around even if
				// *out_pos == out_size because there could
				// be an empty Block that will return
				// LZMA_STREAM_END without needing any
				// output space.
			} while (ret == LZMA_STREAM_END);

			// Check if lzma_outq_read reported an error from
			// the Block decoder.
			if (ret != LZMA_OK)
				break;

			// If the output buffer is now full but it wasn't full
			// when this function was called, set out_was_filled.
			// This way the next call to stream_decode_mt() knows
			// that some output was produced and no output space
			// remained in the previous call to stream_decode_mt().
			if (*out_pos == out_size && *out_pos != out_start)
				coder->out_was_filled = true;

			// Check if any thread has indicated an error.
			if (coder->thread_error != LZMA_OK) {
				// If LZMA_FAIL_FAST was used, report errors
				// from worker threads immediately.
				if (coder->fail_fast) {
					ret = coder->thread_error;
					break;
				}

				// Otherwise set pending_error. The value we
				// set here will not actually get used other
				// than working as a flag that an error has
				// occurred. This is because in SEQ_ERROR
				// all output before the error will be read
				// first by calling this function, and once we
				// reach the location of the (first) error the
				// error code from the above lzma_outq_read()
				// will be returned to the application.
				//
				// Use LZMA_PROG_ERROR since the value should
				// never leak to the application. It's
				// possible that pending_error has already
				// been set but that doesn't matter: if we get
				// here, pending_error only works as a flag.
				coder->pending_error = LZMA_PROG_ERROR;
			}

			// Check if decoding of the next Block can be started.
			// The memusage of the active threads must be low
			// enough, there must be a free buffer slot in the
			// output queue, and there must be a free thread
			// (that can be either created or an existing one
			// reused).
			//
			// NOTE: This is checked after reading the output
			// above because reading the output can free a slot in
			// the output queue and also reduce active memusage.
			//
			// NOTE: If output queue is empty, then input will
			// always be possible.
			if (input_is_possible != NULL
					&& coder->memlimit_threading
						- coder->mem_in_use
						- coder->outq.mem_in_use
						>= coder->mem_next_block
					&& lzma_outq_has_buf(&coder->outq)
					&& (coder->threads_initialized
							< coder->threads_max
						|| coder->threads_free
							!= NULL)) {
				*input_is_possible = true;
				break;
			}

			// If the caller doesn't want us to block, return now.
			if (!waiting_allowed)
				break;

			// This check is needed only when input_is_possible
			// is NULL. We must return if we aren't waiting for
			// input to become possible and there is no more
			// output coming from the queue.
			if (lzma_outq_is_empty(&coder->outq)) {
				assert(input_is_possible == NULL);
				break;
			}

			// If there is more data available from the queue,
			// our out buffer must be full and we need to return
			// so that the application can provide more output
			// space.
			//
			// NOTE: In general lzma_outq_is_readable() can return
			// true also when there are no more bytes available.
			// This can happen when a Block has finished without
			// providing any new output. We know that this is not
			// the case because in the beginning of this loop we
			// tried to read as much as possible even when we had
			// no output space left and the mutex has been locked
			// all the time (so worker threads cannot have changed
			// anything). Thus there must be actual pending output
			// in the queue.
			if (lzma_outq_is_readable(&coder->outq)) {
				assert(*out_pos == out_size);
				break;
			}

			// If the application stops providing more input
			// in the middle of a Block, there will eventually
			// be one worker thread left that is stuck waiting for
			// more input (that might never arrive) and a matching
			// outbuf which the worker thread cannot finish due
			// to lack of input. We must detect this situation,
			// otherwise we would end up waiting indefinitely
			// (if no timeout is in use) or keep returning
			// LZMA_TIMED_OUT while making no progress. Thus, the
			// application would never get LZMA_BUF_ERROR from
			// lzma_code() which would tell the application that
			// no more progress is possible. No LZMA_BUF_ERROR
			// means that, for example, truncated .xz files could
			// cause an infinite loop.
			//
			// A worker thread doing partial updates will
			// store not only the output position in outbuf->pos
			// but also the matching input position in
			// outbuf->decoder_in_pos. Here we check if that
			// input position matches the amount of input that
			// the worker thread has been given (in_filled).
			// If so, we must return and not wait as no more
			// output will be coming without first getting more
			// input to the worker thread. If the application
			// keeps calling lzma_code() without providing more
			// input, it will eventually get LZMA_BUF_ERROR.
			//
			// NOTE: We can read partial_update and in_filled
			// without thr->mutex as only the main thread
			// modifies these variables. decoder_in_pos requires
			// coder->mutex which we are already holding.
			if (coder->thr != NULL && coder->thr->partial_update
					!= PARTIAL_DISABLED) {
				// There is exactly one outbuf in the queue.
				assert(coder->thr->outbuf == coder->outq.head);
				assert(coder->thr->outbuf == coder->outq.tail);

				if (coder->thr->outbuf->decoder_in_pos
						== coder->thr->in_filled)
					break;
			}

			// Wait for input or output to become possible.
			if (coder->timeout != 0) {
				// See the comment in stream_encoder_mt.c
				// about why mythread_condtime_set() is used
				// like this.
				//
				// FIXME?
				// In contrast to the encoder, this calls
				// _condtime_set while the mutex is locked.
				if (!*has_blocked) {
					*has_blocked = true;
					mythread_condtime_set(wait_abs,
							&coder->cond,
							coder->timeout);
				}

				if (mythread_cond_timedwait(&coder->cond,
						&coder->mutex,
						wait_abs) != 0) {
					ret = LZMA_TIMED_OUT;
					break;
				}
			} else {
				mythread_cond_wait(&coder->cond,
						&coder->mutex);
			}
		} while (ret == LZMA_OK);
	}

	// If we are returning an error, then the application cannot get
	// more output from us and thus keeping the threads running is
	// useless and waste of CPU time.
	if (ret != LZMA_OK && ret != LZMA_TIMED_OUT)
		threads_stop(coder);

	return ret;
}
875
876
877static lzma_ret
878decode_block_header(struct lzma_stream_coder *coder,
879 const lzma_allocator *allocator, const uint8_t *restrict in,
880 size_t *restrict in_pos, size_t in_size)
881{
882 if (*in_pos >= in_size)
883 return LZMA_OK;
884
885 if (coder->pos == 0) {
886 // Detect if it's Index.
887 if (in[*in_pos] == INDEX_INDICATOR)
888 return LZMA_INDEX_DETECTED;
889
890 // Calculate the size of the Block Header. Note that
891 // Block Header decoder wants to see this byte too
892 // so don't advance *in_pos.
893 coder->block_options.header_size
894 = lzma_block_header_size_decode(
895 in[*in_pos]);
896 }
897
898 // Copy the Block Header to the internal buffer.
899 lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
900 coder->block_options.header_size);
901
902 // Return if we didn't get the whole Block Header yet.
903 if (coder->pos < coder->block_options.header_size)
904 return LZMA_OK;
905
906 coder->pos = 0;
907
908 // Version 1 is needed to support the .ignore_check option.
909 coder->block_options.version = 1;
910
911 // Block Header decoder will initialize all members of this array
912 // so we don't need to do it here.
913 coder->block_options.filters = coder->filters;
914
915 // Decode the Block Header.
916 return_if_error(lzma_block_header_decode(&coder->block_options,
917 allocator, coder->buffer));
918
919 // If LZMA_IGNORE_CHECK was used, this flag needs to be set.
920 // It has to be set after lzma_block_header_decode() because
921 // it always resets this to false.
922 coder->block_options.ignore_check = coder->ignore_check;
923
924 // coder->block_options is ready now.
925 return LZMA_STREAM_END;
926}
927
928
929/// Get the size of the Compressed Data + Block Padding + Check.
930static size_t
931comp_blk_size(const struct lzma_stream_coder *coder)
932{
933 return vli_ceil4(coder->block_options.compressed_size)
934 + lzma_check_size(coder->stream_flags.check);
935}
936
937
938/// Returns true if the size (compressed or uncompressed) is such that
939/// threaded decompression cannot be used. Sizes that are too big compared
940/// to SIZE_MAX must be rejected to avoid integer overflows and truncations
941/// when lzma_vli is assigned to a size_t.
942static bool
943is_direct_mode_needed(lzma_vli size)
944{
945 return size == LZMA_VLI_UNKNOWN || size > SIZE_MAX / 3;
946}
947
948
949static lzma_ret
950stream_decoder_reset(struct lzma_stream_coder *coder,
951 const lzma_allocator *allocator)
952{
953 // Initialize the Index hash used to verify the Index.
954 coder->index_hash = lzma_index_hash_init(coder->index_hash, allocator);
955 if (coder->index_hash == NULL)
956 return LZMA_MEM_ERROR;
957
958 // Reset the rest of the variables.
959 coder->sequence = SEQ_STREAM_HEADER;
960 coder->pos = 0;
961
962 return LZMA_OK;
963}
964
965
/// Main decoding loop (the lzma_next_coder.code function) of the
/// multithreaded .xz Stream decoder. Decoding proceeds as a state
/// machine driven by coder->sequence: Stream Header, Block Headers,
/// Blocks (either handed to worker threads or decoded in single-threaded
/// "direct mode"), Index, Stream Footer, and optional Stream Padding
/// between concatenated Streams.
static lzma_ret
stream_decode_mt(void *coder_ptr, const lzma_allocator *allocator,
		const uint8_t *restrict in, size_t *restrict in_pos,
		size_t in_size,
		uint8_t *restrict out, size_t *restrict out_pos,
		size_t out_size, lzma_action action)
{
	struct lzma_stream_coder *coder = coder_ptr;

	// State shared with read_output_and_wait(): the absolute timeout
	// (computed lazily when coder->timeout is enabled) and whether this
	// lzma_code() call has already blocked once.
	mythread_condtime wait_abs;
	bool has_blocked = false;

	// Determine if in SEQ_BLOCK_HEADER and SEQ_BLOCK_THR_RUN we should
	// tell read_output_and_wait() to wait until it can fill the output
	// buffer (or a timeout occurs). Two conditions must be met:
	//
	// (1) If the caller provided no new input. The reason for this
	//     can be, for example, the end of the file or that there is
	//     a pause in the input stream and more input is available
	//     a little later. In this situation we should wait for output
	//     because otherwise we would end up in a busy-waiting loop where
	//     we make no progress and the application just calls us again
	//     without providing any new input. This would then result in
	//     LZMA_BUF_ERROR even though more output would be available
	//     once the worker threads decode more data.
	//
	// (2) Even if (1) is true, we will not wait if the previous call to
	//     this function managed to produce some output and the output
	//     buffer became full. This is for compatibility with applications
	//     that call lzma_code() in such a way that new input is provided
	//     only when the output buffer didn't become full. Without this
	//     trick such applications would have bad performance (bad
	//     parallelization due to decoder not getting input fast enough).
	//
	//     NOTE: Such loops might require that timeout is disabled (0)
	//     if they assume that output-not-full implies that all input has
	//     been consumed. If and only if timeout is enabled, we may return
	//     when output isn't full *and* not all input has been consumed.
	//
	// However, if LZMA_FINISH is used, the above is ignored and we always
	// wait (timeout can still cause us to return) because we know that
	// we won't get any more input. This matters if the input file is
	// truncated and we are doing single-shot decoding, that is,
	// timeout = 0 and LZMA_FINISH is used on the first call to
	// lzma_code() and the output buffer is known to be big enough
	// to hold all uncompressed data:
	//
	// - If LZMA_FINISH wasn't handled specially, we could return
	//   LZMA_OK before providing all output that is possible with the
	//   truncated input. The rest would be available if lzma_code() was
	//   called again but then it's not single-shot decoding anymore.
	//
	// - By handling LZMA_FINISH specially here, the first call will
	//   produce all the output, matching the behavior of the
	//   single-threaded decoder.
	//
	// So it's a very specific corner case but also easy to avoid. Note
	// that this special handling of LZMA_FINISH has no effect for
	// single-shot decoding when the input file is valid (not truncated);
	// premature LZMA_OK wouldn't be possible as long as timeout = 0.
	const bool waiting_allowed = action == LZMA_FINISH
			|| (*in_pos == in_size && !coder->out_was_filled);
	coder->out_was_filled = false;

	while (true)
	switch (coder->sequence) {
	case SEQ_STREAM_HEADER: {
		// Copy the Stream Header to the internal buffer.
		const size_t in_old = *in_pos;
		lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
				LZMA_STREAM_HEADER_SIZE);
		coder->progress_in += *in_pos - in_old;

		// Return if we didn't get the whole Stream Header yet.
		if (coder->pos < LZMA_STREAM_HEADER_SIZE)
			return LZMA_OK;

		coder->pos = 0;

		// Decode the Stream Header.
		const lzma_ret ret = lzma_stream_header_decode(
				&coder->stream_flags, coder->buffer);
		if (ret != LZMA_OK)
			return ret == LZMA_FORMAT_ERROR && !coder->first_stream
					? LZMA_DATA_ERROR : ret;

		// If we are decoding concatenated Streams, and the later
		// Streams have invalid Header Magic Bytes, we give
		// LZMA_DATA_ERROR instead of LZMA_FORMAT_ERROR.
		coder->first_stream = false;

		// Copy the type of the Check so that Block Header and Block
		// decoders see it.
		coder->block_options.check = coder->stream_flags.check;

		// Even if we return LZMA_*_CHECK below, we want
		// to continue from Block Header decoding.
		coder->sequence = SEQ_BLOCK_HEADER;

		// Detect if there's no integrity check or if it is
		// unsupported if those were requested by the application.
		if (coder->tell_no_check && coder->stream_flags.check
				== LZMA_CHECK_NONE)
			return LZMA_NO_CHECK;

		if (coder->tell_unsupported_check
				&& !lzma_check_is_supported(
					coder->stream_flags.check))
			return LZMA_UNSUPPORTED_CHECK;

		if (coder->tell_any_check)
			return LZMA_GET_CHECK;

		FALLTHROUGH;
	}

	case SEQ_BLOCK_HEADER: {
		const size_t in_old = *in_pos;
		const lzma_ret ret = decode_block_header(coder, allocator,
				in, in_pos, in_size);
		coder->progress_in += *in_pos - in_old;

		if (ret == LZMA_OK) {
			// We didn't decode the whole Block Header yet.
			//
			// Read output from the queue before returning. This
			// is important because it is possible that the
			// application doesn't have any new input available
			// immediately. If we didn't try to copy output from
			// the output queue here, lzma_code() could end up
			// returning LZMA_BUF_ERROR even though queued output
			// is available.
			//
			// If the lzma_code() call provided at least one input
			// byte, only copy as much data from the output queue
			// as is available immediately. This way the
			// application will be able to provide more input
			// without a delay.
			//
			// On the other hand, if lzma_code() was called with
			// an empty input buffer(*), treat it specially: try
			// to fill the output buffer even if it requires
			// waiting for the worker threads to provide output
			// (timeout, if specified, can still cause us to
			// return).
			//
			//   - This way the application will be able to get all
			//     data that can be decoded from the input provided
			//     so far.
			//
			//   - We avoid both premature LZMA_BUF_ERROR and
			//     busy-waiting where the application repeatedly
			//     calls lzma_code() which immediately returns
			//     LZMA_OK without providing new data.
			//
			//   - If the queue becomes empty, we won't wait
			//     anything and will return LZMA_OK immediately
			//     (coder->timeout is completely ignored).
			//
			// (*) See the comment at the beginning of this
			//     function how waiting_allowed is determined
			//     and why there is an exception to the rule
			//     of "called with an empty input buffer".
			assert(*in_pos == in_size);

			// If LZMA_FINISH was used we know that we won't get
			// more input, so the file must be truncated if we
			// get here. If worker threads don't detect any
			// errors, eventually there will be no more output
			// while we keep returning LZMA_OK which gets
			// converted to LZMA_BUF_ERROR in lzma_code().
			//
			// If fail-fast is enabled then we will return
			// immediately using LZMA_DATA_ERROR instead of
			// LZMA_OK or LZMA_BUF_ERROR. Rationale for the
			// error code:
			//
			//   - Worker threads may have a large amount of
			//     not-yet-decoded input data and we don't
			//     know for sure if all data is valid. Bad
			//     data there would result in LZMA_DATA_ERROR
			//     when fail-fast isn't used.
			//
			//   - Immediate LZMA_BUF_ERROR would be a bit weird
			//     considering the older liblzma code. lzma_code()
			//     even has an assertion to prevent coders from
			//     returning LZMA_BUF_ERROR directly.
			//
			// The downside of this is that with fail-fast apps
			// cannot always distinguish between corrupt and
			// truncated files.
			if (action == LZMA_FINISH && coder->fail_fast) {
				// We won't produce any more output. Stop
				// the unfinished worker threads so they
				// won't waste CPU time.
				threads_stop(coder);
				return LZMA_DATA_ERROR;
			}

			// read_output_and_wait() will call threads_stop()
			// if needed so with that we can use return_if_error.
			return_if_error(read_output_and_wait(coder, allocator,
					out, out_pos, out_size,
					NULL, waiting_allowed,
					&wait_abs, &has_blocked));

			if (coder->pending_error != LZMA_OK) {
				coder->sequence = SEQ_ERROR;
				break;
			}

			return LZMA_OK;
		}

		if (ret == LZMA_INDEX_DETECTED) {
			coder->sequence = SEQ_INDEX_WAIT_OUTPUT;
			break;
		}

		// See if an error occurred.
		if (ret != LZMA_STREAM_END) {
			// NOTE: Here and in all other places where
			// pending_error is set, it may overwrite the value
			// (LZMA_PROG_ERROR) set by read_output_and_wait().
			// That function might overwrite value set here too.
			// These are fine because when read_output_and_wait()
			// sets pending_error, it actually works as a flag
			// variable only ("some error has occurred") and the
			// actual value of pending_error is not used in
			// SEQ_ERROR. In such cases SEQ_ERROR will eventually
			// get the correct error code from the return value of
			// a later read_output_and_wait() call.
			coder->pending_error = ret;
			coder->sequence = SEQ_ERROR;
			break;
		}

		// Calculate the memory usage of the filters / Block decoder.
		coder->mem_next_filters = lzma_raw_decoder_memusage(
				coder->filters);

		if (coder->mem_next_filters == UINT64_MAX) {
			// One or more unknown Filter IDs.
			coder->pending_error = LZMA_OPTIONS_ERROR;
			coder->sequence = SEQ_ERROR;
			break;
		}

		coder->sequence = SEQ_BLOCK_INIT;
		FALLTHROUGH;
	}

	case SEQ_BLOCK_INIT: {
		// Check if decoding is possible at all with the current
		// memlimit_stop which we must never exceed.
		//
		// This needs to be the first thing in SEQ_BLOCK_INIT
		// to make it possible to restart decoding after increasing
		// memlimit_stop with lzma_memlimit_set().
		if (coder->mem_next_filters > coder->memlimit_stop) {
			// Flush pending output before returning
			// LZMA_MEMLIMIT_ERROR. If the application doesn't
			// want to increase the limit, at least it will get
			// all the output possible so far.
			return_if_error(read_output_and_wait(coder, allocator,
					out, out_pos, out_size,
					NULL, true, &wait_abs, &has_blocked));

			if (!lzma_outq_is_empty(&coder->outq))
				return LZMA_OK;

			return LZMA_MEMLIMIT_ERROR;
		}

		// Check if the size information is available in Block Header.
		// If it is, check if the sizes are small enough that we don't
		// need to worry *too* much about integer overflows later in
		// the code. If these conditions are not met, we must use the
		// single-threaded direct mode.
		if (is_direct_mode_needed(coder->block_options.compressed_size)
				|| is_direct_mode_needed(
				coder->block_options.uncompressed_size)) {
			coder->sequence = SEQ_BLOCK_DIRECT_INIT;
			break;
		}

		// Calculate the amount of memory needed for the input and
		// output buffers in threaded mode.
		//
		// These cannot overflow because we already checked that
		// the sizes are small enough using is_direct_mode_needed().
		coder->mem_next_in = comp_blk_size(coder);
		const uint64_t mem_buffers = coder->mem_next_in
				+ lzma_outq_outbuf_memusage(
				coder->block_options.uncompressed_size);

		// Add the amount needed by the filters.
		// Avoid integer overflows.
		if (UINT64_MAX - mem_buffers < coder->mem_next_filters) {
			// Use direct mode if the memusage would overflow.
			// This is a theoretical case that shouldn't happen
			// in practice unless the input file is weird (broken
			// or malicious).
			coder->sequence = SEQ_BLOCK_DIRECT_INIT;
			break;
		}

		// Amount of memory needed to decode this Block in
		// threaded mode:
		coder->mem_next_block = coder->mem_next_filters + mem_buffers;

		// If this alone would exceed memlimit_threading, then we must
		// use the single-threaded direct mode.
		if (coder->mem_next_block > coder->memlimit_threading) {
			coder->sequence = SEQ_BLOCK_DIRECT_INIT;
			break;
		}

		// Use the threaded mode. Free the direct mode decoder in
		// case it has been initialized.
		lzma_next_end(&coder->block_decoder, allocator);
		coder->mem_direct_mode = 0;

		// Since we already know what the sizes are supposed to be,
		// we can already add them to the Index hash. The Block
		// decoder will verify the values while decoding.
		const lzma_ret ret = lzma_index_hash_append(coder->index_hash,
				lzma_block_unpadded_size(
					&coder->block_options),
				coder->block_options.uncompressed_size);
		if (ret != LZMA_OK) {
			coder->pending_error = ret;
			coder->sequence = SEQ_ERROR;
			break;
		}

		coder->sequence = SEQ_BLOCK_THR_INIT;
		FALLTHROUGH;
	}

	case SEQ_BLOCK_THR_INIT: {
		// We need to wait for a multiple conditions to become true
		// until we can initialize the Block decoder and let a worker
		// thread decode it:
		//
		//   - Wait for the memory usage of the active threads to drop
		//     so that starting the decoding of this Block won't make
		//     us go over memlimit_threading.
		//
		//   - Wait for at least one free output queue slot.
		//
		//   - Wait for a free worker thread.
		//
		// While we wait, we must copy decompressed data to the out
		// buffer and catch possible decoder errors.
		//
		// read_output_and_wait() does all the above.
		bool block_can_start = false;

		return_if_error(read_output_and_wait(coder, allocator,
				out, out_pos, out_size,
				&block_can_start, true,
				&wait_abs, &has_blocked));

		if (coder->pending_error != LZMA_OK) {
			coder->sequence = SEQ_ERROR;
			break;
		}

		if (!block_can_start) {
			// It's not a timeout because return_if_error handles
			// it already. Output queue cannot be empty either
			// because in that case block_can_start would have
			// been true. Thus the output buffer must be full and
			// the queue isn't empty.
			assert(*out_pos == out_size);
			assert(!lzma_outq_is_empty(&coder->outq));
			return LZMA_OK;
		}

		// We know that we can start decoding this Block without
		// exceeding memlimit_threading. However, to stay below
		// memlimit_threading may require freeing some of the
		// cached memory.
		//
		// Get a local copy of variables that require locking the
		// mutex. It is fine if the worker threads modify the real
		// values after we read these as those changes can only be
		// towards more favorable conditions (less memory in use,
		// more in cache).
		//
		// These are initialized to silence warnings.
#ifndef VBOX
		uint64_t mem_in_use;
		uint64_t mem_cached;
#else
		// VBox change: zero-initialize so stricter compilers don't
		// warn; the values are always overwritten below.
		uint64_t mem_in_use = 0;
		uint64_t mem_cached = 0;
#endif
		struct worker_thread *thr = NULL;

		mythread_sync(coder->mutex) {
			mem_in_use = coder->mem_in_use;
			mem_cached = coder->mem_cached;
			thr = coder->threads_free;
		}

		// The maximum amount of memory that can be held by other
		// threads and cached buffers while allowing us to start
		// decoding the next Block.
		const uint64_t mem_max = coder->memlimit_threading
				- coder->mem_next_block;

		// If the existing allocations are so large that starting
		// to decode this Block might exceed memlimit_threads,
		// try to free memory from the output queue cache first.
		//
		// NOTE: This math assumes the worst case. It's possible
		// that the limit wouldn't be exceeded if the existing cached
		// allocations are reused.
		if (mem_in_use + mem_cached + coder->outq.mem_allocated
				> mem_max) {
			// Clear the outq cache except leave one buffer in
			// the cache if its size is correct. That way we
			// don't free and almost immediately reallocate
			// an identical buffer.
			lzma_outq_clear_cache2(&coder->outq, allocator,
				coder->block_options.uncompressed_size);
		}

		// If there is at least one worker_thread in the cache and
		// the existing allocations are so large that starting to
		// decode this Block might exceed memlimit_threads, free
		// memory by freeing cached Block decoders.
		//
		// NOTE: The comparison is different here than above.
		// Here we don't care about cached buffers in outq anymore
		// and only look at memory actually in use. This is because
		// if there is something in outq cache, it's a single buffer
		// that can be used as is. We ensured this in the above
		// if-block.
		uint64_t mem_freed = 0;
		if (thr != NULL && mem_in_use + mem_cached
				+ coder->outq.mem_in_use > mem_max) {
			// Don't free the first Block decoder if its memory
			// usage isn't greater than what this Block will need.
			// Typically the same filter chain is used for all
			// Blocks so this way the allocations can be reused
			// when get_thread() picks the first worker_thread
			// from the cache.
			if (thr->mem_filters <= coder->mem_next_filters)
				thr = thr->next;

			while (thr != NULL) {
				lzma_next_end(&thr->block_decoder, allocator);
				mem_freed += thr->mem_filters;
				thr->mem_filters = 0;
				thr = thr->next;
			}
		}

		// Update the memory usage counters. Note that coder->mem_*
		// may have changed since we read them so we must subtract
		// or add the changes.
		mythread_sync(coder->mutex) {
			coder->mem_cached -= mem_freed;

			// Memory needed for the filters and the input buffer.
			// The output queue takes care of its own counter so
			// we don't touch it here.
			//
			// NOTE: After this, coder->mem_in_use +
			// coder->mem_cached might count the same thing twice.
			// If so, this will get corrected in get_thread() when
			// a worker_thread is picked from coder->free_threads
			// and its memory usage is subtracted from mem_cached.
			coder->mem_in_use += coder->mem_next_in
					+ coder->mem_next_filters;
		}

		// Allocate memory for the output buffer in the output queue.
		lzma_ret ret = lzma_outq_prealloc_buf(
				&coder->outq, allocator,
				coder->block_options.uncompressed_size);
		if (ret != LZMA_OK) {
			threads_stop(coder);
			return ret;
		}

		// Set up coder->thr.
		ret = get_thread(coder, allocator);
		if (ret != LZMA_OK) {
			threads_stop(coder);
			return ret;
		}

		// The new Block decoder memory usage is already counted in
		// coder->mem_in_use. Store it in the thread too.
		coder->thr->mem_filters = coder->mem_next_filters;

		// Initialize the Block decoder.
		coder->thr->block_options = coder->block_options;
		ret = lzma_block_decoder_init(
				&coder->thr->block_decoder, allocator,
				&coder->thr->block_options);

		// Free the allocated filter options since they are needed
		// only to initialize the Block decoder.
		lzma_filters_free(coder->filters, allocator);
		coder->thr->block_options.filters = NULL;

		// Check if memory usage calculation and Block encoder
		// initialization succeeded.
		if (ret != LZMA_OK) {
			coder->pending_error = ret;
			coder->sequence = SEQ_ERROR;
			break;
		}

		// Allocate the input buffer.
		coder->thr->in_size = coder->mem_next_in;
		coder->thr->in = lzma_alloc(coder->thr->in_size, allocator);
		if (coder->thr->in == NULL) {
			threads_stop(coder);
			return LZMA_MEM_ERROR;
		}

		// Get the preallocated output buffer.
		coder->thr->outbuf = lzma_outq_get_buf(
				&coder->outq, coder->thr);

		// Start the decoder.
		mythread_sync(coder->thr->mutex) {
			assert(coder->thr->state == THR_IDLE);
			coder->thr->state = THR_RUN;
			mythread_cond_signal(&coder->thr->cond);
		}

		// Enable output from the thread that holds the oldest output
		// buffer in the output queue (if such a thread exists).
		mythread_sync(coder->mutex) {
			lzma_outq_enable_partial_output(&coder->outq,
					&worker_enable_partial_update);
		}

		coder->sequence = SEQ_BLOCK_THR_RUN;
		FALLTHROUGH;
	}

	case SEQ_BLOCK_THR_RUN: {
		if (action == LZMA_FINISH && coder->fail_fast) {
			// We know that we won't get more input and that
			// the caller wants fail-fast behavior. If we see
			// that we don't have enough input to finish this
			// Block, return LZMA_DATA_ERROR immediately.
			// See SEQ_BLOCK_HEADER for the error code rationale.
			const size_t in_avail = in_size - *in_pos;
			const size_t in_needed = coder->thr->in_size
					- coder->thr->in_filled;
			if (in_avail < in_needed) {
				threads_stop(coder);
				return LZMA_DATA_ERROR;
			}
		}

		// Copy input to the worker thread.
		size_t cur_in_filled = coder->thr->in_filled;
		lzma_bufcpy(in, in_pos, in_size, coder->thr->in,
				&cur_in_filled, coder->thr->in_size);

		// Tell the thread how much we copied.
		mythread_sync(coder->thr->mutex) {
			coder->thr->in_filled = cur_in_filled;

			// NOTE: Most of the time we are copying input faster
			// than the thread can decode so most of the time
			// calling mythread_cond_signal() is useless but
			// we cannot make it conditional because thr->in_pos
			// is updated without a mutex. And the overhead should
			// be very much negligible anyway.
			mythread_cond_signal(&coder->thr->cond);
		}

		// Read output from the output queue. Just like in
		// SEQ_BLOCK_HEADER, we wait to fill the output buffer
		// only if waiting_allowed was set to true in the beginning
		// of this function (see the comment there) and there is
		// no input available. In SEQ_BLOCK_HEADER, there is never
		// input available when read_output_and_wait() is called,
		// but here there can be when LZMA_FINISH is used, thus we
		// need to check if *in_pos == in_size. Otherwise we would
		// wait here instead of using the available input to start
		// a new thread.
		return_if_error(read_output_and_wait(coder, allocator,
				out, out_pos, out_size,
				NULL,
				waiting_allowed && *in_pos == in_size,
				&wait_abs, &has_blocked));

		if (coder->pending_error != LZMA_OK) {
			coder->sequence = SEQ_ERROR;
			break;
		}

		// Return if the input didn't contain the whole Block.
		//
		// NOTE: When we updated coder->thr->in_filled a few lines
		// above, the worker thread might by now have finished its
		// work and returned itself back to the stack of free threads.
		if (coder->thr->in_filled < coder->thr->in_size) {
			assert(*in_pos == in_size);
			return LZMA_OK;
		}

		// The whole Block has been copied to the thread-specific
		// buffer. Continue from the next Block Header or Index.
		coder->thr = NULL;
		coder->sequence = SEQ_BLOCK_HEADER;
		break;
	}

	case SEQ_BLOCK_DIRECT_INIT: {
		// Wait for the threads to finish and that all decoded data
		// has been copied to the output. That is, wait until the
		// output queue becomes empty.
		//
		// NOTE: No need to check for coder->pending_error as
		// we aren't consuming any input until the queue is empty
		// and if there is a pending error, read_output_and_wait()
		// will eventually return it before the queue is empty.
		return_if_error(read_output_and_wait(coder, allocator,
				out, out_pos, out_size,
				NULL, true, &wait_abs, &has_blocked));
		if (!lzma_outq_is_empty(&coder->outq))
			return LZMA_OK;

		// Free the cached output buffers.
		lzma_outq_clear_cache(&coder->outq, allocator);

		// Get rid of the worker threads, including the coder->threads
		// array.
		threads_end(coder, allocator);

		// Initialize the Block decoder.
		const lzma_ret ret = lzma_block_decoder_init(
				&coder->block_decoder, allocator,
				&coder->block_options);

		// Free the allocated filter options since they are needed
		// only to initialize the Block decoder.
		lzma_filters_free(coder->filters, allocator);
		coder->block_options.filters = NULL;

		// Check if Block decoder initialization succeeded.
		if (ret != LZMA_OK)
			return ret;

		// Make the memory usage visible to _memconfig().
		coder->mem_direct_mode = coder->mem_next_filters;

		coder->sequence = SEQ_BLOCK_DIRECT_RUN;
		FALLTHROUGH;
	}

	case SEQ_BLOCK_DIRECT_RUN: {
		const size_t in_old = *in_pos;
		const size_t out_old = *out_pos;
		const lzma_ret ret = coder->block_decoder.code(
				coder->block_decoder.coder, allocator,
				in, in_pos, in_size, out, out_pos, out_size,
				action);
		coder->progress_in += *in_pos - in_old;
		coder->progress_out += *out_pos - out_old;

		if (ret != LZMA_STREAM_END)
			return ret;

		// Block decoded successfully. Add the new size pair to
		// the Index hash.
		return_if_error(lzma_index_hash_append(coder->index_hash,
				lzma_block_unpadded_size(
					&coder->block_options),
				coder->block_options.uncompressed_size));

		coder->sequence = SEQ_BLOCK_HEADER;
		break;
	}

	case SEQ_INDEX_WAIT_OUTPUT:
		// Flush the output from all worker threads so that we can
		// decode the Index without thinking about threading.
		return_if_error(read_output_and_wait(coder, allocator,
				out, out_pos, out_size,
				NULL, true, &wait_abs, &has_blocked));

		if (!lzma_outq_is_empty(&coder->outq))
			return LZMA_OK;

		coder->sequence = SEQ_INDEX_DECODE;
		FALLTHROUGH;

	case SEQ_INDEX_DECODE: {
		// If we don't have any input, don't call
		// lzma_index_hash_decode() since it would return
		// LZMA_BUF_ERROR, which we must not do here.
		if (*in_pos >= in_size)
			return LZMA_OK;

		// Decode the Index and compare it to the hash calculated
		// from the sizes of the Blocks (if any).
		const size_t in_old = *in_pos;
		const lzma_ret ret = lzma_index_hash_decode(coder->index_hash,
				in, in_pos, in_size);
		coder->progress_in += *in_pos - in_old;
		if (ret != LZMA_STREAM_END)
			return ret;

		coder->sequence = SEQ_STREAM_FOOTER;
		FALLTHROUGH;
	}

	case SEQ_STREAM_FOOTER: {
		// Copy the Stream Footer to the internal buffer.
		const size_t in_old = *in_pos;
		lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
				LZMA_STREAM_HEADER_SIZE);
		coder->progress_in += *in_pos - in_old;

		// Return if we didn't get the whole Stream Footer yet.
		if (coder->pos < LZMA_STREAM_HEADER_SIZE)
			return LZMA_OK;

		coder->pos = 0;

		// Decode the Stream Footer. The decoder gives
		// LZMA_FORMAT_ERROR if the magic bytes don't match,
		// so convert that return code to LZMA_DATA_ERROR.
		lzma_stream_flags footer_flags;
		const lzma_ret ret = lzma_stream_footer_decode(
				&footer_flags, coder->buffer);
		if (ret != LZMA_OK)
			return ret == LZMA_FORMAT_ERROR
					? LZMA_DATA_ERROR : ret;

		// Check that Index Size stored in the Stream Footer matches
		// the real size of the Index field.
		if (lzma_index_hash_size(coder->index_hash)
				!= footer_flags.backward_size)
			return LZMA_DATA_ERROR;

		// Compare that the Stream Flags fields are identical in
		// both Stream Header and Stream Footer.
		return_if_error(lzma_stream_flags_compare(
				&coder->stream_flags, &footer_flags));

		if (!coder->concatenated)
			return LZMA_STREAM_END;

		coder->sequence = SEQ_STREAM_PADDING;
		FALLTHROUGH;
	}

	case SEQ_STREAM_PADDING:
		assert(coder->concatenated);

		// Skip over possible Stream Padding.
		while (true) {
			if (*in_pos >= in_size) {
				// Unless LZMA_FINISH was used, we cannot
				// know if there's more input coming later.
				if (action != LZMA_FINISH)
					return LZMA_OK;

				// Stream Padding must be a multiple of
				// four bytes.
				return coder->pos == 0
						? LZMA_STREAM_END
						: LZMA_DATA_ERROR;
			}

			// If the byte is not zero, it probably indicates
			// beginning of a new Stream (or the file is corrupt).
			if (in[*in_pos] != 0x00)
				break;

			++*in_pos;
			++coder->progress_in;
			coder->pos = (coder->pos + 1) & 3;
		}

		// Stream Padding must be a multiple of four bytes (empty
		// Stream Padding is OK).
		if (coder->pos != 0) {
			++*in_pos;
			++coder->progress_in;
			return LZMA_DATA_ERROR;
		}

		// Prepare to decode the next Stream.
		return_if_error(stream_decoder_reset(coder, allocator));
		break;

	case SEQ_ERROR:
		if (!coder->fail_fast) {
			// Let the application get all data before the point
			// where the error was detected. This matches the
			// behavior of single-threaded use.
			//
			// FIXME? Some errors (LZMA_MEM_ERROR) don't get here,
			// they are returned immediately. Thus in rare cases
			// the output will be less than in the single-threaded
			// mode. Maybe this doesn't matter much in practice.
			return_if_error(read_output_and_wait(coder, allocator,
					out, out_pos, out_size,
					NULL, true, &wait_abs, &has_blocked));

			// We get here only if the error happened in the main
			// thread, for example, unsupported Block Header.
			if (!lzma_outq_is_empty(&coder->outq))
				return LZMA_OK;
		}

		// We only get here if no errors were detected by the worker
		// threads. Errors from worker threads would have already been
		// returned by the call to read_output_and_wait() above.
		return coder->pending_error;

	default:
		assert(0);
		return LZMA_PROG_ERROR;
	}

	// Never reached
}
1800
1801
1802static void
1803stream_decoder_mt_end(void *coder_ptr, const lzma_allocator *allocator)
1804{
1805 struct lzma_stream_coder *coder = coder_ptr;
1806
1807 threads_end(coder, allocator);
1808 lzma_outq_end(&coder->outq, allocator);
1809
1810 lzma_next_end(&coder->block_decoder, allocator);
1811 lzma_filters_free(coder->filters, allocator);
1812 lzma_index_hash_end(coder->index_hash, allocator);
1813
1814 lzma_free(coder, allocator);
1815 return;
1816}
1817
1818
1819static lzma_check
1820stream_decoder_mt_get_check(const void *coder_ptr)
1821{
1822 const struct lzma_stream_coder *coder = coder_ptr;
1823 return coder->stream_flags.check;
1824}
1825
1826
/// \brief      Report memory usage and get/set the stop memory limit
///
/// Backs lzma_memusage(), lzma_memlimit_get(), and lzma_memlimit_set()
/// for the threaded decoder.
///
/// \param      coder_ptr     struct lzma_stream_coder *
/// \param      memusage      Out: current memory usage (incl. cached memory)
/// \param      old_memlimit  Out: the memlimit_stop value currently in effect
/// \param      new_memlimit  If non-zero, the new value for memlimit_stop
///
/// \return     LZMA_OK on success; LZMA_MEMLIMIT_ERROR if new_memlimit is
///             below the current usage.
static lzma_ret
stream_decoder_mt_memconfig(void *coder_ptr, uint64_t *memusage,
		uint64_t *old_memlimit, uint64_t new_memlimit)
{
	// NOTE: This function gets/sets memlimit_stop. For now,
	// memlimit_threading cannot be modified after initialization.
	//
	// *memusage will include cached memory too. Excluding cached memory
	// would be misleading and it wouldn't help the applications to
	// know how much memory is actually needed to decompress the file
	// because the higher the number of threads and the memlimits are
	// the more memory the decoder may use.
	//
	// Setting a new limit includes the cached memory too and too low
	// limits will be rejected. Alternative could be to free the cached
	// memory immediately if that helps to bring the limit down but
	// the current way is the simplest. It's unlikely that limit needs
	// to be lowered in the middle of a file anyway; the typical reason
	// to want a new limit is to increase after LZMA_MEMLIMIT_ERROR
	// and even such use isn't common.
	struct lzma_stream_coder *coder = coder_ptr;

	// The mem_* counters are read under coder->mutex since they are
	// shared with the worker threads.
	mythread_sync(coder->mutex) {
		*memusage = coder->mem_direct_mode
				+ coder->mem_in_use
				+ coder->mem_cached
				+ coder->outq.mem_allocated;
	}

	// If no filter chains are allocated, *memusage may be zero.
	// Always return at least LZMA_MEMUSAGE_BASE.
	if (*memusage < LZMA_MEMUSAGE_BASE)
		*memusage = LZMA_MEMUSAGE_BASE;

	*old_memlimit = coder->memlimit_stop;

	// new_memlimit == 0 means "only query"; any other value must be
	// at least as large as the current usage to be accepted.
	if (new_memlimit != 0) {
		if (new_memlimit < *memusage)
			return LZMA_MEMLIMIT_ERROR;

		coder->memlimit_stop = new_memlimit;
	}

	return LZMA_OK;
}
1872
1873
/// \brief      Get progress information (backs lzma_get_progress())
///
/// The totals are the amounts already accumulated in lzma_stream_coder
/// plus the live counters of every initialized worker thread.
static void
stream_decoder_mt_get_progress(void *coder_ptr,
		uint64_t *progress_in, uint64_t *progress_out)
{
	struct lzma_stream_coder *coder = coder_ptr;

	// Lock coder->mutex to prevent finishing threads from moving their
	// progress info from the worker_thread structure to lzma_stream_coder.
	mythread_sync(coder->mutex) {
		*progress_in = coder->progress_in;
		*progress_out = coder->progress_out;

		// Each thread's own counters are protected by the
		// per-thread mutex, taken while coder->mutex is held.
		for (size_t i = 0; i < coder->threads_initialized; ++i) {
			mythread_sync(coder->threads[i].mutex) {
				*progress_in += coder->threads[i].progress_in;
				*progress_out += coder->threads[i]
						.progress_out;
			}
		}
	}

	return;
}
1897
1898
/// \brief      Initialize (or reinitialize) the threaded .xz Stream decoder
///
/// Validates the options, allocates the coder and its synchronization
/// primitives on first use, and resets all per-Stream state so the same
/// coder can be reused for a new file.
///
/// \param      next       Coder to initialize; next->coder is reused if
///                        it already exists.
/// \param      allocator  Custom allocator or NULL for malloc()/free()
/// \param      options    lzma_mt options (threads, flags, memlimits,
///                        timeout)
///
/// \return     LZMA_OK on success, or LZMA_OPTIONS_ERROR / LZMA_MEM_ERROR
static lzma_ret
stream_decoder_mt_init(lzma_next_coder *next, const lzma_allocator *allocator,
		const lzma_mt *options)
{
	struct lzma_stream_coder *coder;

	// Reject a thread count of zero or above the supported maximum.
	if (options->threads == 0 || options->threads > LZMA_THREADS_MAX)
		return LZMA_OPTIONS_ERROR;

	// Reject any flag bits this decoder doesn't know about.
	if (options->flags & ~LZMA_SUPPORTED_FLAGS)
		return LZMA_OPTIONS_ERROR;

	lzma_next_coder_init(&stream_decoder_mt_init, next, allocator);

	coder = next->coder;
	if (!coder) {
		// First-time initialization: allocate the coder and its
		// mutex/condvar, cleaning up on partial failure.
		coder = lzma_alloc(sizeof(struct lzma_stream_coder), allocator);
		if (coder == NULL)
			return LZMA_MEM_ERROR;

		next->coder = coder;

		if (mythread_mutex_init(&coder->mutex)) {
			lzma_free(coder, allocator);
			return LZMA_MEM_ERROR;
		}

		if (mythread_cond_init(&coder->cond)) {
			mythread_mutex_destroy(&coder->mutex);
			lzma_free(coder, allocator);
			return LZMA_MEM_ERROR;
		}

		next->code = &stream_decode_mt;
		next->end = &stream_decoder_mt_end;
		next->get_check = &stream_decoder_mt_get_check;
		next->memconfig = &stream_decoder_mt_memconfig;
		next->get_progress = &stream_decoder_mt_get_progress;

		// Mark the members that hold allocations as empty so that
		// cleanup is safe even if a later step fails.
		coder->filters[0].id = LZMA_VLI_UNKNOWN;
		memzero(&coder->outq, sizeof(coder->outq));

		coder->block_decoder = LZMA_NEXT_CODER_INIT;
		coder->mem_direct_mode = 0;

		coder->index_hash = NULL;
		coder->threads = NULL;
		coder->threads_free = NULL;
		coder->threads_initialized = 0;
	}

	// Cleanup old filter chain if one remains after unfinished decoding
	// of a previous Stream.
	lzma_filters_free(coder->filters, allocator);

	// By allocating threads from scratch we can start memory-usage
	// accounting from scratch, too. Changes in filter and block sizes may
	// affect number of threads.
	//
	// Reusing threads doesn't seem worth it. Unlike the single-threaded
	// decoder, with some types of input file combinations reusing
	// could leave quite a lot of memory allocated but unused (first
	// file could allocate a lot, the next files could use fewer
	// threads and some of the allocations from the first file would not
	// get freed unless memlimit_threading forces us to clear caches).
	//
	// NOTE: The direct mode decoder isn't freed here if one exists.
	// It will be reused or freed as needed in the main loop.
	threads_end(coder, allocator);

	// All memusage counters start at 0 (including mem_direct_mode).
	// The little extra that is needed for the structs in this file
	// get accounted well enough by the filter chain memory usage
	// which adds LZMA_MEMUSAGE_BASE for each chain. However,
	// stream_decoder_mt_memconfig() has to handle this specially so that
	// it will never return less than LZMA_MEMUSAGE_BASE as memory usage.
	coder->mem_in_use = 0;
	coder->mem_cached = 0;
	coder->mem_next_block = 0;

	coder->progress_in = 0;
	coder->progress_out = 0;

	coder->sequence = SEQ_STREAM_HEADER;
	coder->thread_error = LZMA_OK;
	coder->pending_error = LZMA_OK;
	coder->thr = NULL;

	coder->timeout = options->timeout;

	// Clamp the limits to at least 1 so they are always nonzero, and
	// keep memlimit_threading <= memlimit_stop.
	coder->memlimit_threading = my_max(1, options->memlimit_threading);
	coder->memlimit_stop = my_max(1, options->memlimit_stop);
	if (coder->memlimit_threading > coder->memlimit_stop)
		coder->memlimit_threading = coder->memlimit_stop;

	// Cache the option flags as individual booleans.
	coder->tell_no_check = (options->flags & LZMA_TELL_NO_CHECK) != 0;
	coder->tell_unsupported_check
			= (options->flags & LZMA_TELL_UNSUPPORTED_CHECK) != 0;
	coder->tell_any_check = (options->flags & LZMA_TELL_ANY_CHECK) != 0;
	coder->ignore_check = (options->flags & LZMA_IGNORE_CHECK) != 0;
	coder->concatenated = (options->flags & LZMA_CONCATENATED) != 0;
	coder->fail_fast = (options->flags & LZMA_FAIL_FAST) != 0;

	coder->first_stream = true;
	coder->out_was_filled = false;
	coder->pos = 0;

	coder->threads_max = options->threads;

	// The output queue capacity follows the maximum thread count.
	return_if_error(lzma_outq_init(&coder->outq, allocator,
			coder->threads_max));

	return stream_decoder_reset(coder, allocator);
}
2013
2014
/// \brief      Public entry point for the threaded .xz Stream decoder
///
/// NOTE: lzma_next_strm_init() is a macro that runs
/// stream_decoder_mt_init() and makes this function return early on
/// failure.
extern LZMA_API(lzma_ret)
lzma_stream_decoder_mt(lzma_stream *strm, const lzma_mt *options)
{
	lzma_next_strm_init(stream_decoder_mt_init, strm, options);

	// Only LZMA_RUN and LZMA_FINISH are accepted by this decoder;
	// flushing actions are not supported.
	strm->internal->supported_actions[LZMA_RUN] = true;
	strm->internal->supported_actions[LZMA_FINISH] = true;

	return LZMA_OK;
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette