VirtualBox

source: vbox/trunk/src/libs/liblzma-5.8.1/rangecoder/range_decoder.h

Last change on this file was r108913, checked in by vboxsync, 4 weeks ago:

libs/liblzma: Liblzma OSE fix. jiraref:VBP-1635

// SPDX-License-Identifier: 0BSD

///////////////////////////////////////////////////////////////////////////////
//
/// \file       range_decoder.h
/// \brief      Range Decoder
///
//  Authors:    Igor Pavlov
//              Lasse Collin
//
///////////////////////////////////////////////////////////////////////////////

#ifndef LZMA_RANGE_DECODER_H
#define LZMA_RANGE_DECODER_H

#include "range_common.h"


// Choose which range decoder variants to use with a bitmask.
// If no bits are set, only the basic version is used.
// If more than one version is selected for the same feature,
// the last one on the list below is used.
//
// Bitwise-or of the following enables the branchless C versions:
//   0x01   normal bittrees
//   0x02   fixed-sized reverse bittrees
//   0x04   variable-sized reverse bittrees (not faster)
//   0x08   matched literal (not faster)
//
// GCC & Clang compatible x86-64 inline assembly:
//   0x010   normal bittrees
//   0x020   fixed-sized reverse bittrees
//   0x040   variable-sized reverse bittrees
//   0x080   matched literal
//   0x100   direct bits
//
// The default can be overridden at build time by defining
// LZMA_RANGE_DECODER_CONFIG to the desired mask.
//
// 2024-02-22: Feedback from benchmarks:
// - Branchless C (0x003) can be better than the basic version on x86-64,
//   but it's often slightly worse on other archs. Since asm is much better
//   on x86-64, branchless C is not used at all.
// - With x86-64 asm, there are slight differences between GCC and Clang
//   and different processors. Overall 0x1F0 seems to be the best choice.
#ifndef LZMA_RANGE_DECODER_CONFIG
#	if defined(__x86_64__) && !defined(__ILP32__) \
			&& !defined(__NVCOMPILER) \
			&& (defined(__GNUC__) || defined(__clang__))
#		define LZMA_RANGE_DECODER_CONFIG 0x1F0
#	else
#		define LZMA_RANGE_DECODER_CONFIG 0
#	endif
#endif
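
// Build-time override example (illustrative only): the mask is just a
// preprocessor constant, so a port could select e.g. the branchless C
// bittree variants from the compiler command line:
//
//     cc -DLZMA_RANGE_DECODER_CONFIG=0x03 -c lzma_decoder.c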


// Negative RC_BIT_MODEL_TOTAL but the lowest RC_MOVE_BITS are flipped.
// This is useful for updating probability variables in branchless decoding:
//
//     uint32_t decoded_bit = ...;
//     probability tmp = RC_BIT_MODEL_OFFSET;
//     tmp &= decoded_bit - 1;
//     prob -= (prob + tmp) >> RC_MOVE_BITS;
#define RC_BIT_MODEL_OFFSET \
	((UINT32_C(1) << RC_MOVE_BITS) - 1 - RC_BIT_MODEL_TOTAL)

typedef struct {
	uint32_t range;
	uint32_t code;
	uint32_t init_bytes_left;
} lzma_range_decoder;


/// Reads the first five bytes to initialize the range decoder.
static inline lzma_ret
rc_read_init(lzma_range_decoder *rc, const uint8_t *restrict in,
		size_t *restrict in_pos, size_t in_size)
{
	while (rc->init_bytes_left > 0) {
		if (*in_pos == in_size)
			return LZMA_OK;

		// The first byte is always 0x00. It could have been omitted
		// in LZMA2 but it wasn't, so one byte is wasted in every
		// LZMA2 chunk.
		if (rc->init_bytes_left == 5 && in[*in_pos] != 0x00)
			return LZMA_DATA_ERROR;

		rc->code = (rc->code << 8) | in[*in_pos];
		++*in_pos;
		--rc->init_bytes_left;
	}

	return LZMA_STREAM_END;
}
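
// A minimal caller sketch (hypothetical surrounding code): the function is
// resumable, so a decoder calls it again with more input whenever it
// returns LZMA_OK:
//
//     const lzma_ret ret = rc_read_init(&coder->rc, in, in_pos, in_size);
//     if (ret == LZMA_OK)
//             return LZMA_OK;         // Ran out of input; call again later.
//     if (ret == LZMA_DATA_ERROR)
//             return LZMA_DATA_ERROR; // The first byte wasn't 0x00.
//     // ret == LZMA_STREAM_END: initialization is complete.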


/// Makes local copies of the range decoder and the *in_pos variable. Doing
/// this improves speed significantly. The range decoder macros also expect
/// the variables 'in' and 'in_size' to be defined.
#define rc_to_local(range_decoder, in_pos, fast_mode_in_required) \
	lzma_range_decoder rc = range_decoder; \
	const uint8_t *rc_in_ptr = in + (in_pos); \
	const uint8_t *rc_in_end = in + in_size; \
	const uint8_t *rc_in_fast_end \
			= (rc_in_end - rc_in_ptr) <= (fast_mode_in_required) \
			? rc_in_ptr \
			: rc_in_end - (fast_mode_in_required); \
	(void)rc_in_fast_end; /* Silence a warning with HAVE_SMALL. */ \
	uint32_t rc_bound


/// Evaluates to true if there is enough input remaining to use fast mode.
#define rc_is_fast_allowed() (rc_in_ptr < rc_in_fast_end)


/// Stores the local copies back to the range decoder structure.
#define rc_from_local(range_decoder, in_pos) \
do { \
	range_decoder = rc; \
	in_pos = (size_t)(rc_in_ptr - in); \
} while (0)
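
// A sketch of how a decoder might frame its main loop with these macros
// (hypothetical caller; 48 is just an example for fast_mode_in_required):
//
//     rc_to_local(coder->rc, *in_pos, 48);
//     while (rc_is_fast_allowed()) {
//             // Decode with the unchecked (fast) macro variants here;
//             // enough input is guaranteed for one iteration.
//     }
//     // Continue with the bounds-checked "_safe" variants.
//     rc_from_local(coder->rc, *in_pos);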


/// Resets the range decoder structure.
#define rc_reset(range_decoder) \
do { \
	(range_decoder).range = UINT32_MAX; \
	(range_decoder).code = 0; \
	(range_decoder).init_bytes_left = 5; \
} while (0)


/// When decoding has been properly finished, rc.code is always zero unless
/// the input stream is corrupt. So checking this can catch some corrupt
/// files especially if they don't have any other integrity check.
#define rc_is_finished(range_decoder) \
	((range_decoder).code == 0)


// Read the next input byte if needed.
#define rc_normalize() \
do { \
	if (rc.range < RC_TOP_VALUE) { \
		rc.range <<= RC_SHIFT_BITS; \
		rc.code = (rc.code << RC_SHIFT_BITS) | *rc_in_ptr++; \
	} \
} while (0)
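
// For example (assuming RC_TOP_VALUE == UINT32_C(1) << 24 and
// RC_SHIFT_BITS == 8 from range_common.h): if rc.range == 0x00ABCDEF and
// the next input byte is 0x12, normalization gives
//
//     rc.range = 0xABCDEF00;
//     rc.code  = (rc.code << 8) | 0x12;
//
// so rc.range is at or above RC_TOP_VALUE again before a bit is decoded.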


/// Like rc_normalize() but if more input is needed and none is available,
/// "goto out" is used to jump out of the main decoder loop. The "_safe"
/// macros are used in the resumable decoder mode: the current sequence is
/// saved so that decoding can continue from that point later.
#define rc_normalize_safe(seq) \
do { \
	if (rc.range < RC_TOP_VALUE) { \
		if (rc_in_ptr == rc_in_end) { \
			coder->sequence = seq; \
			goto out; \
		} \
		rc.range <<= RC_SHIFT_BITS; \
		rc.code = (rc.code << RC_SHIFT_BITS) | *rc_in_ptr++; \
	} \
} while (0)


/// Start decoding a bit. This must be used together with rc_update_0()
/// and rc_update_1():
///
///     rc_if_0(prob) {
///             rc_update_0(prob);
///             // Do something
///     } else {
///             rc_update_1(prob);
///             // Do something else
///     }
///
#define rc_if_0(prob) \
	rc_normalize(); \
	rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob); \
	if (rc.code < rc_bound)


#define rc_if_0_safe(prob, seq) \
	rc_normalize_safe(seq); \
	rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob); \
	if (rc.code < rc_bound)


/// Update the range decoder state and the used probability variable to
/// match a decoded bit of 0.
///
/// The x86-64 assembly uses the commented method but it seems that,
/// at least on x86-64, the first version is slightly faster as C code.
#define rc_update_0(prob) \
do { \
	rc.range = rc_bound; \
	prob += (RC_BIT_MODEL_TOTAL - (prob)) >> RC_MOVE_BITS; \
	/* prob -= ((prob) + RC_BIT_MODEL_OFFSET) >> RC_MOVE_BITS; */ \
} while (0)
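
// A quick numeric check of the commented variant (a sketch assuming the
// range_common.h values and a 16-bit probability type): with prob == 1024
// both forms give 1056 because the wraparound cancels out in 16 bits:
//
//     1024 + ((2048 - 1024) >> 5)                     == 1056
//     (1024 - ((1024 + 0xFFFFF81F) >> 5)) & 0xFFFF    == 1056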


/// Update the range decoder state and the used probability variable to
/// match a decoded bit of 1.
#define rc_update_1(prob) \
do { \
	rc.range -= rc_bound; \
	rc.code -= rc_bound; \
	prob -= (prob) >> RC_MOVE_BITS; \
} while (0)


/// Decodes one bit and runs action0 or action1 depending on the decoded bit.
/// This macro is used as the last step in bittree reverse decoders since
/// those don't use "symbol" for anything else than indexing the probability
/// arrays.
#define rc_bit_last(prob, action0, action1) \
do { \
	rc_if_0(prob) { \
		rc_update_0(prob); \
		action0; \
	} else { \
		rc_update_1(prob); \
		action1; \
	} \
} while (0)


#define rc_bit_last_safe(prob, action0, action1, seq) \
do { \
	rc_if_0_safe(prob, seq) { \
		rc_update_0(prob); \
		action0; \
	} else { \
		rc_update_1(prob); \
		action1; \
	} \
} while (0)


/// Decodes one bit, updates "symbol", and runs action0 or action1 depending
/// on the decoded bit.
#define rc_bit(prob, action0, action1) \
	rc_bit_last(prob, \
		symbol <<= 1; action0, \
		symbol = (symbol << 1) + 1; action1);


#define rc_bit_safe(prob, action0, action1, seq) \
	rc_bit_last_safe(prob, \
		symbol <<= 1; action0, \
		symbol = (symbol << 1) + 1; action1, \
		seq);

// Unroll fixed-sized bittree decoding.
//
// A compile-time constant in final_add can be used to get rid of the high bit
// from symbol that is used for the array indexing (1U << bittree_bits).
// final_add may also be used to add offset to the result (LZMA length
// decoder does that).
//
// The reason to have final_add here is that in the asm code the addition
// can be done for free: in x86-64 there is SBB instruction with -1 as
// the immediate value, and final_add is combined with that value.
#define rc_bittree_bit(prob) \
	rc_bit(prob, , )

#define rc_bittree3(probs, final_add) \
do { \
	symbol = 1; \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	symbol += (uint32_t)(final_add); \
} while (0)

#define rc_bittree6(probs, final_add) \
do { \
	symbol = 1; \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	symbol += (uint32_t)(final_add); \
} while (0)

#define rc_bittree8(probs, final_add) \
do { \
	symbol = 1; \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	symbol += (uint32_t)(final_add); \
} while (0)
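
// For example, after the three rc_bittree_bit() steps in rc_bittree3() the
// raw symbol is in [8, 15]: three decoded bits below the leading index bit
// (1U << 3). A hypothetical caller wanting a result in [2, 9] would pass a
// compile-time constant:
//
//     rc_bittree3(probs, (uint32_t)(-8 + 2)); // symbol ends up in [2, 9]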


// Fixed-sized reverse bittree
#define rc_bittree_rev4(probs) \
do { \
	symbol = 0; \
	rc_bit_last(probs[symbol + 1], , symbol += 1); \
	rc_bit_last(probs[symbol + 2], , symbol += 2); \
	rc_bit_last(probs[symbol + 4], , symbol += 4); \
	rc_bit_last(probs[symbol + 8], , symbol += 8); \
} while (0)
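
// The bits accumulate in reverse (least significant first): if the decoded
// bits in decoding order are b0, b1, b2 and b3, the result is
//
//     symbol = b0 + 2*b1 + 4*b2 + 8*b3;
//
// which is why each step adds 1, 2, 4 or 8 instead of shifting.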


// Decode one bit from variable-sized reverse bittree. The loop is done
// in the code that uses this macro. This could be changed if the assembly
// version benefited from having the loop done in assembly but it didn't
// seem so in early 2024.
//
// Also, if the loop was done here, the loop counter would likely be local
// to the macro so that it wouldn't modify yet another input variable.
// If a _safe version of a macro with a loop was done then a modifiable
// input variable couldn't be avoided though.
#define rc_bit_add_if_1(probs, dest, value_to_add_if_1) \
	rc_bit(probs[symbol], \
		, \
		dest += value_to_add_if_1);
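
// A sketch of the caller-side loop (hypothetical variable names), building
// "dest" least-significant-bit first out of "limit" bits:
//
//     symbol = 1;
//     uint32_t bit_num = 0;
//     do {
//             rc_bit_add_if_1(probs, dest, UINT32_C(1) << bit_num);
//             ++bit_num;
//     } while (bit_num < limit);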


// Matched literal
#define decode_with_match_bit \
	t_match_byte <<= 1; \
	t_match_bit = t_match_byte & t_offset; \
	t_subcoder_index = t_offset + t_match_bit + symbol; \
	rc_bit(probs[t_subcoder_index], \
			t_offset &= ~t_match_bit, \
			t_offset &= t_match_bit)

#define rc_matched_literal(probs_base_var, match_byte) \
do { \
	uint32_t t_match_byte = (match_byte); \
	uint32_t t_match_bit; \
	uint32_t t_subcoder_index; \
	uint32_t t_offset = 0x100; \
	symbol = 1; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
} while (0)
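
// How the subtree selection works: t_offset starts as 0x100 and stays
// nonzero only while every decoded bit has matched the corresponding bit
// of match_byte. While they match, t_subcoder_index points into one of two
// 0x100-sized subtrees (base 0x100 or 0x200) selected by the match byte's
// bit; after the first mismatch t_offset becomes 0 and the indexing falls
// back to the normal literal tree (plain "symbol").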


/// Decode a bit without using a probability.
//
// NOTE: GCC 13 and Clang/LLVM 16 can, at least on x86-64, optimize the bound
// calculation to use an arithmetic right shift so there's no need to provide
// the alternative code which, according to C99/C11/C23 6.3.1.3-p3 isn't
// perfectly portable: rc_bound = (uint32_t)((int32_t)rc.code >> 31);
#define rc_direct(dest, count_var) \
do { \
	dest = (dest << 1) + 1; \
	rc_normalize(); \
	rc.range >>= 1; \
	rc.code -= rc.range; \
	rc_bound = UINT32_C(0) - (rc.code >> 31); \
	dest += rc_bound; \
	rc.code += rc.range & rc_bound; \
} while (--count_var > 0)
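
// The mask trick: after "rc.code -= rc.range" the high bit of rc.code
// tells whether the subtraction went negative (decoded bit 0), so
// rc_bound becomes either zero or all ones:
//
//     bit 1:  rc_bound = 0;          dest += 0;  rc.code stays reduced
//     bit 0:  rc_bound = 0xFFFFFFFF; dest -= 1;  rc.code += rc.range (undo)
//
// Together with "dest = (dest << 1) + 1" this appends the decoded bit.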



#define rc_direct_safe(dest, count_var, seq) \
do { \
	rc_normalize_safe(seq); \
	rc.range >>= 1; \
	rc.code -= rc.range; \
	rc_bound = UINT32_C(0) - (rc.code >> 31); \
	rc.code += rc.range & rc_bound; \
	dest = (dest << 1) + (rc_bound + 1); \
} while (--count_var > 0)


//////////////////
// Branchless C //
//////////////////

/// Decode a bit using a branchless method. This reduces the number of
/// mispredicted branches and thus can improve speed.
#define rc_c_bit(prob, action_bit, action_neg) \
do { \
	probability *p = &(prob); \
	rc_normalize(); \
	rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * *p; \
	uint32_t rc_mask = rc.code >= rc_bound; /* rc_mask = decoded bit */ \
	action_bit; /* action when rc_mask is 0 or 1 */ \
	/* rc_mask becomes 0 if bit is 0 and 0xFFFFFFFF if bit is 1: */ \
	rc_mask = 0U - rc_mask; \
	rc.range &= rc_mask; /* If bit 0: set rc.range = 0 */ \
	rc_bound ^= rc_mask; \
	rc_bound -= rc_mask; /* If bit 1: rc_bound = 0U - rc_bound */ \
	rc.range += rc_bound; \
	rc_bound &= rc_mask; \
	rc.code += rc_bound; \
	action_neg; /* action when rc_mask is 0 or 0xFFFFFFFF */ \
	rc_mask = ~rc_mask; /* If bit 0: all bits are set in rc_mask */ \
	rc_mask &= RC_BIT_MODEL_OFFSET; \
	*p -= (*p + rc_mask) >> RC_MOVE_BITS; \
} while (0)
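
// Tracing the two paths through the mask algebra:
//
//     bit 0 (rc_mask == 0):          bit 1 (rc_mask == 0xFFFFFFFF):
//         rc.range = rc_bound;           rc.range -= rc_bound;
//         rc.code unchanged;             rc.code -= rc_bound;
//
// which matches rc_update_0() and rc_update_1() respectively. The last
// three lines then apply the probability update described above at
// RC_BIT_MODEL_OFFSET.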


// Testing on x86-64 gives the impression that only the normal bittrees and
// the fixed-sized reverse bittrees are worth the branchless C code.
// It should be tested on other archs for which there isn't assembly code
// in this file.

// Using addition in "(symbol << 1) + rc_mask" allows use of x86 LEA
// or RISC-V SH1ADD instructions. Compilers might infer it from
// "(symbol << 1) | rc_mask" too if they see that the mask is 0 or 1 but
// the use of addition doesn't require such analysis from compilers.
#if LZMA_RANGE_DECODER_CONFIG & 0x01
#undef rc_bittree_bit
#define rc_bittree_bit(prob) \
	rc_c_bit(prob, \
		symbol = (symbol << 1) + rc_mask, \
		)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x01

#if LZMA_RANGE_DECODER_CONFIG & 0x02
#undef rc_bittree_rev4
#define rc_bittree_rev4(probs) \
do { \
	symbol = 0; \
	rc_c_bit(probs[symbol + 1], symbol += rc_mask, ); \
	rc_c_bit(probs[symbol + 2], symbol += rc_mask << 1, ); \
	rc_c_bit(probs[symbol + 4], symbol += rc_mask << 2, ); \
	rc_c_bit(probs[symbol + 8], symbol += rc_mask << 3, ); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x02

#if LZMA_RANGE_DECODER_CONFIG & 0x04
#undef rc_bit_add_if_1
#define rc_bit_add_if_1(probs, dest, value_to_add_if_1) \
	rc_c_bit(probs[symbol], \
		symbol = (symbol << 1) + rc_mask, \
		dest += (value_to_add_if_1) & rc_mask)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x04


#if LZMA_RANGE_DECODER_CONFIG & 0x08
#undef decode_with_match_bit
#define decode_with_match_bit \
	t_match_byte <<= 1; \
	t_match_bit = t_match_byte & t_offset; \
	t_subcoder_index = t_offset + t_match_bit + symbol; \
	rc_c_bit(probs[t_subcoder_index], \
		symbol = (symbol << 1) + rc_mask, \
		t_offset &= ~t_match_bit ^ rc_mask)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x08


////////////
// x86-64 //
////////////

#if LZMA_RANGE_DECODER_CONFIG & 0x1F0

// rc_asm_y and rc_asm_n are used as arguments to macros to control which
// strings to include or omit.
#define rc_asm_y(str) str
#define rc_asm_n(str)

// There are a few possible variations for normalization.
// This is the smallest variant which is also used by LZMA SDK.
//
//   - This has partial register write (the MOV from (%[in_ptr])).
//
//   - INC saves one byte in code size over ADD. False dependency on
//     partial flags from INC shouldn't become a problem on any processor
//     because the instructions after normalization don't read the flags
//     until SUB which sets all flags.
//
#define rc_asm_normalize \
	"cmp %[top_value], %[range]\n\t" \
	"jae 1f\n\t" \
	"shl %[shift_bits], %[code]\n\t" \
	"mov (%[in_ptr]), %b[code]\n\t" \
	"shl %[shift_bits], %[range]\n\t" \
	"inc %[in_ptr]\n" \
	"1:\n"
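
// Note that "shl %[shift_bits], %[code]" followed by the byte-sized
// "mov (%[in_ptr]), %b[code]" implements the C expression
// "rc.code = (rc.code << RC_SHIFT_BITS) | *rc_in_ptr++": the shift zeroes
// the low byte, so the partial-register MOV acts as the bitwise OR.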

// rc_asm_calc(prob) is roughly equivalent to the C version of rc_if_0(prob)...
//
//     rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob);
//     if (rc.code < rc_bound)
//
// ...but the bound is stored in "range":
//
//     t0 = range;
//     range = (range >> RC_BIT_MODEL_TOTAL_BITS) * (prob);
//     t0 -= range;
//     t1 = code;
//     code -= range;
//
// The carry flag (CF) from the last subtraction holds the negation of
// the decoded bit (if CF == 0 then the decoded bit is 1).
// The values in t0 and t1 are needed for rc_update_0(prob) and
// rc_update_1(prob). If the bit is 0, rc_update_0(prob)...
//
//     rc.range = rc_bound;
//
// ...has already been done but the "code -= range" has to be reverted using
// the old value stored in t1. (Also, prob needs to be updated.)
//
// If the bit is 1, rc_update_1(prob)...
//
//     rc.range -= rc_bound;
//     rc.code -= rc_bound;
//
// ...is already done for "code" but the value for "range" needs to be taken
// from t0. (Also, prob needs to be updated here as well.)
//
// The assignments from t0 and t1 can be done in a branchless manner with CMOV
// after the instructions from this macro. The CF from SUB tells which moves
// are needed.
#define rc_asm_calc(prob) \
	"mov %[range], %[t0]\n\t" \
	"shr %[bit_model_total_bits], %[range]\n\t" \
	"imul %[" prob "], %[range]\n\t" \
	"sub %[range], %[t0]\n\t" \
	"mov %[code], %[t1]\n\t" \
	"sub %[range], %[code]\n\t"

// Also, prob needs to be updated: The update math depends on the decoded bit.
// It can be expressed in a few slightly different ways but this is fairly
// convenient here:
//
//     prob -= (prob + (bit ? 0 : RC_BIT_MODEL_OFFSET)) >> RC_MOVE_BITS;
//
// To do it in a branchless way when the negation of the decoded bit is in CF,
// both "prob" and "prob + RC_BIT_MODEL_OFFSET" are needed. Then the desired
// value can be picked with CMOV. The addition can be done using LEA without
// affecting CF.
//
// (This prob update method is a tiny bit different from LZMA SDK 23.01.
// In the LZMA SDK a single register is reserved solely for a constant to
// be used with CMOV when updating prob. That is fine since there are enough
// free registers to do so. The method used here uses one fewer register,
// which is valuable with inline assembly.)
//
// * * *
//
// In bittree decoding, each (unrolled) loop iteration decodes one bit
// and needs one prob variable. To make it faster, the prob variable of
// the iteration N+1 is loaded during iteration N. There are two possible
// prob variables to choose from for N+1. Both are loaded from memory and
// the correct one is chosen with CMOV using the same CF as is used for
// other things described above.
//
// This preloading/prefetching requires an extra register. To avoid
// useless moves from "preloaded prob register" to "current prob register",
// the macros swap between the two registers for odd and even iterations.
//
// * * *
//
// Finally, the decoded bit has to be stored in "symbol". Since the negation
// of the bit is in CF, this can be done with SBB: symbol -= CF - 1. That is,
// if the decoded bit is 0 (CF==1) the operation is a no-op "symbol -= 0"
// and when bit is 1 (CF==0) the operation is "symbol -= 0 - 1" which is
// the same as "symbol += 1".
//
// The instructions for all things are intertwined for a few reasons:
// - freeing temporary registers for new use
// - not modifying CF too early
// - instruction scheduling
//
// The first and last iterations can cheat a little. For example,
// on the first iteration "symbol" is known to start from 1 so it
// doesn't need to be read; it can even be immediately initialized
// to 2 to prepare for the second iteration of the loop.
//
// * * *
//
// a = number of the current prob variable (0 or 1)
// b = number of the next prob variable (1 or 0)
// *_only = rc_asm_y or _n to include or exclude code marked with them
#define rc_asm_bittree(a, b, first_only, middle_only, last_only) \
	first_only( \
		"movzwl 2(%[probs_base]), %[prob" #a "]\n\t" \
		"mov $2, %[symbol]\n\t" \
		"movzwl 4(%[probs_base]), %[prob" #b "]\n\t" \
	) \
	middle_only( \
		/* Note the scaling of 4 instead of 2: */ \
		"movzwl (%[probs_base], %q[symbol], 4), %[prob" #b "]\n\t" \
	) \
	last_only( \
		"add %[symbol], %[symbol]\n\t" \
	) \
	\
	rc_asm_normalize \
	rc_asm_calc("prob" #a) \
	\
	"cmovae %[t0], %[range]\n\t" \
	\
	first_only( \
		"movzwl 6(%[probs_base]), %[t0]\n\t" \
		"cmovae %[t0], %[prob" #b "]\n\t" \
	) \
	middle_only( \
		"movzwl 2(%[probs_base], %q[symbol], 4), %[t0]\n\t" \
		"lea (%q[symbol], %q[symbol]), %[symbol]\n\t" \
		"cmovae %[t0], %[prob" #b "]\n\t" \
	) \
	\
	"lea %c[bit_model_offset](%q[prob" #a "]), %[t0]\n\t" \
	"cmovb %[t1], %[code]\n\t" \
	"mov %[symbol], %[t1]\n\t" \
	"cmovae %[prob" #a "], %[t0]\n\t" \
	\
	first_only( \
		"sbb $-1, %[symbol]\n\t" \
	) \
	middle_only( \
		"sbb $-1, %[symbol]\n\t" \
	) \
	last_only( \
		"sbb %[last_sbb], %[symbol]\n\t" \
	) \
	\
	"shr %[move_bits], %[t0]\n\t" \
	"sub %[t0], %[prob" #a "]\n\t" \
	/* Scaling of 1 instead of 2 because symbol <<= 1. */ \
	"mov %w[prob" #a "], (%[probs_base], %q[t1], 1)\n\t"

// NOTE: The order of variables in __asm__ can affect speed and code size.
#define rc_asm_bittree_n(probs_base_var, final_add, asm_str) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	uint32_t t_prob0; \
	uint32_t t_prob1; \
	\
	__asm__( \
		asm_str \
		: \
		[range] "+&r"(rc.range), \
		[code] "+&r"(rc.code), \
		[t0] "=&r"(t0), \
		[t1] "=&r"(t1), \
		[prob0] "=&r"(t_prob0), \
		[prob1] "=&r"(t_prob1), \
		[symbol] "=&r"(symbol), \
		[in_ptr] "+&r"(rc_in_ptr) \
		: \
		[probs_base] "r"(probs_base_var), \
		[last_sbb] "n"(-1 - (final_add)), \
		[top_value] "n"(RC_TOP_VALUE), \
		[shift_bits] "n"(RC_SHIFT_BITS), \
		[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \
		[bit_model_offset] "n"(RC_BIT_MODEL_OFFSET), \
		[move_bits] "n"(RC_MOVE_BITS) \
		: \
		"cc", "memory"); \
} while (0)


#if LZMA_RANGE_DECODER_CONFIG & 0x010
#undef rc_bittree3
#define rc_bittree3(probs_base_var, final_add) \
	rc_asm_bittree_n(probs_base_var, final_add, \
		rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_n, rc_asm_y) \
	)

#undef rc_bittree6
#define rc_bittree6(probs_base_var, final_add) \
	rc_asm_bittree_n(probs_base_var, final_add, \
		rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_n, rc_asm_y) \
	)

#undef rc_bittree8
#define rc_bittree8(probs_base_var, final_add) \
	rc_asm_bittree_n(probs_base_var, final_add, \
		rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_n, rc_asm_y) \
	)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x010


// Fixed-sized reverse bittree
//
// This uses the indexing that constructs the final value in symbol directly.
//   add    = 1,  2,  4,  8
//   dcur   = -,  4,  8, 16
//   dnext0 = 4,  8, 16,  -
//   dnext1 = 6, 12, 24,  -
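//
// (These are byte offsets into the array of 2-byte probs. Note that dnext1
// is always dnext0 + 2*add: when the next prob is preloaded, symbol doesn't
// yet include the bit currently being decoded, so the bit == 1 candidate
// lies "add" elements further on.)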
#define rc_asm_bittree_rev(a, b, add, dcur, dnext0, dnext1, \
		first_only, middle_only, last_only) \
	first_only( \
		"movzwl 2(%[probs_base]), %[prob" #a "]\n\t" \
		"xor %[symbol], %[symbol]\n\t" \
		"movzwl 4(%[probs_base]), %[prob" #b "]\n\t" \
	) \
	middle_only( \
		"movzwl " #dnext0 "(%[probs_base], %q[symbol], 2), " \
			"%[prob" #b "]\n\t" \
	) \
	\
	rc_asm_normalize \
	rc_asm_calc("prob" #a) \
	\
	"cmovae %[t0], %[range]\n\t" \
	\
	first_only( \
		"movzwl 6(%[probs_base]), %[t0]\n\t" \
		"cmovae %[t0], %[prob" #b "]\n\t" \
	) \
	middle_only( \
		"movzwl " #dnext1 "(%[probs_base], %q[symbol], 2), %[t0]\n\t" \
		"cmovae %[t0], %[prob" #b "]\n\t" \
	) \
	\
	"lea " #add "(%q[symbol]), %[t0]\n\t" \
	"cmovb %[t1], %[code]\n\t" \
	middle_only( \
		"mov %[symbol], %[t1]\n\t" \
	) \
	last_only( \
		"mov %[symbol], %[t1]\n\t" \
	) \
	"cmovae %[t0], %[symbol]\n\t" \
	"lea %c[bit_model_offset](%q[prob" #a "]), %[t0]\n\t" \
	"cmovae %[prob" #a "], %[t0]\n\t" \
	\
	"shr %[move_bits], %[t0]\n\t" \
	"sub %[t0], %[prob" #a "]\n\t" \
	first_only( \
		"mov %w[prob" #a "], 2(%[probs_base])\n\t" \
	) \
	middle_only( \
		"mov %w[prob" #a "], " \
			#dcur "(%[probs_base], %q[t1], 2)\n\t" \
	) \
	last_only( \
		"mov %w[prob" #a "], " \
			#dcur "(%[probs_base], %q[t1], 2)\n\t" \
	)

#if LZMA_RANGE_DECODER_CONFIG & 0x020
#undef rc_bittree_rev4
#define rc_bittree_rev4(probs_base_var) \
rc_asm_bittree_n(probs_base_var, 4, \
	rc_asm_bittree_rev(0, 1, 1, -, 4, 6, rc_asm_y, rc_asm_n, rc_asm_n) \
	rc_asm_bittree_rev(1, 0, 2, 4, 8, 12, rc_asm_n, rc_asm_y, rc_asm_n) \
	rc_asm_bittree_rev(0, 1, 4, 8, 16, 24, rc_asm_n, rc_asm_y, rc_asm_n) \
	rc_asm_bittree_rev(1, 0, 8, 16, -, -, rc_asm_n, rc_asm_n, rc_asm_y) \
)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x020


#if LZMA_RANGE_DECODER_CONFIG & 0x040
#undef rc_bit_add_if_1
#define rc_bit_add_if_1(probs_base_var, dest_var, value_to_add_if_1) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	uint32_t t2 = (value_to_add_if_1); \
	uint32_t t_prob; \
	uint32_t t_index; \
	\
	__asm__( \
		"movzwl (%[probs_base], %q[symbol], 2), %[prob]\n\t" \
		"mov %[symbol], %[index]\n\t" \
		\
		"add %[dest], %[t2]\n\t" \
		"add %[symbol], %[symbol]\n\t" \
		\
		rc_asm_normalize \
		rc_asm_calc("prob") \
		\
		"cmovae %[t0], %[range]\n\t" \
		"lea %c[bit_model_offset](%q[prob]), %[t0]\n\t" \
		"cmovb %[t1], %[code]\n\t" \
		"cmovae %[prob], %[t0]\n\t" \
		\
		"cmovae %[t2], %[dest]\n\t" \
		"sbb $-1, %[symbol]\n\t" \
		\
		"sar %[move_bits], %[t0]\n\t" \
		"sub %[t0], %[prob]\n\t" \
		"mov %w[prob], (%[probs_base], %q[index], 2)" \
		: \
		[range] "+&r"(rc.range), \
		[code] "+&r"(rc.code), \
		[t0] "=&r"(t0), \
		[t1] "=&r"(t1), \
		[prob] "=&r"(t_prob), \
		[index] "=&r"(t_index), \
		[symbol] "+&r"(symbol), \
		[t2] "+&r"(t2), \
		[dest] "+&r"(dest_var), \
		[in_ptr] "+&r"(rc_in_ptr) \
		: \
		[probs_base] "r"(probs_base_var), \
		[top_value] "n"(RC_TOP_VALUE), \
		[shift_bits] "n"(RC_SHIFT_BITS), \
		[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \
		[bit_model_offset] "n"(RC_BIT_MODEL_OFFSET), \
		[move_bits] "n"(RC_MOVE_BITS) \
		: \
		"cc", "memory"); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x040


// Literal decoding uses a normal 8-bit bittree but literal with match byte
// is more complex in picking the probability variable from the correct
// subtree. This doesn't use preloading/prefetching of the next prob because
// there are four choices instead of two.
//
// FIXME? The first iteration starts with symbol = 1 so it could be optimized
// by a tiny amount.
#define rc_asm_matched_literal(nonlast_only) \
	"add %[offset], %[symbol]\n\t" \
	"and %[offset], %[match_bit]\n\t" \
	"add %[match_bit], %[symbol]\n\t" \
	\
	"movzwl (%[probs_base], %q[symbol], 2), %[prob]\n\t" \
	\
	"add %[symbol], %[symbol]\n\t" \
	\
	nonlast_only( \
		"xor %[match_bit], %[offset]\n\t" \
		"add %[match_byte], %[match_byte]\n\t" \
	) \
	\
	rc_asm_normalize \
	rc_asm_calc("prob") \
	\
	"cmovae %[t0], %[range]\n\t" \
	"lea %c[bit_model_offset](%q[prob]), %[t0]\n\t" \
	"cmovb %[t1], %[code]\n\t" \
	"mov %[symbol], %[t1]\n\t" \
	"cmovae %[prob], %[t0]\n\t" \
	\
	nonlast_only( \
		"cmovae %[match_bit], %[offset]\n\t" \
		"mov %[match_byte], %[match_bit]\n\t" \
	) \
	\
	"sbb $-1, %[symbol]\n\t" \
	\
	"shr %[move_bits], %[t0]\n\t" \
	/* Undo symbol += match_bit + offset: */ \
	"and $0x1FF, %[symbol]\n\t" \
	"sub %[t0], %[prob]\n\t" \
	\
	/* Scaling of 1 instead of 2 because symbol <<= 1. */ \
	"mov %w[prob], (%[probs_base], %q[t1], 1)\n\t"


#if LZMA_RANGE_DECODER_CONFIG & 0x080
#undef rc_matched_literal
#define rc_matched_literal(probs_base_var, match_byte_value) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	uint32_t t_prob; \
	uint32_t t_match_byte = (uint32_t)(match_byte_value) << 1; \
	uint32_t t_match_bit = t_match_byte; \
	uint32_t t_offset = 0x100; \
	symbol = 1; \
	\
	__asm__( \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_n) \
		: \
		[range] "+&r"(rc.range), \
		[code] "+&r"(rc.code), \
		[t0] "=&r"(t0), \
		[t1] "=&r"(t1), \
		[prob] "=&r"(t_prob), \
		[match_bit] "+&r"(t_match_bit), \
		[symbol] "+&r"(symbol), \
		[match_byte] "+&r"(t_match_byte), \
		[offset] "+&r"(t_offset), \
		[in_ptr] "+&r"(rc_in_ptr) \
		: \
		[probs_base] "r"(probs_base_var), \
		[top_value] "n"(RC_TOP_VALUE), \
		[shift_bits] "n"(RC_SHIFT_BITS), \
		[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \
		[bit_model_offset] "n"(RC_BIT_MODEL_OFFSET), \
		[move_bits] "n"(RC_MOVE_BITS) \
		: \
		"cc", "memory"); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x080


// Doing the loop in asm instead of C seems to help a little.
#if LZMA_RANGE_DECODER_CONFIG & 0x100
#undef rc_direct
#define rc_direct(dest_var, count_var) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	\
	__asm__( \
		"2:\n\t" \
		"add %[dest], %[dest]\n\t" \
		"lea 1(%q[dest]), %[t1]\n\t" \
		\
		rc_asm_normalize \
		\
		"shr $1, %[range]\n\t" \
		"mov %[code], %[t0]\n\t" \
		"sub %[range], %[code]\n\t" \
		"cmovns %[t1], %[dest]\n\t" \
		"cmovs %[t0], %[code]\n\t" \
		"dec %[count]\n\t" \
		"jnz 2b\n\t" \
		: \
		[range] "+&r"(rc.range), \
		[code] "+&r"(rc.code), \
		[t0] "=&r"(t0), \
		[t1] "=&r"(t1), \
		[dest] "+&r"(dest_var), \
		[count] "+&r"(count_var), \
		[in_ptr] "+&r"(rc_in_ptr) \
		: \
		[top_value] "n"(RC_TOP_VALUE), \
		[shift_bits] "n"(RC_SHIFT_BITS) \
		: \
		"cc", "memory"); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x100

#endif // x86_64

#endif