Changeset 94612 in vbox
- Timestamp: Apr 15, 2022 1:10:57 AM
- Location: trunk
- Files: 3 edited
trunk/include/iprt/types.h (r94605 → r94612)

 /** The max exponent value for the RTFLOAT32U format. */
 #define RTFLOAT32U_EXP_MAX                   (255)
-/** The exponent bias underflow adjust for the RTFLOAT32U format.
- * @note 754-1985 sec 7.4, not mentioned in later standard versions. */
-#define RTFLOAT32U_EXP_BIAS_UNDERFLOW_ADJUST (192)
+/** The exponent bias overflow/underflow adjust for the RTFLOAT32U format.
+ * @note 754-1985 sec 7.3 & 7.4, not mentioned in later standard versions. */
+#define RTFLOAT32U_EXP_BIAS_ADJUST           (192)
 /** Fraction width (in bits) for the RTFLOAT32U format. */
 #define RTFLOAT32U_FRACTION_BITS             (23)
…
 /** The max exponent value for the RTFLOAT64U format. */
 #define RTFLOAT64U_EXP_MAX                   (2047)
-/** The exponent bias underflow adjust for the RTFLOAT64U format.
- * @note 754-1985 sec 7.4, not mentioned in later standard versions. */
-#define RTFLOAT64U_EXP_BIAS_UNDERFLOW_ADJUST (1536)
+/** The exponent bias overflow/underflow adjust for the RTFLOAT64U format.
+ * @note 754-1985 sec 7.3 & 7.4, not mentioned in later standard versions. */
+#define RTFLOAT64U_EXP_BIAS_ADJUST           (1536)
 /** Fraction width (in bits) for the RTFLOAT64U format. */
 #define RTFLOAT64U_FRACTION_BITS             (52)
…
 /** The max exponent value for the RTFLOAT80U format. */
 # define RTFLOAT80U_EXP_MAX                  (32767)
-/** The exponent bias underflow adjust for the RTFLOAT80U format.
- * @note 754-1985 sec 7.4, not mentioned in later standard versions. */
-# define RTFLOAT80U_EXP_BIAS_UNDERFLOW_ADJUST (24576)
+/** The exponent bias overflow/underflow adjust for the RTFLOAT80U format.
+ * @note 754-1985 sec 7.3 & 7.4, not mentioned in later standard versions. */
+# define RTFLOAT80U_EXP_BIAS_ADJUST          (24576)
 /** Fraction width (in bits) for the RTFLOAT80U format. */
 # define RTFLOAT80U_FRACTION_BITS            (63)
…
 /** The max exponent value for the RTFLOAT128U format. */
 #define RTFLOAT128U_EXP_MAX                  (32767)
-/** The exponent bias underflow adjust for the RTFLOAT128U format.
+/** The exponent bias overflow/underflow adjust for the RTFLOAT128U format.
  * @note This is stipulated based on RTFLOAT80U, it doesn't appear in any
  *       standard text as far as we know. */
-#define RTFLOAT128U_EXP_BIAS_UNDERFLOW_ADJUST (24576)
+#define RTFLOAT128U_EXP_BIAS_ADJUST          (24576)
 /** Fraction width (in bits) for the RTFLOAT128U format. */
 #define RTFLOAT128U_FRACTION_BITS            (112)
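The renamed constants cover both trap directions: IEEE 754-1985 sections 7.3 (overflow) and 7.4 (underflow) have the trap handler receive a result whose exponent has been rebiased by a fixed amount, 192 for single, 1536 for double, and for an extended format 3 * 2^(n-2) where n is the exponent field width (24576 for the 15-bit x87 case). The standalone check below is not part of the changeset; it only illustrates that reading of the constants, and the 3 * 2^(n-2) rule is an interpretation rather than a quote from the standard.

#include <assert.h>
#include <stdio.h>

/* The values defined in iprt/types.h after this changeset. */
#define RTFLOAT32U_EXP_BIAS_ADJUST   (192)
#define RTFLOAT64U_EXP_BIAS_ADJUST   (1536)
#define RTFLOAT80U_EXP_BIAS_ADJUST   (24576)
#define RTFLOAT128U_EXP_BIAS_ADJUST  (24576)

/* 754-1985 style bias adjust for an exponent field of cExpBits bits: 3 * 2^(cExpBits - 2). */
static unsigned BiasAdjustFromExpWidth(unsigned cExpBits)
{
    return 3u << (cExpBits - 2);
}

int main(void)
{
    assert(BiasAdjustFromExpWidth( 8) == RTFLOAT32U_EXP_BIAS_ADJUST);  /* single:    8-bit exponent */
    assert(BiasAdjustFromExpWidth(11) == RTFLOAT64U_EXP_BIAS_ADJUST);  /* double:   11-bit exponent */
    assert(BiasAdjustFromExpWidth(15) == RTFLOAT80U_EXP_BIAS_ADJUST);  /* extended: 15-bit exponent */
    assert(BiasAdjustFromExpWidth(15) == RTFLOAT128U_EXP_BIAS_ADJUST); /* quad: stipulated from the 80-bit value */
    printf("bias adjusts: %u %u %u\n",
           BiasAdjustFromExpWidth(8), BiasAdjustFromExpWidth(11), BiasAdjustFromExpWidth(15));
    return 0;
}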
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp (r94608 → r94612)

     else
     {
-        iExponent += RTFLOAT80U_EXP_BIAS_UNDERFLOW_ADJUST;
+        iExponent += RTFLOAT80U_EXP_BIAS_ADJUST;
         fFsw |= X86_FSW_ES | X86_FSW_B;
     }
…
 
 
+/** Worker for iemAImpl_fmul_r80_by_r80. */
+static uint16_t iemAImpl_fmul_f80_r80_worker(PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2, PRTFLOAT80U pr80Result,
+                                             uint16_t fFcw, uint16_t fFsw, PCRTFLOAT80U pr80Val1Org)
+{
+    softfloat_state_t SoftState = IEM_SOFTFLOAT_STATE_INITIALIZER_FROM_FCW(fFcw);
+    extFloat80_t r80XResult = extF80_mul(iemFpuSoftF80FromIprt(pr80Val1), iemFpuSoftF80FromIprt(pr80Val2), &SoftState);
+    return iemFpuSoftStateAndF80ToFswAndIprtResult(&SoftState, r80XResult, pr80Result, fFcw, fFsw, pr80Val1Org);
+}
+
+
 IEM_DECL_IMPL_DEF(void, iemAImpl_fmul_r80_by_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
                                                   PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2))
 {
-    RT_NOREF(pFpuState, pFpuRes, pr80Val1, pr80Val2);
-    AssertReleaseFailed();
+    uint16_t const fFcw = pFpuState->FCW;
+    uint16_t fFsw = (pFpuState->FSW & (X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3)) | (6 << X86_FSW_TOP_SHIFT);
+
+    /* SoftFloat does not check for Pseudo-Infinity, Pseudo-Nan and Unnormals. */
+    if (RTFLOAT80U_IS_387_INVALID(pr80Val1) || RTFLOAT80U_IS_387_INVALID(pr80Val2))
+    {
+        if (fFcw & X86_FCW_IM)
+            pFpuRes->r80Result = g_r80Indefinite;
+        else
+        {
+            pFpuRes->r80Result = *pr80Val1;
+            fFsw |= X86_FSW_ES | X86_FSW_B;
+        }
+        fFsw |= X86_FSW_IE;
+    }
+    /* SoftFloat does not check for denormals and certainly not report them to us. NaNs trumps denormals. */
+    else if (   (RTFLOAT80U_IS_DENORMAL_OR_PSEUDO_DENORMAL(pr80Val1) && !RTFLOAT80U_IS_NAN(pr80Val2))
+             || (RTFLOAT80U_IS_DENORMAL_OR_PSEUDO_DENORMAL(pr80Val2) && !RTFLOAT80U_IS_NAN(pr80Val1)) )
+    {
+        if (fFcw & X86_FCW_DM)
+        {
+            PCRTFLOAT80U const pr80Val1Org = pr80Val1;
+            IEM_NORMALIZE_PSEUDO_DENORMAL(pr80Val1, r80Val1Normalized);
+            IEM_NORMALIZE_PSEUDO_DENORMAL(pr80Val2, r80Val2Normalized);
+            fFsw = iemAImpl_fmul_f80_r80_worker(pr80Val1, pr80Val2, &pFpuRes->r80Result, fFcw, fFsw, pr80Val1Org);
+        }
+        else
+        {
+            pFpuRes->r80Result = *pr80Val1;
+            fFsw |= X86_FSW_ES | X86_FSW_B;
+        }
+        fFsw |= X86_FSW_DE;
+    }
+    /* SoftFloat can handle the rest: */
+    else
+        fFsw = iemAImpl_fmul_f80_r80_worker(pr80Val1, pr80Val2, &pFpuRes->r80Result, fFcw, fFsw, pr80Val1);
+
+    pFpuRes->FSW = fFsw;
 }
…
 
 
-/** Worker for iemAImpl_fsub_r80_by_r80. */
+/** Worker for iemAImpl_fsub_r80_by_r80 and iemAImpl_fsubr_r80_by_r80. */
 static uint16_t iemAImpl_fsub_f80_r80_worker(PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2, PRTFLOAT80U pr80Result,
                                              uint16_t fFcw, uint16_t fFsw, PCRTFLOAT80U pr80Val1Org)
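For reference, the fFsw seed in the new iemAImpl_fmul_r80_by_r80 keeps only the caller's C0/C2/C3 condition bits and forces the top-of-stack field via (6 << X86_FSW_TOP_SHIFT); on x87 hardware the TOP field occupies FSW bits 11 to 13. The standalone sketch below mirrors that bit layout with local constants (the shift and mask values reflect the architectural layout as I read it and are not quoted from VBox headers):

#include <assert.h>
#include <stdint.h>

/* x87 FPU status word, TOP field: bits 11..13 (architectural layout). */
#define FSW_TOP_SHIFT   11
#define FSW_TOP_MASK    UINT16_C(0x3800)

static unsigned FswGetTop(uint16_t fFsw)
{
    return (fFsw & FSW_TOP_MASK) >> FSW_TOP_SHIFT;
}

static uint16_t FswSetTop(uint16_t fFsw, unsigned iTop)
{
    return (uint16_t)((fFsw & ~FSW_TOP_MASK) | ((iTop << FSW_TOP_SHIFT) & FSW_TOP_MASK));
}

int main(void)
{
    uint16_t fFsw = (uint16_t)(6 << FSW_TOP_SHIFT); /* the seed used above: TOP = 6, everything else clear */
    assert(fFsw == 0x3000);
    assert(FswGetTop(fFsw) == 6);
    fFsw = FswSetTop(fFsw, 7);
    assert(FswGetTop(fFsw) == 7);
    return 0;
}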
trunk/src/libs/softfloat-3e/source/s_roundPackToExtF80.c (r94606 → r94612)

 #include "internals.h"
 #include "softfloat.h"
-#include <iprt/types.h> /* VBox: RTFLOAT80U_EXP_BIAS_UNDERFLOW_ADJUST */
-//#include <iprt/assert.h>
-
-extFloat80_t
- softfloat_roundPackToExtF80(
+#include <iprt/types.h> /* VBox: RTFLOAT80U_EXP_BIAS_ADJUST */
+#include <iprt/assert.h>
+
+#if 1 /* This approach for bias adjust is somewhat cleaner, though a bit slower. But it works correctly. */
+static extFloat80_t
+ softfloat_roundPackToExtF80Inner(
     bool sign,
     int_fast32_t exp,
…
     bool roundNearEven;
     uint_fast64_t roundIncrement, roundMask, roundBits;
-    bool isTiny, doIncrement;
+    bool isTiny, doIncrement = 0;
     struct uint64_extra sig64Extra;
     union { struct extFloat80M s; extFloat80_t f; } uZ;
…
            /*----------------------------------------------------------------
            *----------------------------------------------------------------*/
-            bool fUnmaskedUnderflow = false; /* VBox: unmasked underflow bias */
            isTiny =
                (softfloat_detectTininess
…
                || (exp < 0)
                || (sig <= (uint64_t) (sig + roundIncrement));
-            if (   (pState->exceptionMask & softfloat_flag_underflow) /* VBox: unmasked underflow bias */
-                || (exp == -63 && sig == 0 && sigExtra == 0) /* zero */ ) { /* VBox: unmasked underflow bias */
-                sig = softfloat_shiftRightJam64(sig, 1 - exp);
-            } else { /* VBox: unmasked underflow bias */
-                //RTAssertMsg2("softfloat_roundPackToExtF80: #UE - bias adj: %d -> %d; sig=%#RX64\n", exp, exp + RTFLOAT80U_EXP_BIAS_UNDERFLOW_ADJUST, sig); /* VBox: unmasked underflow bias */
-                softfloat_raiseFlags( softfloat_flag_underflow SOFTFLOAT_STATE_ARG_COMMA ); /* VBox: unmasked underflow bias */
-                exp += RTFLOAT80U_EXP_BIAS_UNDERFLOW_ADJUST; /* VBox: unmasked underflow bias */
-                fUnmaskedUnderflow = true; /* VBox: unmasked underflow bias */
-            } /* VBox: unmasked underflow bias */
-            uint64_t const uOldSig = sig; /* VBox */
+            sig = softfloat_shiftRightJam64(sig, 1 - exp);
+            uint64_t const uOldSig = sig; /* VBox: C1 */
            roundBits = sig & roundMask;
            if ( roundBits ) {
…
            }
            sig += roundIncrement;
-            if ( !fUnmaskedUnderflow ) { /* VBox: unmasked underflow bias */
-                exp = ((sig & UINT64_C( 0x8000000000000000 )) != 0);
-            } /* VBox: unmasked underflow bias */
+            exp = ((sig & UINT64_C( 0x8000000000000000 )) != 0);
            roundIncrement = roundMask + 1;
            if ( roundNearEven && (roundBits<<1 == roundIncrement) ) {
…
            }
            sig &= ~roundMask;
            if ( sig > uOldSig ) { /* VBox: C1 */
                softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
                //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #1\n"); /* VBox: C1 */
            } /* VBox: C1 */
            goto packReturn;
        }
…
    /*------------------------------------------------------------------------
    *------------------------------------------------------------------------*/
    { /* VBox: C1 */
        uint64_t const uOldSig = sig; /* VBox: C1 */
        if ( roundBits ) {
            softfloat_exceptionFlags |= softfloat_flag_inexact;
…
            if ( roundingMode == softfloat_round_odd ) {
                sig = (sig & ~roundMask) | (roundMask + 1);
                if ( sig > uOldSig ) { /* VBox: C1 */
                    softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
                    //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #2\n"); /* VBox: C1 */
                } /* VBox: C1 */
                goto packReturn;
            }
…
            ++exp;
            sig = UINT64_C( 0x8000000000000000 );
            softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
            //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #3\n"); /* VBox: C1 */
        }
        roundIncrement = roundMask + 1;
…
        }
        sig &= ~roundMask;
        if ( sig > uOldSig ) { /* VBox: C1 */
            softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
            //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #4\n"); /* VBox: C1 */
        } /* VBox: C1 */
        goto packReturn;
    } /* VBox: C1 */
    /*------------------------------------------------------------------------
    *------------------------------------------------------------------------*/
…
            /*----------------------------------------------------------------
            *----------------------------------------------------------------*/
-            bool fUnmaskedUnderflow = false; /* VBox: unmasked underflow bias */
            isTiny =
                (softfloat_detectTininess
…
                || ! doIncrement
                || (sig < UINT64_C( 0xFFFFFFFFFFFFFFFF ));
-            if (   (pState->exceptionMask & softfloat_flag_underflow) /* VBox: unmasked underflow bias */
-                || (exp == -63 && sig == 0 && sigExtra == 0) /* zero */ ) { /* VBox: unmasked underflow bias */
-                sig64Extra =
-                    softfloat_shiftRightJam64Extra( sig, sigExtra, 1 - exp );
-                exp = 0;
-                sig = sig64Extra.v;
-                sigExtra = sig64Extra.extra;
-            } else { /* VBox: unmasked underflow bias */
-                //RTAssertMsg2("softfloat_roundPackToExtF80: #UE/80 - bias adj: %d -> %d; sig=%#RX64'%016RX64\n", exp, exp + RTFLOAT80U_EXP_BIAS_UNDERFLOW_ADJUST, sig, sigExtra); /* VBox: unmasked underflow bias */
-                softfloat_raiseFlags( softfloat_flag_underflow SOFTFLOAT_STATE_ARG_COMMA ); /* VBox: unmasked underflow bias */
-                exp += RTFLOAT80U_EXP_BIAS_UNDERFLOW_ADJUST; /* VBox: unmasked underflow bias */
-                fUnmaskedUnderflow = true; /* VBox: unmasked underflow bias */
-            } /* VBox: unmasked underflow bias */
-            if ( sigExtra ) {
+            //RTAssertMsg2("softfloat_roundPackToExtF80: #2: sig=%#RX64 sigExtra=%#RX64 isTiny=%d exp=%d 1-exp=%d\n", sig, sigExtra, isTiny, exp, 1-exp);
+            sig64Extra =
+                softfloat_shiftRightJam64Extra( sig, sigExtra, 1 - exp );
+            if ( exp < -63 || sig64Extra.extra != 0 ) { /* VBox: Missing inexact result flag */
+                softfloat_exceptionFlags |= softfloat_flag_inexact; /* VBox: Missing inexact result flag */
+            } /* VBox: Missing inexact result flag */
+            exp = 0;
+            sig = sig64Extra.v;
+            sigExtra = sig64Extra.extra;
+            //RTAssertMsg2("softfloat_roundPackToExtF80: #2: sig=%#RX64 sigExtra=%#RX64 isTiny=%d\n", sig, sigExtra, isTiny);
+            if (   sigExtra
+                || (   !(pState->exceptionMask & softfloat_flag_underflow) /* VBox: Unmasked underflow conditions differ */
+                    && (sig != 0 || sigExtra != 0) /*zero*/ ) ) { /* VBox: Unmasked underflow conditions differ */
                if ( isTiny ) softfloat_raiseFlags( softfloat_flag_underflow SOFTFLOAT_STATE_ARG_COMMA );
 #ifdef SOFTFLOAT_ROUND_ODD
…
            }
            if ( doIncrement ) {
                softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
                //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #5\n"); /* VBox: C1 */
                ++sig;
                sig &=
…
                    (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
                        & roundNearEven);
-                if ( fUnmaskedUnderflow ) { /* VBox: unmasked underflow bias */
-                    exp = ((sig & UINT64_C( 0x8000000000000000 )) != 0);
-                } else if ((sig & UINT64_C( 0x8000000000000000 )) != 0) { /* VBox: unmasked underflow bias */
-                    exp++; /* VBox: unmasked underflow bias */
-                } /* VBox: unmasked underflow bias */
+                exp = ((sig & UINT64_C( 0x8000000000000000 )) != 0);
            }
            goto packReturn;
…
        exp = 0x7FFF;
        sig = UINT64_C( 0x8000000000000000 );
-        softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 - Returning infinity means we've rounded up. */
-        //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #6\n");
-
-        /* VBox: HACK ALERT! Some utterly weird behaviour, found with 'fadd 0,max', precision < 64 and rounding away from 0. */
-        if ( !(pState->exceptionMask & softfloat_flag_overflow) ) /* VBox */
-            exp = 8191; /* => -8192 */ /* VBox */
+        softfloat_exceptionFlags |= softfloat_flag_c1; /* Inf means rounding up */ /* VBox: C1 */
+        //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #6\n"); /* VBox: C1 */
    } else {
        exp = 0x7FFE;
…
            ++exp;
            sig = UINT64_C( 0x8000000000000000 );
            softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
            //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #7\n"); /* VBox: C1 */
        } else {
            sig &=
…
                (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
                    & roundNearEven);
            if ( sig > uOldSig ) { /* VBox: C1 */
                softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
                //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #8\n"); /* VBox: C1 */
            }
        }
…
 }
 
+extFloat80_t
+ softfloat_roundPackToExtF80(
+    bool sign,
+    int_fast32_t exp,
+    uint_fast64_t sig,
+    uint_fast64_t sigExtra,
+    uint_fast8_t roundingPrecision
+    SOFTFLOAT_STATE_DECL_COMMA
+ )
+{
+    uint8_t const exceptionFlagsSaved = softfloat_exceptionFlags;
+    softfloat_exceptionFlags = 0;
+    extFloat80_t r80Result = softfloat_roundPackToExtF80Inner(sign, exp, sig, sigExtra, roundingPrecision, pState);
+
+    if ( !(softfloat_exceptionFlags & ~pState->exceptionMask & (softfloat_flag_underflow | softfloat_flag_overflow)) ) {
+        /* likely */
+    } else {
+        softfloat_exceptionFlags &= softfloat_flag_underflow | softfloat_flag_overflow;
+        if ( softfloat_exceptionFlags & softfloat_flag_underflow ) {
+            exp = (exp + RTFLOAT80U_EXP_BIAS_ADJUST) & RTFLOAT80U_EXP_MAX;
+        } else {
+            exp = (exp - RTFLOAT80U_EXP_BIAS_ADJUST) & RTFLOAT80U_EXP_MAX;
+        }
+        r80Result = softfloat_roundPackToExtF80Inner(sign, exp, sig, sigExtra, roundingPrecision, pState);
+    }
+
+    softfloat_exceptionFlags |= exceptionFlagsSaved;
+    return r80Result;
+}
+
+#else /* Messy integrated approach for bias adjust that doesn't quite work 100%: */
+
+extFloat80_t
+ softfloat_roundPackToExtF80(
+    bool sign,
+    int_fast32_t exp,
+    uint_fast64_t sig,
+    uint_fast64_t sigExtra,
+    uint_fast8_t roundingPrecision
+    SOFTFLOAT_STATE_DECL_COMMA
+ )
+{
+    uint_fast8_t roundingMode;
+    bool roundNearEven;
+    uint_fast64_t roundIncrement, roundMask, roundBits;
+    bool isTiny, doIncrement = 0;
+    struct uint64_extra sig64Extra;
+    union { struct extFloat80M s; extFloat80_t f; } uZ;
+    RTAssertMsg2("softfloat_roundPackToExtF80: exp=%d sig=%RX64 sigExtra=%RX64 rp=%d\n", exp, sig, sigExtra, roundingPrecision);
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundingMode = softfloat_roundingMode;
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    if ( roundingPrecision == 80 ) goto precision80;
+    if ( roundingPrecision == 64 ) {
+        roundIncrement = UINT64_C( 0x0000000000000400 );
+        roundMask = UINT64_C( 0x00000000000007FF );
+    } else if ( roundingPrecision == 32 ) {
+        roundIncrement = UINT64_C( 0x0000008000000000 );
+        roundMask = UINT64_C( 0x000000FFFFFFFFFF );
+    } else {
+        goto precision80;
+    }
+    sig |= (sigExtra != 0);
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ? roundMask
+                : 0;
+    }
+    roundBits = sig & roundMask;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0x7FFD <= (uint32_t) (exp - 1) ) {
+        if ( exp <= 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            bool fUnmaskedUnderflow = false; /* VBox: unmasked underflow bias adjust */
+            isTiny =
+                (softfloat_detectTininess
+                     == softfloat_tininess_beforeRounding)
+                || (exp < 0)
+                || (sig <= (uint64_t) (sig + roundIncrement));
+            if (   (pState->exceptionMask & softfloat_flag_underflow) /* VBox: unmasked underflow bias adjust */
+                || (exp == -63 && sig == 0 && sigExtra == 0) /* zero */ ) { /* VBox: unmasked underflow bias adjust */
+                sig = softfloat_shiftRightJam64(sig, 1 - exp);
+            } else { /* VBox: unmasked underflow bias adjust */
+                //RTAssertMsg2("softfloat_roundPackToExtF80: #UE - bias adj: %d -> %d; sig=%#RX64\n", exp, exp + RTFLOAT80U_EXP_BIAS_ADJUST, sig); /* VBox: unmasked underflow bias adjust */
+                softfloat_raiseFlags( softfloat_flag_underflow SOFTFLOAT_STATE_ARG_COMMA ); /* VBox: unmasked underflow bias adjust */
+                exp += RTFLOAT80U_EXP_BIAS_ADJUST; /* VBox: unmasked underflow bias adjust */
+                fUnmaskedUnderflow = true; /* VBox: unmasked underflow bias adjust */
+            } /* VBox: unmasked underflow bias adjust */
+            uint64_t const uOldSig = sig; /* VBox: C1 */
+            roundBits = sig & roundMask;
+            if ( roundBits ) {
+                if ( isTiny ) softfloat_raiseFlags( softfloat_flag_underflow SOFTFLOAT_STATE_ARG_COMMA );
+                softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+                if ( roundingMode == softfloat_round_odd ) {
+                    sig |= roundMask + 1;
+                }
+#endif
+            }
+            sig += roundIncrement;
+            if ( !fUnmaskedUnderflow ) { /* VBox: unmasked underflow bias adjust */
+                exp = ((sig & UINT64_C( 0x8000000000000000 )) != 0);
+            } /* VBox: unmasked underflow bias adjust */
+            roundIncrement = roundMask + 1;
+            if ( roundNearEven && (roundBits<<1 == roundIncrement) ) {
+                roundMask |= roundIncrement;
+            }
+            sig &= ~roundMask;
+            if ( sig > uOldSig ) { /* VBox: C1 */
+                softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
+                //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #1\n"); /* VBox: C1 */
+            } /* VBox: C1 */
+            goto packReturn;
+        }
+        if (
+            (0x7FFE < exp)
+                || ((exp == 0x7FFE) && ((uint64_t) (sig + roundIncrement) < sig))
+        ) {
+            if ( !(pState->exceptionMask & softfloat_flag_overflow) ) { /* VBox: unmasked overflow bias adjust */
+                //RTAssertMsg2("softfloat_roundPackToExtF80: #OE - bias adj: %d -> %d; sig=%#RX64'%016RX64\n", exp, (exp - RTFLOAT80U_EXP_BIAS_ADJUST) & RTFLOAT80U_EXP_MAX, sig, sigExtra); /* VBox: unmasked underflow bias adjust */
+                exp = (exp - RTFLOAT80U_EXP_BIAS_ADJUST) & RTFLOAT80U_EXP_MAX; /* VBox: unmasked overflow bias adjust */
+                softfloat_raiseFlags( softfloat_flag_overflow /* VBox: unmasked overflow bias adjust */
+                                      | softfloat_flag_inexact SOFTFLOAT_STATE_ARG_COMMA ); /* VBox: unmasked overflow bias adjust */
+            } else { /* VBox: unmasked overflow bias adjust */
+                //RTAssertMsg2("softfloat_roundPackToExtF80: #OE - masked\n");
+                goto overflow;
+            } /* VBox: unmasked overflow bias adjust */
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    { /* VBox: C1 */
+        uint64_t const uOldSig = sig; /* VBox: C1 */
+        if ( roundBits ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+            if ( roundingMode == softfloat_round_odd ) {
+                sig = (sig & ~roundMask) | (roundMask + 1);
+                if ( sig > uOldSig ) { /* VBox: C1 */
+                    softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
+                    //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #2\n"); /* VBox: C1 */
+                } /* VBox: C1 */
+                goto packReturn;
+            }
+#endif
+        }
+        sig = (uint64_t) (sig + roundIncrement);
+        if ( sig < roundIncrement ) {
+            ++exp;
+            sig = UINT64_C( 0x8000000000000000 );
+            softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
+            //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #3\n"); /* VBox: C1 */
+        }
+        roundIncrement = roundMask + 1;
+        if ( roundNearEven && (roundBits<<1 == roundIncrement) ) {
+            roundMask |= roundIncrement;
+        }
+        sig &= ~roundMask;
+        if ( sig > uOldSig ) { /* VBox: C1 */
+            softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
+            //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #4\n"); /* VBox: C1 */
+        } /* VBox: C1 */
+        goto packReturn;
+    } /* VBox: C1 */
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ precision80:
+    doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra);
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        doIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                && sigExtra;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0x7FFD <= (uint32_t) (exp - 1) ) {
+        if ( exp <= 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            bool fUnmaskedUnderflow = false; /* VBox: unmasked underflow bias adjust */
+            isTiny =
+                (softfloat_detectTininess
+                     == softfloat_tininess_beforeRounding)
+                || (exp < 0)
+                || ! doIncrement
+                || (sig < UINT64_C( 0xFFFFFFFFFFFFFFFF ));
+            if ( exp == -63 && sig == 0 && sigExtra == 0 /* zero */ ) { /* VBox: unmasked underflow bias adjust */
+                exp = 0; /* VBox: unmasked underflow bias adjust */
+            } else if ( pState->exceptionMask & softfloat_flag_underflow ) { /* VBox: unmasked underflow bias adjust */
+                sig64Extra =
+                    softfloat_shiftRightJam64Extra( sig, sigExtra, 1 - exp );
+                if ( exp < -63 || sig64Extra.extra != 0 ) { /* VBox: unmasked underflow bias adjust */
+                    softfloat_exceptionFlags |= softfloat_flag_inexact; /* VBox: unmasked underflow bias adjust */
+                } /* VBox: unmasked underflow bias adjust */
+                exp = 0;
+                sig = sig64Extra.v;
+                sigExtra = sig64Extra.extra;
+            } else { /* VBox: unmasked underflow bias adjust */
+                RTAssertMsg2("softfloat_roundPackToExtF80: #UE/80 - bias adj: %d -> %d; sig=%#RX64'%016RX64 t=%d\n", /* VBox: unmasked underflow bias adjust */
+                             exp, (exp + RTFLOAT80U_EXP_BIAS_ADJUST) & RTFLOAT80U_EXP_MAX, sig, sigExtra, isTiny); /* VBox: unmasked underflow bias adjust */
+                softfloat_raiseFlags( softfloat_flag_underflow SOFTFLOAT_STATE_ARG_COMMA ); /* VBox: unmasked underflow bias adjust */
+                exp = (exp + RTFLOAT80U_EXP_BIAS_ADJUST) & RTFLOAT80U_EXP_MAX; /* VBox: unmasked underflow bias adjust */
+                fUnmaskedUnderflow = true; /* VBox: unmasked underflow bias adjust */
+            } /* VBox: unmasked underflow bias adjust */
+            if ( sigExtra ) {
+                if ( isTiny ) softfloat_raiseFlags( softfloat_flag_underflow SOFTFLOAT_STATE_ARG_COMMA );
+#ifdef SOFTFLOAT_ROUND_ODD
+                if ( roundingMode == softfloat_round_odd ) {
+                    sig |= 1;
+                    goto packReturn;
+                }
+#endif
+            }
+            doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra);
+            if (
+                ! roundNearEven
+                    && (roundingMode != softfloat_round_near_maxMag)
+            ) {
+                doIncrement =
+                    (roundingMode
+                         == (sign ? softfloat_round_min : softfloat_round_max))
+                        && sigExtra;
+            }
+            if ( doIncrement ) {
+                softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
+                RTAssertMsg2("softfloat_roundPackToExtF80: C1 #5\n"); /* VBox: C1 */
+                ++sig;
+                sig &=
+                    ~(uint_fast64_t)
+                         (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+                              & roundNearEven);
+                if ( !fUnmaskedUnderflow ) { /* VBox: unmasked underflow bias adjust */
+                    exp = ((sig & UINT64_C( 0x8000000000000000 )) != 0);
+                } /* VBox: unmasked underflow bias adjust */
+            }
+            goto packReturn;
+        }
+        if (
+            (0x7FFE < exp)
+                || ((exp == 0x7FFE) && (sig == UINT64_C( 0xFFFFFFFFFFFFFFFF ))
+                        && doIncrement)
+        ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            roundMask = 0;
+ overflow:
+            softfloat_raiseFlags(
+                softfloat_flag_overflow | softfloat_flag_inexact
+                SOFTFLOAT_STATE_ARG_COMMA );
+            if (
+                roundNearEven
+                    || (roundingMode == softfloat_round_near_maxMag)
+                    || (roundingMode
+                            == (sign ? softfloat_round_min : softfloat_round_max))
+            ) {
+                if ( !(pState->exceptionMask & softfloat_flag_overflow) ) { /* VBox: unmasked overflow bias adjust */
+                    //RTAssertMsg2("softfloat_roundPackToExtF80: #OE/80a - bias adj: %d -> %d; sig=%#RX64'%016RX64\n", exp, (exp - RTFLOAT80U_EXP_BIAS_ADJUST) & RTFLOAT80U_EXP_MAX, sig, sigExtra); /* VBox: unmasked underflow bias adjust */
+                    exp = (exp - RTFLOAT80U_EXP_BIAS_ADJUST) & RTFLOAT80U_EXP_MAX; /* VBox: unmasked overflow bias adjust */
+                } else { /* VBox: unmasked overflow bias adjust */
+                    exp = 0x7FFF;
+                    sig = UINT64_C( 0x8000000000000000 );
+                    softfloat_exceptionFlags |= softfloat_flag_c1; /* Inf means rounding up */ /* VBox: C1 */
+                    //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #6\n"); /* VBox: C1 */
+                    goto packReturn;
+                } /* VBox: unmasked overflow bias adjust */
+            } else {
+                if ( !(pState->exceptionMask & softfloat_flag_overflow) ) { /* VBox: unmasked overflow bias adjust */
+                    //RTAssertMsg2("softfloat_roundPackToExtF80: #OE/80b - bias adj: %d -> %d; sig=%#RX64'%016RX64\n", exp, (exp - RTFLOAT80U_EXP_BIAS_ADJUST) & RTFLOAT80U_EXP_MAX, sig, sigExtra); /* VBox: unmasked underflow bias adjust */
+                    exp = (exp - RTFLOAT80U_EXP_BIAS_ADJUST) & RTFLOAT80U_EXP_MAX; /* VBox: unmasked overflow bias adjust */
+                } else { /* VBox: unmasked overflow bias adjust */
+                    exp = 0x7FFE;
+                    sig = ~roundMask;
+                    goto packReturn;
+                }
+            }
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( sigExtra ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+        if ( roundingMode == softfloat_round_odd ) {
+            sig |= 1;
+            goto packReturn;
+        }
+#endif
+    }
+    if ( doIncrement ) {
+        uint64_t const uOldSig = sig; /* VBox */
+        ++sig;
+        if ( ! sig ) {
+            ++exp;
+            sig = UINT64_C( 0x8000000000000000 );
+            softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
+            //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #7\n"); /* VBox: C1 */
+        } else {
+            sig &=
+                ~(uint_fast64_t)
+                     (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+                          & roundNearEven);
+            if ( sig > uOldSig ) { /* VBox: C1 */
+                softfloat_exceptionFlags |= softfloat_flag_c1; /* VBox: C1 */
+                //RTAssertMsg2("softfloat_roundPackToExtF80: C1 #8\n"); /* VBox: C1 */
+            }
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ packReturn:
+    uZ.s.signExp = packToExtF80UI64( sign, exp );
+    uZ.s.signif = sig;
+    return uZ.f;
+
+}
+
+#endif /* Messy integrated approach for bias adjust that doesn't quite work 100%: */
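The net effect of the "#if 1" approach is: round once with the VBox underflow/overflow hacks removed from the rounding core, and only when an unmasked overflow or underflow was raised, rebias the exponent by RTFLOAT80U_EXP_BIAS_ADJUST (wrapped with RTFLOAT80U_EXP_MAX) and round the same unrounded input a second time for delivery to the trap handler. Below is a minimal standalone sketch of that retry pattern, assuming a hypothetical round_once() in place of softfloat_roundPackToExtF80Inner and simplified flag plumbing; it illustrates the control flow only and is not the SoftFloat code itself.

#include <stdint.h>
#include <stdio.h>

#define FLAG_UNDERFLOW   0x04u
#define FLAG_OVERFLOW    0x08u
#define EXP_BIAS_ADJUST  24576   /* stands in for RTFLOAT80U_EXP_BIAS_ADJUST */
#define EXP_MAX          32767   /* stands in for RTFLOAT80U_EXP_MAX */

/* Hypothetical stand-in for softfloat_roundPackToExtF80Inner: "rounds" and reports flags. */
static uint64_t round_once(int32_t iExp, uint64_t uSig, unsigned *pfFlags)
{
    if (iExp <= 0)
        *pfFlags |= FLAG_UNDERFLOW;  /* pretend the result turned out tiny */
    else if (iExp >= EXP_MAX)
        *pfFlags |= FLAG_OVERFLOW;   /* pretend the result turned out huge */
    return uSig;                     /* the actual rounding is elided */
}

static uint64_t round_with_bias_retry(int32_t iExp, uint64_t uSig, unsigned fMasked, unsigned *pfFlags)
{
    unsigned const fSaved = *pfFlags;   /* preserve previously accumulated flags */
    *pfFlags = 0;
    uint64_t uResult = round_once(iExp, uSig, pfFlags);

    /* Retry only for an *unmasked* overflow/underflow, delivering a rebiased result. */
    if (*pfFlags & ~fMasked & (FLAG_UNDERFLOW | FLAG_OVERFLOW))
    {
        if (*pfFlags & FLAG_UNDERFLOW)
            iExp = (iExp + EXP_BIAS_ADJUST) & EXP_MAX;
        else
            iExp = (iExp - EXP_BIAS_ADJUST) & EXP_MAX;
        uResult = round_once(iExp, uSig, pfFlags);
    }
    *pfFlags |= fSaved;
    return uResult;
}

int main(void)
{
    unsigned fFlags = 0;
    round_with_bias_retry(-5, UINT64_C(0x8000000000000000), 0 /* nothing masked */, &fFlags);
    printf("flags after unmasked underflow: %#x\n", fFlags);
    return 0;
}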