Changeset 94338 in vbox for trunk/src/VBox/VMM
Timestamp: Mar 23, 2022 2:00:54 PM (3 years ago)
File: 1 edited
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp (r94303 → r94338)

+/**
+ * Helper for storing a deconstructed and normal R80 value as a 32-bit one.
+ *
+ * This uses the rounding rules indicated by fFcw and returns updated fFsw.
+ *
+ * @returns Updated FPU status word value.
+ * @param   fSignIn     Incoming sign indicator.
+ * @param   uMantissaIn Incoming mantissa (dot between bit 63 and 62).
+ * @param   iExponentIn Unbiased exponent.
+ * @param   fFcw        The FPU control word.
+ * @param   fFsw        Prepped FPU status word, i.e. exceptions and C1 clear.
+ * @param   pr32Dst     Where to return the output value, if one should be
+ *                      returned.
+ *
+ * @note    Tailored as a helper for iemAImpl_fst_r80_to_r32 right now.
+ * @note    Exact same logic as iemAImpl_StoreNormalR80AsR64.
+ */
+static uint16_t iemAImpl_StoreNormalR80AsR32(bool fSignIn, uint64_t uMantissaIn, int32_t iExponentIn,
+                                             uint16_t fFcw, uint16_t fFsw, PRTFLOAT32U pr32Dst)
+{
+    uint64_t const fRoundedOffMask = RT_BIT_64(RTFLOAT80U_FRACTION_BITS - RTFLOAT32U_FRACTION_BITS) - 1; /* 0xffffffffff */
+    uint64_t const uRoundingAdd    = (fFcw & X86_FCW_RC_MASK) == X86_FCW_RC_NEAREST
+                                   ? RT_BIT_64(RTFLOAT80U_FRACTION_BITS - RTFLOAT32U_FRACTION_BITS - 1) /* 0x8000000000 */
+                                   : (fFcw & X86_FCW_RC_MASK) == (fSignIn ? X86_FCW_RC_DOWN : X86_FCW_RC_UP)
+                                   ? fRoundedOffMask
+                                   : 0;
+    uint64_t       fRoundedOff     = uMantissaIn & fRoundedOffMask;
+
+    /*
+     * Deal with potential overflows/underflows first, optimizing for none.
+     * 0 and MAX are used for special values; MAX-1 may be rounded up to MAX.
+     */
+    int32_t iExponentOut = (int32_t)iExponentIn + RTFLOAT32U_EXP_BIAS;
+    if ((uint32_t)iExponentOut - 1 < (uint32_t)(RTFLOAT32U_EXP_MAX - 3))
+    { /* likely? */ }
+    /*
+     * Underflow if the exponent is zero or negative.  We attempt to map this
+     * to a subnormal number when possible, with some additional trickery ofc.
+     */
+    else if (iExponentOut <= 0)
+    {
+        bool const fIsTiny = iExponentOut < 0
+                          || UINT64_MAX - uMantissaIn > uRoundingAdd;
+        if (!(fFcw & X86_FCW_UM) && fIsTiny)
+            /* Note! 754-1985 sec 7.4 has something about bias adjust of 192 here, not in 2008 & 2019. Perhaps only 8087 & 287? */
+            return fFsw | X86_FSW_UE | X86_FSW_ES | X86_FSW_B;
+
+        if (iExponentOut <= 0)
+        {
+            uMantissaIn = iExponentOut <= -63
+                        ? uMantissaIn != 0
+                        : (uMantissaIn >> (-iExponentOut + 1)) | ((uMantissaIn & (RT_BIT_64(-iExponentOut + 1) - 1)) != 0);
+            fRoundedOff = uMantissaIn & fRoundedOffMask;
+            if (fRoundedOff && fIsTiny)
+                fFsw |= X86_FSW_UE;
+            iExponentOut = 0;
+        }
+    }
+    /*
+     * Overflow if at or above max exponent value or if we will reach max
+     * when rounding.  Will return +/-inf or +/-max value depending on
+     * whether we're rounding or not.
+     */
+    else if (   iExponentOut >= RTFLOAT32U_EXP_MAX
+             || (   iExponentOut == RTFLOAT32U_EXP_MAX - 1
+                 && UINT64_MAX - uMantissaIn <= uRoundingAdd))
+    {
+        fFsw |= X86_FSW_OE;
+        if (!(fFcw & X86_FCW_OM))
+            return fFsw | X86_FSW_ES | X86_FSW_B;
+        if (fRoundedOff)
+        {
+            fFsw |= X86_FSW_PE;
+            if (uRoundingAdd)
+                fFsw |= X86_FSW_C1;
+            if (!(fFcw & X86_FCW_PM))
+                fFsw |= X86_FSW_ES | X86_FSW_B;
+        }
+
+        pr32Dst->s.fSign = fSignIn;
+        if (uRoundingAdd)
+        {   /* Inf */
+            pr32Dst->s.uExponent = RTFLOAT32U_EXP_MAX;
+            pr32Dst->s.uFraction = 0;
+        }
+        else
+        {   /* Max */
+            pr32Dst->s.uExponent = RTFLOAT32U_EXP_MAX - 1;
+            pr32Dst->s.uFraction = RT_BIT_32(RTFLOAT32U_FRACTION_BITS) - 1;
+        }
+        return fFsw;
+    }
+
+    /*
+     * Normal or subnormal number.
+     */
+    /* Do rounding. */
+    uint64_t uMantissaOut = uMantissaIn + uRoundingAdd;
+    if (uMantissaOut < uMantissaIn)
+    {
+        uMantissaOut >>= 1;
+        iExponentOut++;
+        Assert(iExponentOut < RTFLOAT32U_EXP_MAX); /* checked above */
+    }
+    /** @todo not sure if this is applied correctly, with the above carry check. */
+    else if (   (fFcw & X86_FCW_RC_MASK) == X86_FCW_RC_NEAREST
+             && !(fRoundedOff & RT_BIT_64(RTFLOAT80U_FRACTION_BITS - RTFLOAT32U_FRACTION_BITS - 1)))
+        uMantissaOut &= ~(uint64_t)1;
+
+    /* Truncate the mantissa and set the return value. */
+    uMantissaOut >>= RTFLOAT80U_FRACTION_BITS - RTFLOAT32U_FRACTION_BITS;
+
+    pr32Dst->s.uFraction = (uint32_t)uMantissaOut; /* Note! too big for bitfield if normal. */
+    pr32Dst->s.uExponent = iExponentOut;
+    pr32Dst->s.fSign     = fSignIn;
+
+    /* Set status flags related to rounding. */
+    if (fRoundedOff)
+    {
+        fFsw |= X86_FSW_PE;
+        if (uMantissaOut > (uMantissaIn >> (RTFLOAT80U_FRACTION_BITS - RTFLOAT32U_FRACTION_BITS)))
+            fFsw |= X86_FSW_C1;
+        if (!(fFcw & X86_FCW_PM))
+            fFsw |= X86_FSW_ES | X86_FSW_B;
+    }
+
+    return fFsw;
+}
+
+
+/**
+ * @note Exact same logic as iemAImpl_fst_r80_to_r64.
+ */
 IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
                                                  PRTFLOAT32U pr32Dst, PCRTFLOAT80U pr80Src))
 {
-    RT_NOREF(pFpuState, pu16FSW, pr32Dst, pr80Src);
-    AssertReleaseFailed();
-}
-
-
+    uint16_t const fFcw = pFpuState->FCW;
+    uint16_t fFsw = (7 << X86_FSW_TOP_SHIFT) | (pFpuState->FSW & (X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3));
+    if (RTFLOAT80U_IS_NORMAL(pr80Src))
+        fFsw = iemAImpl_StoreNormalR80AsR32(pr80Src->s.fSign, pr80Src->s.uMantissa,
+                                            (int32_t)pr80Src->s.uExponent - RTFLOAT80U_EXP_BIAS, fFcw, fFsw, pr32Dst);
+    else if (RTFLOAT80U_IS_ZERO(pr80Src))
+    {
+        pr32Dst->s.fSign     = pr80Src->s.fSign;
+        pr32Dst->s.uExponent = 0;
+        pr32Dst->s.uFraction = 0;
+        Assert(RTFLOAT32U_IS_ZERO(pr32Dst));
+    }
+    else if (RTFLOAT80U_IS_INF(pr80Src))
+    {
+        pr32Dst->s.fSign     = pr80Src->s.fSign;
+        pr32Dst->s.uExponent = RTFLOAT32U_EXP_MAX;
+        pr32Dst->s.uFraction = 0;
+        Assert(RTFLOAT32U_IS_INF(pr32Dst));
+    }
+    else if (RTFLOAT80U_IS_INDEFINITE(pr80Src))
+    {
+        /* Mapped to +/-QNaN */
+        pr32Dst->s.fSign     = pr80Src->s.fSign;
+        pr32Dst->s.uExponent = RTFLOAT32U_EXP_MAX;
+        pr32Dst->s.uFraction = RT_BIT_32(RTFLOAT32U_FRACTION_BITS - 1);
+    }
+    else if (RTFLOAT80U_IS_PSEUDO_INF(pr80Src) || RTFLOAT80U_IS_UNNORMAL(pr80Src) || RTFLOAT80U_IS_PSEUDO_NAN(pr80Src))
+    {
+        /* Pseudo-Inf / Pseudo-NaN / Unnormal -> QNaN (during load, probably) */
+        if (fFcw & X86_FCW_IM)
+        {
+            pr32Dst->s.fSign     = 1;
+            pr32Dst->s.uExponent = RTFLOAT32U_EXP_MAX;
+            pr32Dst->s.uFraction = RT_BIT_32(RTFLOAT32U_FRACTION_BITS - 1);
+            fFsw |= X86_FSW_IE;
+        }
+        else
+            fFsw |= X86_FSW_IE | X86_FSW_ES | X86_FSW_B;
+    }
+    else if (RTFLOAT80U_IS_NAN(pr80Src))
+    {
+        /* IM applies to signalled NaN input only.  Everything is converted to quiet NaN. */
+        if ((fFcw & X86_FCW_IM) || !RTFLOAT80U_IS_SIGNALLING_NAN(pr80Src))
+        {
+            pr32Dst->s.fSign     = pr80Src->s.fSign;
+            pr32Dst->s.uExponent = RTFLOAT32U_EXP_MAX;
+            pr32Dst->s.uFraction = (uint32_t)(pr80Src->sj64.uFraction >> (RTFLOAT80U_FRACTION_BITS - RTFLOAT32U_FRACTION_BITS));
+            pr32Dst->s.uFraction |= RT_BIT_32(RTFLOAT32U_FRACTION_BITS - 1);
+            if (RTFLOAT80U_IS_SIGNALLING_NAN(pr80Src))
+                fFsw |= X86_FSW_IE;
+        }
+        else
+            fFsw |= X86_FSW_IE | X86_FSW_ES | X86_FSW_B;
+    }
+    else
+    {
+        /* Denormal values cause both an underflow and a precision exception. */
+        Assert(RTFLOAT80U_IS_DENORMAL(pr80Src) || RTFLOAT80U_IS_PSEUDO_DENORMAL(pr80Src));
+        if (fFcw & X86_FCW_UM)
+        {
+            pr32Dst->s.fSign     = pr80Src->s.fSign;
+            pr32Dst->s.uExponent = 0;
+            if ((fFcw & X86_FCW_RC_MASK) == (!pr80Src->s.fSign ? X86_FCW_RC_UP : X86_FCW_RC_DOWN))
+            {
+                pr32Dst->s.uFraction = 1;
+                fFsw |= X86_FSW_UE | X86_FSW_PE | X86_FSW_C1;
+                if (!(fFcw & X86_FCW_PM))
+                    fFsw |= X86_FSW_ES | X86_FSW_B;
+            }
+            else
+            {
+                pr32Dst->s.uFraction = 0;
+                fFsw |= X86_FSW_UE | X86_FSW_PE;
+                if (!(fFcw & X86_FCW_PM))
+                    fFsw |= X86_FSW_ES | X86_FSW_B;
+            }
+        }
+        else
+            fFsw |= X86_FSW_UE | X86_FSW_ES | X86_FSW_B;
+    }
+    *pu16FSW = fFsw;
+}
+
+
+/**
+ * Helper for storing a deconstructed and normal R80 value as a 64-bit one.
+ *
+ * This uses the rounding rules indicated by fFcw and returns updated fFsw.
+ *
+ * @returns Updated FPU status word value.
+ * @param   fSignIn     Incoming sign indicator.
+ * @param   uMantissaIn Incoming mantissa (dot between bit 63 and 62).
+ * @param   iExponentIn Unbiased exponent.
+ * @param   fFcw        The FPU control word.
+ * @param   fFsw        Prepped FPU status word, i.e. exceptions and C1 clear.
+ * @param   pr64Dst     Where to return the output value, if one should be
+ *                      returned.
+ *
+ * @note    Tailored as a helper for iemAImpl_fst_r80_to_r64 right now.
+ * @note    Exact same logic as iemAImpl_StoreNormalR80AsR32.
+ */
+static uint16_t iemAImpl_StoreNormalR80AsR64(bool fSignIn, uint64_t uMantissaIn, int32_t iExponentIn,
+                                             uint16_t fFcw, uint16_t fFsw, PRTFLOAT64U pr64Dst)
+{
+    uint64_t const fRoundedOffMask = RT_BIT_64(RTFLOAT80U_FRACTION_BITS - RTFLOAT64U_FRACTION_BITS) - 1; /* 0x7ff */
+    uint32_t const uRoundingAdd    = (fFcw & X86_FCW_RC_MASK) == X86_FCW_RC_NEAREST
+                                   ? RT_BIT_64(RTFLOAT80U_FRACTION_BITS - RTFLOAT64U_FRACTION_BITS - 1) /* 0x400 */
+                                   : (fFcw & X86_FCW_RC_MASK) == (fSignIn ? X86_FCW_RC_DOWN : X86_FCW_RC_UP)
+                                   ? fRoundedOffMask
+                                   : 0;
+    uint32_t       fRoundedOff     = uMantissaIn & fRoundedOffMask;
+
+    /*
+     * Deal with potential overflows/underflows first, optimizing for none.
+     * 0 and MAX are used for special values; MAX-1 may be rounded up to MAX.
+     */
+    int32_t iExponentOut = (int32_t)iExponentIn + RTFLOAT64U_EXP_BIAS;
+    if ((uint32_t)iExponentOut - 1 < (uint32_t)(RTFLOAT64U_EXP_MAX - 3))
+    { /* likely? */ }
+    /*
+     * Underflow if the exponent is zero or negative.  We attempt to map this
+     * to a subnormal number when possible, with some additional trickery ofc.
+     */
+    else if (iExponentOut <= 0)
+    {
+        bool const fIsTiny = iExponentOut < 0
+                          || UINT64_MAX - uMantissaIn > uRoundingAdd;
+        if (!(fFcw & X86_FCW_UM) && fIsTiny)
+            /* Note! 754-1985 sec 7.4 has something about bias adjust of 1536 here, not in 2008 & 2019. Perhaps only 8087 & 287? */
+            return fFsw | X86_FSW_UE | X86_FSW_ES | X86_FSW_B;
+
+        if (iExponentOut <= 0)
+        {
+            uMantissaIn = iExponentOut <= -63
+                        ? uMantissaIn != 0
+                        : (uMantissaIn >> (-iExponentOut + 1)) | ((uMantissaIn & (RT_BIT_64(-iExponentOut + 1) - 1)) != 0);
+            fRoundedOff = uMantissaIn & fRoundedOffMask;
+            if (fRoundedOff && fIsTiny)
+                fFsw |= X86_FSW_UE;
+            iExponentOut = 0;
+        }
+    }
+    /*
+     * Overflow if at or above max exponent value or if we will reach max
+     * when rounding.  Will return +/-inf or +/-max value depending on
+     * whether we're rounding or not.
+     */
+    else if (   iExponentOut >= RTFLOAT64U_EXP_MAX
+             || (   iExponentOut == RTFLOAT64U_EXP_MAX - 1
+                 && UINT64_MAX - uMantissaIn <= uRoundingAdd))
+    {
+        fFsw |= X86_FSW_OE;
+        if (!(fFcw & X86_FCW_OM))
+            return fFsw | X86_FSW_ES | X86_FSW_B;
+        if (fRoundedOff)
+        {
+            fFsw |= X86_FSW_PE;
+            if (uRoundingAdd)
+                fFsw |= X86_FSW_C1;
+            if (!(fFcw & X86_FCW_PM))
+                fFsw |= X86_FSW_ES | X86_FSW_B;
+        }
+
+        pr64Dst->s64.fSign = fSignIn;
+        if (uRoundingAdd)
+        {   /* Inf */
+            pr64Dst->s64.uExponent = RTFLOAT64U_EXP_MAX;
+            pr64Dst->s64.uFraction = 0;
+        }
+        else
+        {   /* Max */
+            pr64Dst->s64.uExponent = RTFLOAT64U_EXP_MAX - 1;
+            pr64Dst->s64.uFraction = RT_BIT_64(RTFLOAT64U_FRACTION_BITS) - 1;
+        }
+        return fFsw;
+    }
+
+    /*
+     * Normal or subnormal number.
+     */
+    /* Do rounding. */
+    uint64_t uMantissaOut = uMantissaIn + uRoundingAdd;
+    if (uMantissaOut < uMantissaIn)
+    {
+        uMantissaOut >>= 1;
+        iExponentOut++;
+        Assert(iExponentOut < RTFLOAT64U_EXP_MAX); /* checked above */
+    }
+    /** @todo not sure if this is applied correctly, with the above carry check. */
+    else if (   (fFcw & X86_FCW_RC_MASK) == X86_FCW_RC_NEAREST
+             && !(fRoundedOff & RT_BIT_32(RTFLOAT80U_FRACTION_BITS - RTFLOAT64U_FRACTION_BITS - 1)))
+        uMantissaOut &= ~(uint64_t)1;
+
+    /* Truncate the mantissa and set the return value. */
+    uMantissaOut >>= RTFLOAT80U_FRACTION_BITS - RTFLOAT64U_FRACTION_BITS;
+
+    pr64Dst->s64.uFraction = uMantissaOut; /* Note! too big for bitfield if normal. */
+    pr64Dst->s64.uExponent = iExponentOut;
+    pr64Dst->s64.fSign     = fSignIn;
+
+    /* Set status flags related to rounding. */
+    if (fRoundedOff)
+    {
+        fFsw |= X86_FSW_PE;
+        if (uMantissaOut > (uMantissaIn >> (RTFLOAT80U_FRACTION_BITS - RTFLOAT64U_FRACTION_BITS)))
+            fFsw |= X86_FSW_C1;
+        if (!(fFcw & X86_FCW_PM))
+            fFsw |= X86_FSW_ES | X86_FSW_B;
+    }
+
+    return fFsw;
+}
+
+
+/**
+ * @note Exact same logic as iemAImpl_fst_r80_to_r32.
+ */
 IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
                                                  PRTFLOAT64U pr64Dst, PCRTFLOAT80U pr80Src))
 {
-    RT_NOREF(pFpuState, pu16FSW, pr64Dst, pr80Src);
-    AssertReleaseFailed();
+    uint16_t const fFcw = pFpuState->FCW;
+    uint16_t fFsw = (7 << X86_FSW_TOP_SHIFT) | (pFpuState->FSW & (X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3));
+    if (RTFLOAT80U_IS_NORMAL(pr80Src))
+        fFsw = iemAImpl_StoreNormalR80AsR64(pr80Src->s.fSign, pr80Src->s.uMantissa,
+                                            (int32_t)pr80Src->s.uExponent - RTFLOAT80U_EXP_BIAS, fFcw, fFsw, pr64Dst);
+    else if (RTFLOAT80U_IS_ZERO(pr80Src))
+    {
+        pr64Dst->s64.fSign     = pr80Src->s.fSign;
+        pr64Dst->s64.uExponent = 0;
+        pr64Dst->s64.uFraction = 0;
+        Assert(RTFLOAT64U_IS_ZERO(pr64Dst));
+    }
+    else if (RTFLOAT80U_IS_INF(pr80Src))
+    {
+        pr64Dst->s64.fSign     = pr80Src->s.fSign;
+        pr64Dst->s64.uExponent = RTFLOAT64U_EXP_MAX;
+        pr64Dst->s64.uFraction = 0;
+        Assert(RTFLOAT64U_IS_INF(pr64Dst));
+    }
+    else if (RTFLOAT80U_IS_INDEFINITE(pr80Src))
+    {
+        /* Mapped to +/-QNaN */
+        pr64Dst->s64.fSign     = pr80Src->s.fSign;
+        pr64Dst->s64.uExponent = RTFLOAT64U_EXP_MAX;
+        pr64Dst->s64.uFraction = RT_BIT_64(RTFLOAT64U_FRACTION_BITS - 1);
+    }
+    else if (RTFLOAT80U_IS_PSEUDO_INF(pr80Src) || RTFLOAT80U_IS_UNNORMAL(pr80Src) || RTFLOAT80U_IS_PSEUDO_NAN(pr80Src))
+    {
+        /* Pseudo-Inf / Pseudo-NaN / Unnormal -> QNaN (during load, probably) */
+        if (fFcw & X86_FCW_IM)
+        {
+            pr64Dst->s64.fSign     = 1;
+            pr64Dst->s64.uExponent = RTFLOAT64U_EXP_MAX;
+            pr64Dst->s64.uFraction = RT_BIT_64(RTFLOAT64U_FRACTION_BITS - 1);
+            fFsw |= X86_FSW_IE;
+        }
+        else
+            fFsw |= X86_FSW_IE | X86_FSW_ES | X86_FSW_B;
+    }
+    else if (RTFLOAT80U_IS_NAN(pr80Src))
+    {
+        /* IM applies to signalled NaN input only.  Everything is converted to quiet NaN. */
+        if ((fFcw & X86_FCW_IM) || !RTFLOAT80U_IS_SIGNALLING_NAN(pr80Src))
+        {
+            pr64Dst->s64.fSign     = pr80Src->s.fSign;
+            pr64Dst->s64.uExponent = RTFLOAT64U_EXP_MAX;
+            pr64Dst->s64.uFraction = pr80Src->sj64.uFraction >> (RTFLOAT80U_FRACTION_BITS - RTFLOAT64U_FRACTION_BITS);
+            pr64Dst->s64.uFraction |= RT_BIT_64(RTFLOAT64U_FRACTION_BITS - 1);
+            if (RTFLOAT80U_IS_SIGNALLING_NAN(pr80Src))
+                fFsw |= X86_FSW_IE;
+        }
+        else
+            fFsw |= X86_FSW_IE | X86_FSW_ES | X86_FSW_B;
+    }
+    else
+    {
+        /* Denormal values cause both an underflow and a precision exception. */
+        Assert(RTFLOAT80U_IS_DENORMAL(pr80Src) || RTFLOAT80U_IS_PSEUDO_DENORMAL(pr80Src));
+        if (fFcw & X86_FCW_UM)
+        {
+            pr64Dst->s64.fSign     = pr80Src->s.fSign;
+            pr64Dst->s64.uExponent = 0;
+            if ((fFcw & X86_FCW_RC_MASK) == (!pr80Src->s.fSign ? X86_FCW_RC_UP : X86_FCW_RC_DOWN))
+            {
+                pr64Dst->s64.uFraction = 1;
+                fFsw |= X86_FSW_UE | X86_FSW_PE | X86_FSW_C1;
+                if (!(fFcw & X86_FCW_PM))
+                    fFsw |= X86_FSW_ES | X86_FSW_B;
+            }
+            else
+            {
+                pr64Dst->s64.uFraction = 0;
+                fFsw |= X86_FSW_UE | X86_FSW_PE;
+                if (!(fFcw & X86_FCW_PM))
+                    fFsw |= X86_FSW_ES | X86_FSW_B;
+            }
+        }
+        else
+            fFsw |= X86_FSW_UE | X86_FSW_ES | X86_FSW_B;
+    }
+    *pu16FSW = fFsw;
 }
 
…
 
                                                  PRTFLOAT80U pr80Dst, PCRTFLOAT80U pr80Src))
 {
-    RT_NOREF(pFpuState, pu16FSW, pr80Dst, pr80Src);
-    AssertReleaseFailed();
+    /*
+     * FPU status word:
+     *      - TOP is irrelevant, but we must match x86 assembly version (0).
+     *      - C1 is always cleared as we don't have any stack overflows.
+     *      - C0, C2, and C3 are undefined and Intel 10980XE does not touch them.
+     */
+    *pu16FSW = pFpuState->FSW & (X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3); /* see iemAImpl_fld1 */
+    *pr80Dst = *pr80Src;
 }
 
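The two store helpers added here share one rounding scheme: keep the full 80-bit mantissa, add a rounding constant chosen from the FCW rounding control, then drop the guard bits. The standalone sketch below is not VBox code; the function name, the restriction to round-to-nearest, and the omission of overflow, underflow, and FSW bookkeeping are assumptions made for illustration only. It shows how the R64 constants fall out of the bit widths: RTFLOAT80U_FRACTION_BITS (63) minus RTFLOAT64U_FRACTION_BITS (52) leaves 11 guard bits, hence the 0x7ff rounded-off mask and the 0x400 rounding add noted in the comments.

    /* Standalone illustration of the mask + rounding-add scheme, R64 case,
     * round-to-nearest-even only.  Not part of the changeset. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Convert a normal R80 value (64-bit mantissa with explicit integer bit in
     * bit 63, unbiased exponent) to a double.  Hypothetical helper name. */
    static double StoreNormalR80AsR64Nearest(int fSign, uint64_t uMantissa, int32_t iExponent)
    {
        unsigned const cGuardBits  = 63 - 52;                                      /* 11 */
        uint64_t const fRoundedOff = uMantissa & ((UINT64_C(1) << cGuardBits) - 1); /* & 0x7ff */
        uint64_t       uRounded    = uMantissa + (UINT64_C(1) << (cGuardBits - 1)); /* + 0x400 */
        int32_t        iExpOut     = iExponent + 1023;                              /* R64 bias */
        if (uRounded < uMantissa)                 /* carry out of bit 63: renormalize */
        {
            uRounded = (UINT64_C(1) << 63) | (uRounded >> 1);
            iExpOut++;
        }
        else if (fRoundedOff == UINT64_C(1) << (cGuardBits - 1)) /* exact tie: round to even */
            uRounded &= ~(UINT64_C(1) << cGuardBits);

        uint64_t const uBits = ((uint64_t)(fSign != 0) << 63)
                             | ((uint64_t)iExpOut << 52)
                             | ((uRounded >> cGuardBits) & ((UINT64_C(1) << 52) - 1));
        double rd;
        memcpy(&rd, &uBits, sizeof(rd));          /* reinterpret the assembled bits */
        return rd;
    }

    int main(void)
    {
        /* pi as a normal R80: mantissa 0xC90FDAA22168C235, unbiased exponent 1. */
        double const rd = StoreNormalR80AsR64Nearest(0, UINT64_C(0xC90FDAA22168C235), 1);
        printf("%.17g\n", rd);                    /* 3.1415926535897931 */
        return rd == 3.141592653589793 ? 0 : 1;   /* IEEE-754 double 0x400921FB54442D18 */
    }

The same arithmetic with 63 - 23 = 40 guard bits gives the R32 constants (rounded-off mask 0xffffffffff, rounding add 0x8000000000) used by iemAImpl_StoreNormalR80AsR32.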