VirtualBox

Changeset 105490 in vbox for trunk


Timestamp: Jul 24, 2024 2:49:29 PM
Author:    vboxsync
Message:

VMM/IEM: Basic infrastructure to natively recompile SIMD floating point instructions, bugref:10652

SIMD floating point behavior depends on the guest MXCSR value, which needs to be written to the host's
floating point control register (MXCSR on x86, FPCR on arm64, where a conversion is required) and restored
to the host's value when the TB finishes execution, to avoid inconsistencies in case the guest changes MXCSR.
The ARM implementation does not fully conform to x86 behavior: the default NaN has the sign bit clear on
arm64 but set on x86, there are rounding differences, and earlier ARMv8 revisions don't support the FPCR.FIZ
and FPCR.AH features. It should still work out as long as the guest doesn't try to do funny stuff.
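
For orientation, here is a minimal C sketch of the MXCSR-to-FPCR mapping that the arm64 emitter in this changeset generates inline (a hypothetical standalone helper for illustration only, assuming the documented MXCSR and FPCR bit layouts; the committed code emits equivalent native instructions rather than calling such a helper):

    #include <stdint.h>

    /* Hypothetical illustration: collapse the guest MXCSR flush-to-zero bits and
     * rounding mode into a host FPCR value, without FEAT_AFP (so MXCSR.DAZ cannot
     * be mapped exactly and both DAZ and FZ end up as FPCR.FZ). */
    static uint64_t mxcsrToFpcrSketch(uint32_t uMxCsr)
    {
        uint64_t uFpcr = 0;

        /* MXCSR.FZ (bit 15) or MXCSR.DAZ (bit 6) -> FPCR.FZ (bit 24). */
        if (uMxCsr & ((UINT32_C(1) << 15) | (UINT32_C(1) << 6)))
            uFpcr |= UINT64_C(1) << 24;

        /* MXCSR.RC[14:13] -> FPCR.RMode[23:22]; the two bits swap places because
         * round-down and round-up have opposite encodings on the two architectures. */
        uFpcr |= (uint64_t)((uMxCsr >> 13) & 1) << 23;
        uFpcr |= (uint64_t)((uMxCsr >> 14) & 1) << 22;

        return uFpcr;
    }
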

Location: trunk/src/VBox/VMM
Files:    7 edited

Legend: each diff line shows the old-revision line number, the new-revision line number, then the code. Lines with both numbers are unmodified, lines with only the new number were added, and lines with only the old number were removed.
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veHlpA-arm64.S

    r105318 r105490  
   257    257          brk #1
   258    258
          259
          260  /**
          261   * Restores the FPCR register from the given argument.
          262   *
          263   * @param    u64RegFpCtrl    (x0)      The value to restore FPCR with.
          264   */
          265  ALIGNCODE(IEM_HLP_FUNCTION_ALIGNMENT)
          266  BEGINPROC_HIDDEN iemNativeFpCtrlRegRestore
          267  #ifdef RT_OS_DARWIN
          268          pacibsp
          269  #endif
          270
          271          msr FPCR, x0
          272
          273  #ifdef RT_OS_DARWIN
          274          retab
          275  #else
          276          ret
          277  #endif
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veHlpA.asm

    r105318 r105490  
   281    281  ENDPROC     iemNativeHlpAsmSafeWrapCheckTlbLookup
   282    282
          283
          284  ;;
          285  ; This is wrapper function that restores MXCSR when TB execution finished.
          286  ;
          287  ; @param    uRegFpCtrl  (gcc:rdi, msc:rcx)  The MXCSR value to restore.
          288  ;
          289  ALIGNCODE(16)
          290  BEGINPROC   iemNativeFpCtrlRegRestore
          291          sub     xSP, 4
          292  %ifdef RT_OS_WINDOWS
          293          mov     [xSP], edx
          294  %else
          295          mov     [xSP], edi
          296  %endif
          297
          298          ldmxcsr [xSP]
          299          add     xSP, 4
          300          ret
          301  ENDPROC     iemNativeFpCtrlRegRestore
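
For comparison, the effect of the restore helper above can be expressed in C with the SSE intrinsic _mm_setcsr() (a hedged sketch, not the code path the recompiler uses; the assembly wrapper exists so it can be called with a well-defined ABI from the TB epilogue):

    #include <stdint.h>
    #include <xmmintrin.h>

    /* Hedged sketch: what iemNativeFpCtrlRegRestore does on x86, expressed with the
     * standard SSE control/status intrinsic instead of hand-written ldmxcsr. */
    static void fpCtrlRegRestoreSketch(uint64_t uRegFpCtrl)
    {
        _mm_setcsr((uint32_t)uRegFpCtrl);   /* reload the saved MXCSR value */
    }

    /* Example pairing: uint32_t uSaved = _mm_getcsr();  ...run TB...  fpCtrlRegRestoreSketch(uSaved); */
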
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veLiveness.cpp

    r105445 r105490  
  1063   1063  #define IEM_MC_NATIVE_EMIT_1(a_fnEmitter, a0)                                                   NOP()
  1064   1064  #define IEM_MC_NATIVE_EMIT_2(a_fnEmitter, a0, a1)                                               NOP()
         1065  #define IEM_MC_NATIVE_EMIT_2_EX(a_fnEmitter, a0, a1)                                            NOP()
  1065   1066  #define IEM_MC_NATIVE_EMIT_3(a_fnEmitter, a0, a1, a2)                                           NOP()
  1066   1067  #define IEM_MC_NATIVE_EMIT_4(a_fnEmitter, a0, a1, a2, a3)                                       NOP()
     
  1118   1119  #define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)              NOP()
  1119   1120
  1120          #define IEM_MC_PREPARE_FPU_USAGE()                                                              NOP()
  1121          #define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ()                                                   NOP()
  1122          #define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE()                                                 NOP()
  1123
  1124          #define IEM_MC_PREPARE_SSE_USAGE()                                                              NOP()
  1125          #define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ()                                                   NOP()
  1126          #define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE()                                                 NOP()
  1127
  1128          #define IEM_MC_PREPARE_AVX_USAGE()                                                              NOP()
  1129          #define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ()                                                   NOP()
  1130          #define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE()                                                 NOP()
         1121  #define IEM_MC_PREPARE_FPU_USAGE()                                                              IEM_LIVENESS_MXCSR_INPUT() /* fxrstor */
         1122  #define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ()                                                   IEM_LIVENESS_MXCSR_INPUT()
         1123  #define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE()                                                  IEM_LIVENESS_MXCSR_INPUT()
         1124
         1125  #define IEM_MC_PREPARE_SSE_USAGE()                                                              IEM_LIVENESS_MXCSR_INPUT()
         1126  #define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ()                                                   IEM_LIVENESS_MXCSR_INPUT()
         1127  #define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE()                                                 IEM_LIVENESS_MXCSR_INPUT()
         1128
         1129  #define IEM_MC_PREPARE_AVX_USAGE()                                                              IEM_LIVENESS_MXCSR_INPUT()
         1130  #define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ()                                                   IEM_LIVENESS_MXCSR_INPUT()
         1131  #define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE()                                                 IEM_LIVENESS_MXCSR_INPUT()
  1131   1132
  1132   1133  #define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1)                                             NOP()
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h

    r105489 r105490  
    79     79  #endif
    80     80
           81  #if defined(IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS) && !defined(IEMNATIVE_WITH_SIMD_REG_ALLOCATOR)
           82  # error "IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS requires IEMNATIVE_WITH_SIMD_REG_ALLOCATOR"
           83  #endif
    81     84
    82     85
     
  3365   3368                                                      | IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_SSE
  3366   3369                                                      | IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_DEVICE_NOT_AVAILABLE);
         3370  # endif
         3371
         3372  # ifdef IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
         3373      /* Mark the host floating point control register as not synced if MXCSR is modified. */
         3374      if (fGstShwFlush & RT_BIT_64(kIemNativeGstReg_MxCsr))
         3375          pReNative->fSimdRaiseXcptChecksEmitted &= ~IEMNATIVE_SIMD_HOST_FP_CTRL_REG_SYNCED;
  3367   3376  # endif
  3368   3377  #endif
     
  8756   8765  DECL_INLINE_THROW(uint32_t) iemNativeEmitPrepareFpuForUse(PIEMRECOMPILERSTATE pReNative, uint32_t off, bool fForChange)
  8757   8766  {
  8758              /** @todo this needs a lot more work later. */
         8767  #ifndef IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
  8759   8768      RT_NOREF(pReNative, fForChange);
         8769  #else
         8770      if (   !(pReNative->fSimdRaiseXcptChecksEmitted & IEMNATIVE_SIMD_HOST_FP_CTRL_REG_SYNCED)
         8771          && fForChange)
         8772      {
         8773  # ifdef RT_ARCH_AMD64
         8774
         8775          /* Need to save the host MXCSR the first time, and clear the exception flags. */
         8776          if (!(pReNative->fSimdRaiseXcptChecksEmitted & IEMNATIVE_SIMD_HOST_FP_CTRL_REG_SAVED))
         8777          {
         8778              PIEMNATIVEINSTR pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
         8779
         8780              /* stmxcsr */
         8781              if (IEMNATIVE_REG_FIXED_PVMCPU >= 8)
         8782                  pbCodeBuf[off++] = X86_OP_REX_B;
         8783              pbCodeBuf[off++] = 0x0f;
         8784              pbCodeBuf[off++] = 0xae;
         8785              pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, 3, IEMNATIVE_REG_FIXED_PVMCPU & 7);
         8786              pbCodeBuf[off++] = RT_BYTE1(RT_UOFFSETOF(VMCPU, iem.s.uRegFpCtrl));
         8787              pbCodeBuf[off++] = RT_BYTE2(RT_UOFFSETOF(VMCPU, iem.s.uRegFpCtrl));
         8788              pbCodeBuf[off++] = RT_BYTE3(RT_UOFFSETOF(VMCPU, iem.s.uRegFpCtrl));
         8789              pbCodeBuf[off++] = RT_BYTE4(RT_UOFFSETOF(VMCPU, iem.s.uRegFpCtrl));
         8790              IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
         8791
         8792              pReNative->fSimdRaiseXcptChecksEmitted |= IEMNATIVE_SIMD_HOST_FP_CTRL_REG_SAVED;
         8793          }
         8794
         8795          uint8_t const idxRegTmp = iemNativeRegAllocTmp(pReNative, &off, false /*fPreferVolatile*/);
         8796          uint8_t const idxRegMxCsr = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_MxCsr, kIemNativeGstRegUse_ReadOnly);
         8797
         8798          /*
         8799           * Mask any exceptions and clear the exception status and save into MXCSR,
         8800           * taking a detour through memory here because ldmxcsr/stmxcsr don't support
         8801           * a register source/target (sigh).
         8802           */
         8803          off = iemNativeEmitLoadGprFromGpr32(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, idxRegMxCsr);
         8804          off = iemNativeEmitOrGpr32ByImm(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, X86_MXCSR_XCPT_MASK);
         8805          off = iemNativeEmitAndGpr32ByImm(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, ~X86_MXCSR_XCPT_FLAGS);
         8806          off = iemNativeEmitStoreGprToVCpuU32(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, RT_UOFFSETOF(VMCPU, iem.s.uRegMxcsrTmp));
         8807
         8808          PIEMNATIVEINSTR pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
         8809
         8810          /* ldmxcsr */
         8811          if (IEMNATIVE_REG_FIXED_PVMCPU >= 8)
         8812              pbCodeBuf[off++] = X86_OP_REX_B;
         8813          pbCodeBuf[off++] = 0x0f;
         8814          pbCodeBuf[off++] = 0xae;
         8815          pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, 2, IEMNATIVE_REG_FIXED_PVMCPU & 7);
         8816          pbCodeBuf[off++] = RT_BYTE1(RT_UOFFSETOF(VMCPU, iem.s.uRegMxcsrTmp));
         8817          pbCodeBuf[off++] = RT_BYTE2(RT_UOFFSETOF(VMCPU, iem.s.uRegMxcsrTmp));
         8818          pbCodeBuf[off++] = RT_BYTE3(RT_UOFFSETOF(VMCPU, iem.s.uRegMxcsrTmp));
         8819          pbCodeBuf[off++] = RT_BYTE4(RT_UOFFSETOF(VMCPU, iem.s.uRegMxcsrTmp));
         8820          IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
         8821
         8822          iemNativeRegFreeTmp(pReNative, idxRegMxCsr);
         8823          iemNativeRegFreeTmp(pReNative, idxRegTmp);
         8824
         8825  # elif defined(RT_ARCH_ARM64)
         8826          uint8_t const idxRegTmp = iemNativeRegAllocTmp(pReNative, &off, false /*fPreferVolatile*/);
         8827
         8828          /* Need to save the host floating point control register the first time, clear FPSR. */
         8829          if (!(pReNative->fSimdRaiseXcptChecksEmitted & IEMNATIVE_SIMD_HOST_FP_CTRL_REG_SAVED))
         8830          {
         8831              PIEMNATIVEINSTR pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
         8832              pu32CodeBuf[off++] = Armv8A64MkInstrMsr(ARMV8_A64_REG_XZR, ARMV8_AARCH64_SYSREG_FPSR);
         8833              pu32CodeBuf[off++] = Armv8A64MkInstrMrs(idxRegTmp, ARMV8_AARCH64_SYSREG_FPCR);
         8834              off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPU, iem.s.uRegFpCtrl));
         8835              pReNative->fSimdRaiseXcptChecksEmitted |= IEMNATIVE_SIMD_HOST_FP_CTRL_REG_SAVED;
         8836          }
         8837
         8838          /*
         8839           * Translate MXCSR to FPCR.
         8840           *
         8841           * Unfortunately we can't emulate the exact behavior of MXCSR as we can't take
         8842           * FEAT_AFP on arm64 for granted (My M2 Macbook doesn't has it). So we can't map
         8843           * MXCSR.DAZ to FPCR.FIZ and MXCSR.FZ to FPCR.FZ with FPCR.AH being set.
         8844           * We can only use FPCR.FZ which will flush inputs _and_ output de-normals to zero.
         8845           */
         8846          /** @todo Check the host supported flags (needs additional work to get the host features from CPUM)
         8847           *        and implement alternate handling if FEAT_AFP is present. */
         8848          uint8_t const idxRegMxCsr = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_MxCsr, kIemNativeGstRegUse_ReadOnly);
         8849
         8850          PIEMNATIVEINSTR pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
         8851
         8852          /* First make sure that there is nothing set for the upper 16-bits (X86_MXCSR_MM, which we don't emulate right now). */
         8853          pu32CodeBuf[off++] = Armv8A64MkInstrUxth(idxRegTmp, idxRegMxCsr);
         8854
         8855          /* If either MXCSR.FZ or MXCSR.DAZ is set FPCR.FZ will be set. */
         8856          pu32CodeBuf[off++] = Armv8A64MkInstrUbfx(IEMNATIVE_REG_FIXED_TMP0, idxRegTmp, X86_MXCSR_DAZ_BIT, 1);
         8857          pu32CodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegTmp,              idxRegTmp, X86_MXCSR_FZ_BIT);
         8858          pu32CodeBuf[off++] = Armv8A64MkInstrOrr(idxRegTmp, idxRegTmp, IEMNATIVE_REG_FIXED_TMP0);
         8859          pu32CodeBuf[off++] = Armv8A64MkInstrLslImm(idxRegTmp, idxRegTmp, ARMV8_FPCR_FZ_BIT);
         8860
         8861          /*
         8862           * Init the rounding mode, the layout differs between MXCSR.RM[14:13] and FPCR.RMode[23:22]:
         8863           *
         8864           * Value    MXCSR   FPCR
         8865           *   0       RN      RN
         8866           *   1       R-      R+
         8867           *   2       R+      R-
         8868           *   3       RZ      RZ
         8869           *
         8870           * Conversion can be achieved by switching bit positions
         8871           */
         8872          pu32CodeBuf[off++] = Armv8A64MkInstrLsrImm(IEMNATIVE_REG_FIXED_TMP0, idxRegMxCsr, X86_MXCSR_RC_SHIFT);
         8873          pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxRegTmp, IEMNATIVE_REG_FIXED_TMP0, 14, 1);
         8874          pu32CodeBuf[off++] = Armv8A64MkInstrLsrImm(IEMNATIVE_REG_FIXED_TMP0, idxRegMxCsr, X86_MXCSR_RC_SHIFT + 1);
         8875          pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxRegTmp, IEMNATIVE_REG_FIXED_TMP0, 13, 1);
         8876
         8877          /* Write the value to FPCR. */
         8878          pu32CodeBuf[off++] = Armv8A64MkInstrMsr(idxRegTmp, ARMV8_AARCH64_SYSREG_FPCR);
         8879
         8880          IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
         8881          iemNativeRegFreeTmp(pReNative, idxRegMxCsr);
         8882          iemNativeRegFreeTmp(pReNative, idxRegTmp);
         8883  # else
         8884  #  error "Port me"
         8885  # endif
         8886          pReNative->fSimdRaiseXcptChecksEmitted |= IEMNATIVE_SIMD_HOST_FP_CTRL_REG_SYNCED;
         8887      }
         8888  #endif
  8760   8889      return off;
  8761   8890  }
     
  9980  10109      AssertRelease(!(RT_BIT_32(idxRegMxCsr) & IEMNATIVE_CALL_VOLATILE_GREG_MASK));
  9981  10110
        10111  #if 0 /* This is not required right now as the called helper will set up the SSE/AVX state if it is an assembly one. */
  9982  10112      /*
  9983  10113       * Need to do the FPU preparation.
  9984  10114       */
  9985  10115      off = iemNativeEmitPrepareFpuForUse(pReNative, off, true /*fForChange*/);
        10116  #endif
  9986  10117
  9987  10118      /*
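
As a quick sanity check on the rounding-mode table in the iemNativeEmitPrepareFpuForUse hunk above, swapping the two rounding-control bits is enough to translate the encoding (a small standalone test, assuming nothing beyond the documented MXCSR.RC and FPCR.RMode encodings):

    #include <assert.h>
    #include <stdint.h>

    /* Swap the two bits of a 2-bit rounding-mode field (MXCSR.RC value -> FPCR.RMode value). */
    static uint32_t rcToRMode(uint32_t uRc)
    {
        return ((uRc & 1) << 1) | ((uRc >> 1) & 1);
    }

    int main(void)
    {
        assert(rcToRMode(0) == 0);  /* RN -> RN */
        assert(rcToRMode(1) == 2);  /* x86 round down -> arm64 RM (round toward -inf) */
        assert(rcToRMode(2) == 1);  /* x86 round up   -> arm64 RP (round toward +inf) */
        assert(rcToRMode(3) == 3);  /* RZ -> RZ */
        return 0;
    }
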
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdRecompiler.cpp

    r105191 r105490  
   114    114  #endif
   115    115
          116  #if defined(IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS) && !defined(IEMNATIVE_WITH_SIMD_REG_ALLOCATOR)
          117  # error "IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS requires IEMNATIVE_WITH_SIMD_REG_ALLOCATOR"
          118  #endif
   116    119
   117    120
     
  2844   2847          pVCpu->iem.s.pvTbFramePointerR3 = NULL;
  2845   2848  # endif
         2849  # ifdef IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
         2850          /* Restore FPCR/MXCSR if the TB modified it. */
         2851          if (pVCpu->iem.s.uRegFpCtrl != IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED)
         2852          {
         2853              iemNativeFpCtrlRegRestore(pVCpu->iem.s.uRegFpCtrl);
         2854              /* Reset for the next round saving us an unconditional instruction on next TB entry. */
         2855              pVCpu->iem.s.uRegFpCtrl = IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED;
         2856          }
         2857  # endif
  2846   2858  # ifdef IEMNATIVE_STRICT_EFLAGS_SKIPPING
  2847   2859          Assert(pVCpu->iem.s.fSkippingEFlags == 0);
     
  3157   3169                  pVCpu->iem.s.cInstructions += pVCpu->iem.s.idxTbCurInstr;
  3158   3170  # endif
         3171
         3172  #ifdef IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
         3173                  /* Restore FPCR/MXCSR if the TB modified it. */
         3174                  if (pVCpu->iem.s.uRegFpCtrl != IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED)
         3175                  {
         3176                      iemNativeFpCtrlRegRestore(pVCpu->iem.s.uRegFpCtrl);
         3177                      /* Reset for the next round saving us an unconditional instruction on next TB entry. */
         3178                      pVCpu->iem.s.uRegFpCtrl = IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED;
         3179                  }
         3180  #endif
  3159   3181              }
  3160   3182  #endif
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r105466 r105490  
   117    117  #if defined(VBOX_WITH_STATISTICS) || defined(DOXYGEN_RUNNING)
   118    118  # define IEM_WITH_TLB_STATISTICS
          119  #endif
          120
          121  /** @def IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
          122   * Enable this to use native emitters for certain SIMD FP operations. */
          123  #if 1 || defined(DOXYGEN_RUNNING)
          124  # define IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
   119    125  #endif
   120    126
     
  2046   2052  #else
  2047   2053      R3PTRTYPE(void *)       pvUnusedR3;
         2054  #endif
         2055  #ifdef IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
         2056      /** The saved host floating point control register (MXCSR on x86, FPCR on arm64)
         2057       * needing restore when the TB finished, IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED indicates the TB
         2058       * didn't modify it so we don't need to restore it. */
         2059  # ifdef RT_ARCH_AMD64
         2060      uint32_t                uRegFpCtrl;
         2061      /** Temporary copy of MXCSR for stmxcsr/ldmxcsr (so we don't have to fiddle with stack pointers). */
         2062      uint32_t                uRegMxcsrTmp;
         2063  # elif defined(RT_ARCH_ARM64)
         2064      uint64_t                uRegFpCtrl;
         2065  # else
         2066  #  error "Port me"
         2067  # endif
         2068  #else
         2069      uint64_t                u64Unused;
  2048   2070  #endif
  2049   2071      /** Fixed TB used for threaded recompilation.
     
  2339   2361
  2340   2362  #ifdef IEM_WITH_TLB_TRACE
  2341              uint64_t                au64Padding[3];
         2363      uint64_t                au64Padding[2];
  2342   2364  #else
  2343              uint64_t                au64Padding[5];
         2365      uint64_t                au64Padding[4];
  2344   2366  #endif
  2345   2367      /** @} */
     
  2390   2412  typedef IEMCPU const *PCIEMCPU;
  2391   2413
         2414  /** @def IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED
         2415   * Value indicating the TB didn't modified the floating point control register.
         2416   * @note Neither FPCR nor MXCSR accept this as a valid value (MXCSR is not fully populated,
         2417   *       FPCR has the upper 32-bit reserved), so this is safe. */
         2418  #if defined(IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS) || defined(DOXYGEN_RUNNING)
         2419  # ifdef RT_ARCH_AMD64
         2420  #  define IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED UINT32_MAX
         2421  # elif defined(RT_ARCH_ARM64)
         2422  #  define IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED UINT64_MAX
         2423  # else
         2424  #  error "Port me"
         2425  # endif
         2426  #endif
  2392   2427
  2393   2428  /** @def IEM_GET_CTX
  • trunk/src/VBox/VMM/include/IEMN8veRecompiler.h

    r105375 r105490  
  1349   1349  /** Flag for indicating that IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() has emitted code in the current TB. */
  1350   1350  # define IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_AVX                         RT_BIT_32(3)
         1351  # ifdef IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
         1352  /** Flag indicating that the guest MXCSR was synced to the host floating point control register. */
         1353  #  define IEMNATIVE_SIMD_HOST_FP_CTRL_REG_SYNCED                                    RT_BIT_32(4)
         1354  /** Flag indicating whether the host floating point control register was saved before overwriting it. */
         1355  #  define IEMNATIVE_SIMD_HOST_FP_CTRL_REG_SAVED                                     RT_BIT_32(5)
         1356  # endif
  1351   1357  #endif
  1352   1358
     
  2581   2587  #endif
  2582   2588
         2589  #ifdef IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
         2590  extern "C" IEM_DECL_NATIVE_HLP_DEF(int, iemNativeFpCtrlRegRestore, (uint64_t u64RegFpCtrl));
         2591  #endif
         2592
  2583   2593  #endif /* !RT_IN_ASSEMBLER - ASM-NOINC-END */
  2584   2594