VirtualBox

Changeset 105856 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Aug 24, 2024 1:54:27 AM (5 months ago)
Author:
vboxsync
Message:

VMM/IEM: Don't flush PC prior to indirect jumps, flush it when in the #GP(0) code path. bugref:10720 bugref:10373

Location:
trunk/src/VBox/VMM
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h

    r105855 r105856  
    686686
    687687
     688/*********************************************************************************************************************************
     689*   Common code for changing PC/RIP/EIP/IP.                                                                                      *
     690*********************************************************************************************************************************/
     691
     692/**
     693 * Emits code to check if the content of @a idxAddrReg is a canonical address,
     694 * raising a \#GP(0) if it isn't.
     695 *
     696 * @returns New code buffer offset, UINT32_MAX on failure.
     697 * @param   pReNative       The native recompile state.
     698 * @param   off             The code buffer offset.
     699 * @param   idxAddrReg      The host register with the address to check.
     700 * @param   idxInstr        The current instruction.
     701 */
     702DECL_FORCE_INLINE_THROW(uint32_t)
     703iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxAddrReg, uint8_t idxInstr)
     704{
     705    /*
     706     * Make sure we don't have any outstanding guest register writes as we may
     707     * raise a #GP(0) and all guest registers must be up to date in CPUMCTX.
     708     */
     709    off = iemNativeRegFlushPendingWrites(pReNative, off);
     710
     711#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
     712    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
     713#else
     714    RT_NOREF(idxInstr);
     715#endif
     716
     717#ifdef RT_ARCH_AMD64
     718    /*
     719     * if ((((uint32_t)(a_u64Addr >> 32) + UINT32_C(0x8000)) >> 16) != 0)
     720     *     return raisexcpt();
     721     * ---- this variant avoid loading a 64-bit immediate, but is an instruction longer.
     722     */
     723    uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off);
     724
     725    off = iemNativeEmitLoadGprFromGpr(pReNative, off, iTmpReg, idxAddrReg);
     726    off = iemNativeEmitShiftGprRight(pReNative, off, iTmpReg, 32);
     727    off = iemNativeEmitAddGpr32Imm(pReNative, off, iTmpReg, (int32_t)0x8000);
     728    off = iemNativeEmitShiftGprRight(pReNative, off, iTmpReg, 16);
     729    off = iemNativeEmitJnzTbExit(pReNative, off, kIemNativeLabelType_RaiseGp0);
     730
     731    iemNativeRegFreeTmp(pReNative, iTmpReg);
     732
     733#elif defined(RT_ARCH_ARM64)
     734    /*
     735     * if ((((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000)) >> 48) != 0)
     736     *     return raisexcpt();
     737     * ----
     738     *     mov     x1, 0x800000000000
     739     *     add     x1, x0, x1
     740     *     cmp     xzr, x1, lsr 48
     741     *     b.ne    .Lraisexcpt
     742     */
     743    uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off);
     744
     745    off = iemNativeEmitLoadGprImm64(pReNative, off, iTmpReg, UINT64_C(0x800000000000));
     746    off = iemNativeEmitAddTwoGprs(pReNative, off, iTmpReg, idxAddrReg);
     747    off = iemNativeEmitCmpArm64(pReNative, off, ARMV8_A64_REG_XZR, iTmpReg, true /*f64Bit*/, 48 /*cShift*/, kArmv8A64InstrShift_Lsr);
     748    off = iemNativeEmitJnzTbExit(pReNative, off, kIemNativeLabelType_RaiseGp0);
     749
     750    iemNativeRegFreeTmp(pReNative, iTmpReg);
     751
     752#else
     753# error "Port me"
     754#endif
     755    return off;
     756}
     757
     758
     759/**
     760 * Emits code to check if the content of @a idxAddrReg is a canonical address,
     761 * raising a \#GP(0) if it isn't.
     762 *
     763 * Caller makes sure everything is flushed, except maybe PC.
     764 *
     765 * @returns New code buffer offset, UINT32_MAX on failure.
     766 * @param   pReNative       The native recompile state.
     767 * @param   off             The code buffer offset.
     768 * @param   idxAddrReg      The host register with the address to check.
     769 * @param   idxOldPcReg     Register holding the old PC that offPc is relative
     770 *                          to if available, otherwise UINT8_MAX.
     771 * @param   idxInstr        The current instruction.
     772 * @tparam  a_fAbsolute     Not sure why we have this yet.
     773 */
     774template<bool const a_fAbsolute>
     775DECL_FORCE_INLINE_THROW(uint32_t)
     776iemNativeEmitCheckGprCanonicalMaybeRaiseGp0WithOldPc(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     777                                                     uint8_t idxAddrReg, uint8_t idxOldPcReg, uint8_t idxInstr)
     778{
     779#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
     780    Assert(pReNative->Core.bmGstRegShadowDirty == 0);
     781#endif
     782
     783#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
     784# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     785    if (!pReNative->Core.offPc)
     786# endif
     787        off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
     788#else
     789    RT_NOREF(idxInstr);
     790#endif
     791
     792#ifdef RT_ARCH_AMD64
     793    /*
     794     * if ((((uint32_t)(a_u64Addr >> 32) + UINT32_C(0x8000)) >> 16) != 0)
     795     *     return raisexcpt();
      796     * ---- this variant avoids loading a 64-bit immediate, but is an instruction longer.
     797     */
     798    uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off);
     799
     800    off = iemNativeEmitLoadGprFromGpr(pReNative, off, iTmpReg, idxAddrReg);
     801    off = iemNativeEmitShiftGprRight(pReNative, off, iTmpReg, 32);
     802    off = iemNativeEmitAddGpr32Imm(pReNative, off, iTmpReg, (int32_t)0x8000);
     803    off = iemNativeEmitShiftGprRight(pReNative, off, iTmpReg, 16);
     804
     805#elif defined(RT_ARCH_ARM64)
     806    /*
     807     * if ((((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000)) >> 48) != 0)
     808     *     return raisexcpt();
     809     * ----
     810     *     mov     x1, 0x800000000000
     811     *     add     x1, x0, x1
     812     *     cmp     xzr, x1, lsr 48
     813     *     b.ne    .Lraisexcpt
     814     */
     815    uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off);
     816
     817    off = iemNativeEmitLoadGprImm64(pReNative, off, iTmpReg, UINT64_C(0x800000000000));
     818    off = iemNativeEmitAddTwoGprs(pReNative, off, iTmpReg, idxAddrReg);
     819    off = iemNativeEmitCmpArm64(pReNative, off, ARMV8_A64_REG_XZR, iTmpReg, true /*f64Bit*/, 48 /*cShift*/, kArmv8A64InstrShift_Lsr);
     820#else
     821# error "Port me"
     822#endif
     823
     824#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     825    if (pReNative->Core.offPc)
     826    {
     827        /** @todo On x86, it is said that conditional jumps forward are statically
      828         *        predicted as not taken, so this isn't a very good construct.
     829         *        Investigate whether it makes sense to invert it and add another
     830         *        jump.  Also, find out wtf the static predictor does here on arm! */
     831        uint32_t const offFixup = off;
     832        off = iemNativeEmitJzToFixed(pReNative, off, off + 16 /*8-bit suffices*/);
     833
     834        /* Raising a GP(0), but first we need to update cpum.GstCtx.rip. */
     835        if (idxOldPcReg == UINT8_MAX)
     836        {
     837            idxOldPcReg = iTmpReg;
     838            off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxOldPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     839        }
     840        off = iemNativeEmitAddGprImm(pReNative, off, idxOldPcReg, pReNative->Core.offPc);
     841        off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxOldPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     842# ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
     843        off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
     844# endif
     845        off = iemNativeEmitTbExit(pReNative, off, kIemNativeLabelType_RaiseGp0, false /*fActuallyExitingTb*/);
     846        iemNativeFixupFixedJump(pReNative, offFixup, off);
     847    }
     848    else
     849#endif
     850        off = iemNativeEmitJnzTbExit(pReNative, off, kIemNativeLabelType_RaiseGp0);
     851
     852    iemNativeRegFreeTmp(pReNative, iTmpReg);
     853
     854    return off;
     855}
     856
     857
     858/**
      859 * Emits code to check that the content of @a idxAddrReg is within the limit
     860 * of CS, raising a \#GP(0) if it isn't.
     861 *
     862 * @returns New code buffer offset; throws VBox status code on error.
     863 * @param   pReNative       The native recompile state.
     864 * @param   off             The code buffer offset.
     865 * @param   idxAddrReg      The host register (32-bit) with the address to
     866 *                          check.
     867 * @param   idxInstr        The current instruction.
     868 */
     869DECL_FORCE_INLINE_THROW(uint32_t)
     870iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     871                                                      uint8_t idxAddrReg, uint8_t idxInstr)
     872{
     873    /*
     874     * Make sure we don't have any outstanding guest register writes as we may
      875     * raise a #GP(0) and all guest registers must be up to date in CPUMCTX.
     876     */
     877    off = iemNativeRegFlushPendingWrites(pReNative, off);
     878
     879#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
     880    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
     881#else
     882    RT_NOREF(idxInstr);
     883#endif
     884
     885    uint8_t const idxRegCsLim = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
     886                                                                (IEMNATIVEGSTREG)(kIemNativeGstReg_SegLimitFirst + X86_SREG_CS),
     887                                                                kIemNativeGstRegUse_ReadOnly);
     888
     889    off = iemNativeEmitCmpGpr32WithGpr(pReNative, off, idxAddrReg, idxRegCsLim);
     890    off = iemNativeEmitJaTbExit(pReNative, off, kIemNativeLabelType_RaiseGp0);
     891
     892    iemNativeRegFreeTmp(pReNative, idxRegCsLim);
     893    return off;
     894}
     895
     896
     897
     898
     899/**
      900 * Emits code to check that the content of @a idxAddrReg is within the limit
     901 * of CS, raising a \#GP(0) if it isn't.
     902 *
     903 * Caller makes sure everything is flushed, except maybe PC.
     904 *
     905 * @returns New code buffer offset; throws VBox status code on error.
     906 * @param   pReNative       The native recompile state.
     907 * @param   off             The code buffer offset.
     908 * @param   idxAddrReg      The host register (32-bit) with the address to
     909 *                          check.
     910 * @param   idxOldPcReg     Register holding the old PC that offPc is relative
     911 *                          to if available, otherwise UINT8_MAX.
     912 * @param   idxInstr        The current instruction.
     913 * @tparam  a_fAbsolute     Not sure why we have this yet.
     914 */
     915template<bool const a_fAbsolute>
     916DECL_FORCE_INLINE_THROW(uint32_t)
     917iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0WithOldPc(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     918                                                               uint8_t idxAddrReg, uint8_t idxOldPcReg, uint8_t idxInstr)
     919{
     920#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
     921    Assert(pReNative->Core.bmGstRegShadowDirty == 0);
     922#endif
     923
     924#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
     925# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     926    if (!pReNative->Core.offPc)
     927# endif
     928        off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
     929#else
     930    RT_NOREF(idxInstr);
     931#endif
     932
     933    uint8_t const idxRegCsLim = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
     934                                                                (IEMNATIVEGSTREG)(kIemNativeGstReg_SegLimitFirst + X86_SREG_CS),
     935                                                                kIemNativeGstRegUse_ReadOnly);
     936
     937    off = iemNativeEmitCmpGpr32WithGpr(pReNative, off, idxAddrReg, idxRegCsLim);
     938#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     939    if (pReNative->Core.offPc)
     940    {
     941        uint32_t const offFixup = off;
     942        off = iemNativeEmitJbeToFixed(pReNative, off, off + 16 /*8-bit suffices*/);
     943
     944        /* Raising a GP(0), but first we need to update cpum.GstCtx.rip. */
     945        if (idxOldPcReg == UINT8_MAX)
     946        {
     947            idxOldPcReg = idxAddrReg;
     948            off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxOldPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     949        }
     950        off = iemNativeEmitAddGprImm(pReNative, off, idxOldPcReg, pReNative->Core.offPc);
     951        off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxOldPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     952# ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
     953        off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
     954# endif
     955        off = iemNativeEmitTbExit(pReNative, off, kIemNativeLabelType_RaiseGp0, false /*fActuallyExitingTb*/);
     956        iemNativeFixupFixedJump(pReNative, offFixup, off);
     957    }
     958    else
     959#endif
     960        off = iemNativeEmitJaTbExit(pReNative, off, kIemNativeLabelType_RaiseGp0);
     961
     962    iemNativeRegFreeTmp(pReNative, idxRegCsLim);
     963    return off;
     964}
     965
    688966
    689967/*********************************************************************************************************************************
     
    10851363    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVarPc, cbVar);
    10861364
    1087     /* We speculatively modify PC and may raise #GP(0), so make sure the right values are in CPUMCTX. */
    1088     off = iemNativeRegFlushPendingWrites(pReNative, off);
    1089 
    1090 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    1091     Assert(pReNative->Core.offPc == 0);
    1092     STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
    1093 #endif
     1365    /* If we can't rule out a #GP(0) below, flush all dirty register except for
     1366       PC which will be handled specially by the two workers below if they raise a GP. */
     1367    bool const    fMayRaiseGp0 = (f64Bit && cbVar > sizeof(uint32_t)) || (!f64Bit && !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec));
     1368    uint8_t const idxOldPcReg  = fMayRaiseGp0
     1369                               ? iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(pReNative, &off, kIemNativeGstReg_Pc)
     1370                               : UINT8_MAX;
     1371    if (fMayRaiseGp0)
     1372        off = iemNativeRegFlushPendingWrites(pReNative, off, RT_BIT_64(kIemNativeGstReg_Pc) /*fGstShwExcept*/);
    10941373
    10951374    /* Get a register with the new PC loaded from idxVarPc.
     
    10971376    uint8_t const idxPcReg = iemNativeVarRegisterAcquireForGuestReg(pReNative, idxVarPc, kIemNativeGstReg_Pc, &off);
    10981377
    1099     /* Check limit (may #GP(0) + exit TB). */
    1100     if (!f64Bit)
    1101 /** @todo we can skip this test in FLAT 32-bit mode. */
    1102         off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
    1103     /* Check that the address is canonical, raising #GP(0) + exit TB if it isn't. */
    1104     else if (cbVar > sizeof(uint32_t))
    1105         off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
     1378    /* Check that the target is within CS.LIM / is canonical (may #GP(0) + exit TB). */
     1379    if (fMayRaiseGp0)
     1380    {
     1381        if (f64Bit)
     1382            off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0WithOldPc<true>(pReNative, off, idxPcReg, idxOldPcReg, idxInstr);
     1383        else
     1384            off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0WithOldPc<true>(pReNative, off, idxPcReg,
     1385                                                                                       idxOldPcReg, idxInstr);
     1386    }
    11061387
    11071388    /* Store the result. */
    11081389    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
    11091390
    1110 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     1391#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     1392    pReNative->Core.offPc = 0;
     1393    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
     1394# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
    11111395    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
    11121396    pReNative->Core.fDebugPcInitialized = true;
    11131397    Log4(("uPcUpdatingDebug=rip off=%#x\n", off));
    1114 #endif
    1115 
     1398# endif
     1399#endif
     1400
     1401    if (idxOldPcReg != UINT8_MAX)
     1402        iemNativeRegFreeTmp(pReNative, idxOldPcReg);
    11161403    iemNativeVarRegisterRelease(pReNative, idxVarPc);
     11171404    /** @todo implicitly free the variable? */
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r105855 r105856  
    56825682{
    56835683#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    5684     if (!(fGstShwExcept & kIemNativeGstReg_Pc))
     5684    if (!(fGstShwExcept & RT_BIT_64(kIemNativeGstReg_Pc)))
    56855685        off = iemNativeEmitPcWriteback(pReNative, off);
    56865686#else
     
    63686368    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    63696369    RT_NOREF_PV(idxInstr);
    6370     return off;
    6371 }
    6372 
    6373 
    6374 /**
    6375  * Emits code to check if the content of @a idxAddrReg is a canonical address,
    6376  * raising a \#GP(0) if it isn't.
    6377  *
    6378  * @returns New code buffer offset, UINT32_MAX on failure.
    6379  * @param   pReNative       The native recompile state.
    6380  * @param   off             The code buffer offset.
    6381  * @param   idxAddrReg      The host register with the address to check.
    6382  * @param   idxInstr        The current instruction.
    6383  */
    6384 DECL_HIDDEN_THROW(uint32_t)
    6385 iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxAddrReg, uint8_t idxInstr)
    6386 {
    6387     /*
    6388      * Make sure we don't have any outstanding guest register writes as we may
    6389      * raise a #GP(0) and all guest registers must be up to date in CPUMCTX.
    6390      */
    6391     off = iemNativeRegFlushPendingWrites(pReNative, off);
    6392 
    6393 #ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
    6394     off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
    6395 #else
    6396     RT_NOREF(idxInstr);
    6397 #endif
    6398 
    6399 #ifdef RT_ARCH_AMD64
    6400     /*
    6401      * if ((((uint32_t)(a_u64Addr >> 32) + UINT32_C(0x8000)) >> 16) != 0)
    6402      *     return raisexcpt();
    6403      * ---- this variant avoids loading a 64-bit immediate, but is an instruction longer.
    6404      */
    6405     uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off);
    6406 
    6407     off = iemNativeEmitLoadGprFromGpr(pReNative, off, iTmpReg, idxAddrReg);
    6408     off = iemNativeEmitShiftGprRight(pReNative, off, iTmpReg, 32);
    6409     off = iemNativeEmitAddGpr32Imm(pReNative, off, iTmpReg, (int32_t)0x8000);
    6410     off = iemNativeEmitShiftGprRight(pReNative, off, iTmpReg, 16);
    6411     off = iemNativeEmitJnzTbExit(pReNative, off, kIemNativeLabelType_RaiseGp0);
    6412 
    6413     iemNativeRegFreeTmp(pReNative, iTmpReg);
    6414 
    6415 #elif defined(RT_ARCH_ARM64)
    6416     /*
    6417      * if ((((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000)) >> 48) != 0)
    6418      *     return raisexcpt();
    6419      * ----
    6420      *     mov     x1, 0x800000000000
    6421      *     add     x1, x0, x1
    6422      *     cmp     xzr, x1, lsr 48
    6423      *     b.ne    .Lraisexcpt
    6424      */
    6425     uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off);
    6426 
    6427     off = iemNativeEmitLoadGprImm64(pReNative, off, iTmpReg, UINT64_C(0x800000000000));
    6428     off = iemNativeEmitAddTwoGprs(pReNative, off, iTmpReg, idxAddrReg);
    6429     off = iemNativeEmitCmpArm64(pReNative, off, ARMV8_A64_REG_XZR, iTmpReg, true /*f64Bit*/, 48 /*cShift*/, kArmv8A64InstrShift_Lsr);
    6430     off = iemNativeEmitJnzTbExit(pReNative, off, kIemNativeLabelType_RaiseGp0);
    6431 
    6432     iemNativeRegFreeTmp(pReNative, iTmpReg);
    6433 
    6434 #else
    6435 # error "Port me"
    6436 #endif
    6437     return off;
    6438 }
    6439 
    6440 
    6441 /**
    6442  * Emits code to check that the content of @a idxAddrReg is within the limit
    6443  * of CS, raising a \#GP(0) if it isn't.
    6444  *
    6445  * @returns New code buffer offset; throws VBox status code on error.
    6446  * @param   pReNative       The native recompile state.
    6447  * @param   off             The code buffer offset.
    6448  * @param   idxAddrReg      The host register (32-bit) with the address to
    6449  *                          check.
    6450  * @param   idxInstr        The current instruction.
    6451  */
    6452 DECL_HIDDEN_THROW(uint32_t)
    6453 iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    6454                                                       uint8_t idxAddrReg, uint8_t idxInstr)
    6455 {
    6456     /*
    6457      * Make sure we don't have any outstanding guest register writes as we may
    6458      * raise a #GP(0) and all guest registers must be up to date in CPUMCTX.
    6459      */
    6460     off = iemNativeRegFlushPendingWrites(pReNative, off);
    6461 
    6462 #ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
    6463     off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
    6464 #else
    6465     RT_NOREF(idxInstr);
    6466 #endif
    6467 
    6468     uint8_t const idxRegCsLim = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
    6469                                                                 (IEMNATIVEGSTREG)(kIemNativeGstReg_SegLimitFirst + X86_SREG_CS),
    6470                                                                 kIemNativeGstRegUse_ReadOnly);
    6471 
    6472     off = iemNativeEmitCmpGpr32WithGpr(pReNative, off, idxAddrReg, idxRegCsLim);
    6473     off = iemNativeEmitJaTbExit(pReNative, off, kIemNativeLabelType_RaiseGp0);
    6474 
    6475     iemNativeRegFreeTmp(pReNative, idxRegCsLim);
    64766370    return off;
    64776371}
  • trunk/src/VBox/VMM/include/IEMN8veRecompiler.h

    r105855 r105856  
    18131813DECL_HIDDEN_THROW(uint32_t) iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    18141814                                                                        uint8_t idxAddrReg, uint8_t idxInstr);
    1815 DECL_HIDDEN_THROW(uint32_t) iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    1816                                                                                   uint8_t idxAddrReg, uint8_t idxInstr);
    18171815DECL_HIDDEN_THROW(uint32_t) iemNativeEmitLeaGprByGstRegRef(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxGprDst,
    18181816                                                           IEMNATIVEGSTREGREF enmClass, uint8_t idxRegInClass);
     
    24642462                               uint64_t fGstSimdShwExcept = 0)
    24652463{
     2464#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     2465    uint64_t const fWritebackPc            = ~fGstShwExcept & RT_BIT_64(kIemNativeGstReg_Pc);
     2466#else
     2467    uint64_t const fWritebackPc            = 0;
     2468#endif
    24662469#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    24672470    uint64_t const bmGstRegShadowDirty     = pReNative->Core.bmGstRegShadowDirty & ~fGstShwExcept;
     
    24752478#else
    24762479    uint64_t const bmGstSimdRegShadowDirty = 0;
    2477 #endif
    2478 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    2479     uint64_t const fWritebackPc            = ~(fGstShwExcept & kIemNativeGstReg_Pc);
    2480 #else
    2481     uint64_t const fWritebackPc            = 0;
    24822480#endif
    24832481    if (bmGstRegShadowDirty | bmGstSimdRegShadowDirty | fWritebackPc)
  • trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h

    r105853 r105856  
    84328432
    84338433
    8434 DECL_INLINE_THROW(uint32_t) iemNativeEmitTbExit(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVELABELTYPE enmExitReason)
     8434DECL_INLINE_THROW(uint32_t)
     8435iemNativeEmitTbExit(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVELABELTYPE enmExitReason,
     8436                    bool fActuallyExitingTb = true)
    84358437{
    84368438    Assert(IEMNATIVELABELTYPE_IS_EXIT_REASON(enmExitReason));
    84378439
    8438     iemNativeMarkCurCondBranchAsExiting(pReNative);
     8440    if (fActuallyExitingTb)
     8441        iemNativeMarkCurCondBranchAsExiting(pReNative);
    84398442
    84408443#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette