VirtualBox

Changeset 72488 in vbox


Timestamp:
Jun 9, 2018 12:24:35 PM (7 years ago)
Author:
vboxsync
Message:

NEM,CPUM,EM: Don't sync in/out the entire state when leaving the inner NEM loop, only what IEM/TRPM might need. Speeds up MMIO and I/O requiring return to ring-3. bugref:9044

Location:
trunk
Files:
14 edited
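
To make the commit message concrete: the guest register state carries an "externalized" bitmask (fExtrn) saying which register groups still live with the hypervisor, and exit handlers now import only the groups they actually need rather than syncing everything on each exit. Below is a minimal, self-contained C sketch of that idea; it is not VirtualBox code, and all names are illustrative stand-ins for the CPUMCTX_EXTRN_XXX scheme.

    #include <stdint.h>
    #include <stdio.h>

    #define EXTRN_RIP    UINT64_C(0x0001)
    #define EXTRN_RFLAGS UINT64_C(0x0002)
    #define EXTRN_GPRS   UINT64_C(0x0004)
    #define EXTRN_CRX    UINT64_C(0x0008)
    #define EXTRN_ALL    UINT64_C(0x000f)

    typedef struct GUESTCTX
    {
        uint64_t fExtrn;   /* set bits: that register group still resides with the hypervisor */
        uint64_t rip;
        uint64_t rflags;
    } GUESTCTX;

    /* Fetch only the requested register groups from the hypervisor (simulated here). */
    static void importOnDemand(GUESTCTX *pCtx, uint64_t fWhat)
    {
        uint64_t const fNeeded = pCtx->fExtrn & fWhat;
        if (fNeeded & EXTRN_RIP)
            pCtx->rip = 0x1000;      /* ...would be a hypercall/ioctl in reality... */
        if (fNeeded & EXTRN_RFLAGS)
            pCtx->rflags = 0x202;
        pCtx->fExtrn &= ~fNeeded;    /* mark the imported groups as present */
    }

    int main(void)
    {
        GUESTCTX Ctx = { EXTRN_ALL, 0, 0 };

        /* An I/O or MMIO exit handler usually needs only RIP and RFLAGS. */
        importOnDemand(&Ctx, EXTRN_RIP | EXTRN_RFLAGS);
        printf("rip=%#llx, still externalized=%#llx\n",
               (unsigned long long)Ctx.rip, (unsigned long long)Ctx.fExtrn);
        return 0;
    }

The trade-off is the usual lazy-fetch one: tracking and anticipating which groups are needed avoids a full hypervisor round-trip on hot paths such as MMIO and I/O exits that have to return to ring-3.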

  • trunk/include/VBox/vmm/cpum.h

    r72358 r72488  
    12591259/** @} */
    12601260
     1261/** @name Externalized State Helpers.
     1262 * @{ */
     1263/** @def CPUM_ASSERT_NOT_EXTRN
     1264 * Macro for asserting that the state referenced by @a a_fNotExtrn is present.
     1265 *
     1266 * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
     1267 * @param   a_fNotExtrn     Mask of CPUMCTX_EXTRN_XXX bits to check.
     1268 *
     1269 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
     1270 */
     1271#define CPUM_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
     1272    AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fNotExtrn)), \
     1273              ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn)))
     1274
     1275/** @def CPUM_IMPORT_EXTRN_RET
     1276 * Macro for making sure the state specified by @a fExtrnImport is present,
     1277 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
     1278 *
     1279 * Will return if CPUMImportGuestStateOnDemand() fails.
     1280 *
     1281 * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
     1282 * @param   a_fExtrnImport  Mask of CPUMCTX_EXTRN_XXX bits to get.
     1283 * @thread  EMT(a_pVCpu)
     1284 *
     1285 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
     1286 */
     1287#define CPUM_IMPORT_EXTRN_RET(a_pVCpu, a_fExtrnImport) \
     1288    do { \
     1289        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
     1290        { /* already present, consider this likely */ } \
     1291        else \
     1292        { \
     1293            int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
     1294            AssertRCReturn(rcCpumImport, rcCpumImport); \
     1295        } \
     1296    } while (0)
     1297
     1298/** @def CPUM_IMPORT_EXTRN_RCSTRICT
     1299 * Macro for making sure the state specified by @a fExtrnImport is present,
     1300 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
     1301 *
     1302 * Will update a_rcStrict if CPUMImportGuestStateOnDemand() fails.
     1303 *
     1304 * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
     1305 * @param   a_fExtrnImport  Mask of CPUMCTX_EXTRN_XXX bits to get.
     1306 * @param   a_rcStrict      Strict status code variable to update on failure.
     1307 * @thread  EMT(a_pVCpu)
     1308 *
     1309 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
     1310 */
     1311#define CPUM_IMPORT_EXTRN_RCSTRICT(a_pVCpu, a_fExtrnImport, a_rcStrict) \
     1312    do { \
     1313        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
     1314        { /* already present, consider this likely */ } \
     1315        else \
     1316        { \
     1317            int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
     1318            AssertStmt(RT_SUCCESS(rcCpumImport) || RT_FAILURE_NP(a_rcStrict), a_rcStrict = rcCpumImport); \
     1319        } \
     1320    } while (0)
     1321
     1322VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPU pVCpu, uint64_t fExtrnImport);
     1323/** @} */
     1324
    12611325#ifndef IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS
    12621326
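
A hedged usage sketch of the helpers introduced above (not part of this changeset; the handler name and field mask are invented, and it assumes the VirtualBox VMM build environment rather than compiling standalone):

    #define VMCPU_INCL_CPUM_GST_CTX   /* required by the macros, see the @remarks above */
    #include <VBox/vmm/cpum.h>
    #include <VBox/err.h>

    /* Hypothetical handler that needs the guest control registers present. */
    static int exampleHandlerNeedingCrX(PVMCPU pVCpu)
    {
        /* Import CR0/CR3/CR4/EFER from NEM/HM if they are still externalized;
           on import failure this returns the failure status from the function. */
        CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3
                                   | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);

        /* Direct access is now safe; the getters in CPUMAllRegs.cpp below assert exactly this. */
        CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
        uint64_t const uCr3 = CPUMGetGuestCR3(pVCpu);
        RT_NOREF(uCr3); /* a real handler would use the value here */
        return VINF_SUCCESS;
    }

The CPUM_IMPORT_EXTRN_RCSTRICT() calls added to EM.cpp further down follow the same pattern, but accumulate a failure into a strict status variable instead of returning immediately.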
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r72484 r72488  
    2828#include <VBox/vmm/mm.h>
    2929#include <VBox/vmm/em.h>
     30#ifndef IN_RC
     31# include <VBox/vmm/nem.h>
     32# include <VBox/vmm/hm.h>
     33#endif
    3034#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
    3135# include <VBox/vmm/selm.h>
     
    4246#include <iprt/asm-amd64-x86.h>
    4347#ifdef IN_RING3
    44 #include <iprt/thread.h>
     48# include <iprt/thread.h>
    4549#endif
    4650
     
    973977VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
    974978{
     979    Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_CR0));
    975980    return pVCpu->cpum.s.Guest.cr0;
    976981}
     
    979984VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
    980985{
     986    Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_CR2));
    981987    return pVCpu->cpum.s.Guest.cr2;
    982988}
     
    985991VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
    986992{
     993    Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_CR3));
    987994    return pVCpu->cpum.s.Guest.cr3;
    988995}
     
    991998VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
    992999{
     1000    Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_CR4));
    9931001    return pVCpu->cpum.s.Guest.cr4;
    9941002}
     
    10131021VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
    10141022{
     1023    Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_RIP));
    10151024    return pVCpu->cpum.s.Guest.eip;
    10161025}
     
    10191028VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
    10201029{
     1030    Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_RIP));
    10211031    return pVCpu->cpum.s.Guest.rip;
    10221032}
     
    27712781}
    27722782
     2783
     2784/**
      2785 * Used to dynamically import state residing in NEM or HM.
     2786 *
     2787 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
     2788 *
     2789 * @returns VBox status code.
     2790 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
     2791 * @param   fExtrnImport    The fields to import.
     2792 * @thread  EMT(pVCpu)
     2793 */
     2794VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPU pVCpu, uint64_t fExtrnImport)
     2795{
     2796    VMCPU_ASSERT_EMT(pVCpu);
     2797    if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
     2798    {
     2799#ifndef IN_RC
     2800        switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
     2801        {
     2802            case CPUMCTX_EXTRN_KEEPER_NEM:
     2803            {
     2804                int rc = NEMImportStateOnDemand(pVCpu, &pVCpu->cpum.s.Guest, fExtrnImport);
     2805                Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
     2806                return rc;
     2807            }
     2808
     2809            case CPUMCTX_EXTRN_KEEPER_HM: /** @todo make HM use CPUMCTX_EXTRN_XXX. */
     2810            default:
     2811                AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
     2812        }
     2813#else
     2814        AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
     2815#endif
     2816    }
     2817    return VINF_SUCCESS;
     2818}
     2819
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r72484 r72488  
    1389413894#endif /* IEM_WITH_SETJMP */
    1389513895
    13896 
    13897 /**
    13898  * Used to dynamically imports state residing in NEM or HM.
    13899  *
    13900  * @returns VBox status code.
    13901  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    13902  * @param   pCtx            The CPU context structure.
    13903  * @param   fExtrnImport    The fields to import.
    13904  */
    13905 int iemCtxImport(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fExtrnImport)
    13906 {
    13907     switch (pCtx->fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
    13908     {
    13909 #ifndef IN_RC
    13910         case CPUMCTX_EXTRN_KEEPER_NEM:
    13911         {
    13912             int rc = NEMImportStateOnDemand(pVCpu, pCtx, fExtrnImport);
    13913             Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
    13914             return rc;
    13915         }
    13916 
    13917         case CPUMCTX_EXTRN_KEEPER_HM: /** @todo make HM use CPUMCTX_EXTRN_XXX. */
    13918 #endif
    13919 
    13920         default:
    13921             AssertLogRelMsgFailed(("%RX64\n", fExtrnImport));
    13922 #ifdef IN_RC
    13923             RT_NOREF_PV(pVCpu); RT_NOREF_PV(fExtrnImport);
    13924 #endif
    13925             return VERR_IEM_IPE_9;
    13926     }
    13927 }
    13928 
    13929 
    1393013896/** @}  */
    1393113897
  • trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h

    r72484 r72488  
    10391039    /* Almost done, just update extrn flags and maybe change PGM mode. */
    10401040    pCtx->fExtrn &= ~fWhat;
     1041    if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
     1042        pCtx->fExtrn = 0;
    10411043
    10421044    /* Typical. */
     
    10501052    {
    10511053        int rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
    1052         AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
     1054        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    10531055    }
    10541056
     
    10561058    {
    10571059        int rc = PGMFlushTLB(pVCpu, pCtx->cr3, fFlushGlobalTlb);
    1058         AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
     1060        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    10591061    }
    10601062
     
    12381240
    12391241
    1240 #ifdef LOG_ENABLED
    12411242/** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */
    1242 # define SWITCH_IT(a_szPrefix) \
     1243#define SWITCH_IT(a_szPrefix) \
    12431244    do \
    12441245        switch (u)\
     
    12561257    while (0)
    12571258
    1258 # ifdef NEM_WIN_USE_OUR_OWN_RUN_API
     1259#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
    12591260/**
    12601261 * Translates the execution stat bitfield into a short log string, VID version.
     
    12751276        SWITCH_IT("RM");
    12761277}
    1277 # elif defined(IN_RING3)
     1278#elif defined(IN_RING3)
    12781279/**
    12791280 * Translates the execution stat bitfield into a short log string, WinHv version.
     
    12941295        SWITCH_IT("RM");
    12951296}
    1296 # endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
    1297 # undef SWITCH_IT
    1298 #endif /* LOG_ENABLED */
     1297#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
     1298#undef SWITCH_IT
    12991299
    13001300
     
    28022802    nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, pCtx, true /*fClearXcpt*/);
    28032803    uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
    2804     if (pMsg->ExceptionVector == X86_XCPT_DB)
     2804    if (pExit->VpException.ExceptionType == X86_XCPT_DB)
    28052805        fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
    28062806    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx, fWhat, "Xcpt");
     
    36703670    /*
    36713671     * If the CPU is running, make sure to stop it before we try sync back the
    3672      * state and return to EM.
     3672     * state and return to EM.  We don't sync back the whole state if we can help it.
    36733673     */
    36743674# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
     
    36853685    if (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
    36863686    {
    3687 # ifdef IN_RING0
    3688         int rc2 = nemR0WinImportState(pGVM, pGVCpu, pCtx, CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
    3689         if (RT_SUCCESS(rc2))
    3690             pCtx->fExtrn = 0;
    3691         else if (rc2 == VERR_NEM_CHANGE_PGM_MODE || rc2 == VERR_NEM_FLUSH_TLB || rc2 == VERR_NEM_UPDATE_APIC_BASE)
    3692         {
    3693             pCtx->fExtrn = 0;
    3694             if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
    3695                 rcStrict = -rc2;
    3696             else
      3687        /* Try to anticipate what we might need. */
     3688        uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
     3689        if (   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
     3690            || RT_FAILURE(rcStrict))
     3691            fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
     3692        else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
     3693                                          | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
     3694            fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
     3695        if (pCtx->fExtrn & fImport)
     3696        {
     3697#ifdef IN_RING0
     3698            int rc2 = nemR0WinImportState(pGVM, pGVCpu, pCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
     3699            if (RT_SUCCESS(rc2))
     3700                pCtx->fExtrn &= ~fImport;
     3701            else if (rc2 == VERR_NEM_CHANGE_PGM_MODE || rc2 == VERR_NEM_FLUSH_TLB || rc2 == VERR_NEM_UPDATE_APIC_BASE)
    36973702            {
    3698                 pVCpu->nem.s.rcPending = -rc2;
    3699                 LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
     3703                pCtx->fExtrn &= ~fImport;
     3704                if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
     3705                    rcStrict = -rc2;
     3706                else
     3707                {
     3708                    pVCpu->nem.s.rcPending = -rc2;
     3709                    LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
     3710                }
    37003711            }
    3701         }
    37023712# else
    3703         int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
    3704         if (RT_SUCCESS(rc2))
    3705             pCtx->fExtrn = 0;
     3713            int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
     3714            if (RT_SUCCESS(rc2))
     3715                pCtx->fExtrn &= ~fImport;
    37063716# endif
    3707         else if (RT_SUCCESS(rcStrict))
    3708             rcStrict = rc2;
     3717            else if (RT_SUCCESS(rcStrict))
     3718                rcStrict = rc2;
     3719            if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
     3720                pCtx->fExtrn = 0;
     3721            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
     3722        }
     3723        else
     3724        {
     3725            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
     3726            //pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
     3727        }
    37093728    }
    37103729    else
     3730    {
     3731        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
    37113732        pCtx->fExtrn = 0;
     3733    }
    37123734
    37133735    LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
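
The hunk above is the core of the speed-up: instead of unconditionally importing CPUMCTX_EXTRN_ALL when leaving the inner loop, the code guesses the mask the next consumer (IEM/TRPM or the outer EM loop) will need. A toy restatement of that decision follows; it is not VirtualBox code and the mask values and function name are made up.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the real CPUMCTX_EXTRN_XXX masks. */
    #define TOY_EXTRN_IEM_MUST  UINT64_C(0x00ff)  /* ~ IEM_CPUMCTX_EXTRN_MUST_MASK + inhibit bits */
    #define TOY_EXTRN_XCPT      UINT64_C(0x0f00)  /* ~ IEM_CPUMCTX_EXTRN_XCPT_MASK */
    #define TOY_EXTRN_ALL       UINT64_C(0xffff)

    static uint64_t toyChooseImportMask(bool fReturnsToEm, bool fFailed, bool fIntOrNmiPending)
    {
        /* Returning to the outer EM loop or failing: sync everything back. */
        if (fReturnsToEm || fFailed)
            return TOY_EXTRN_ALL;

        /* Otherwise start with what IEM/TRPM typically touch... */
        uint64_t fImport = TOY_EXTRN_IEM_MUST;

        /* ...and add the exception/injection state if an interrupt or NMI is pending. */
        if (fIntOrNmiPending)
            fImport |= TOY_EXTRN_XCPT;
        return fImport;
    }

When nothing in the anticipated mask is still externalized, the import is skipped entirely and counted by the new StatImportOnReturnSkipped counter registered in NEMR3Native-win.cpp below.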
  • trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp

    r72484 r72488  
    21352135    /* Almost done, just update extrn flags and maybe change PGM mode. */
    21362136    pCtx->fExtrn &= ~fWhat;
     2137    if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
     2138        pCtx->fExtrn = 0;
    21372139
    21382140    /* Typical. */
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r72461 r72488  
    3737*********************************************************************************************************************************/
    3838#define LOG_GROUP LOG_GROUP_EM
     39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
    3940#include <VBox/vmm/em.h>
    4041#include <VBox/vmm/vmm.h>
     
    9596#endif
    9697static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
    97 int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
    9898
    9999
     
    11981198            fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
    11991199#endif
    1200             rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
     1200            rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
    12011201        }
    12021202
     
    16101610 * Executes all high priority post execution force actions.
    16111611 *
    1612  * @returns rc or a fatal status code.
     1612 * @returns Strict VBox status code.  Typically @a rc, but may be upgraded to
     1613 *          fatal error status code.
    16131614 *
    16141615 * @param   pVM         The cross context VM structure.
    16151616 * @param   pVCpu       The cross context virtual CPU structure.
    1616  * @param   rc          The current rc.
     1617 * @param   rc          The current strict VBox status code rc.
    16171618 */
    1618 int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
     1619VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
    16191620{
    16201621    VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
     
    16261627    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
    16271628    {
     1629        CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
    16281630        int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
    16291631        if (RT_FAILURE(rc2))
     
    16351637    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
    16361638    {
     1639        CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
    16371640        if (CPUMIsGuestInPAEMode(pVCpu))
    16381641        {
     
    16491652    /* IEM has pending work (typically memory write after INS instruction). */
    16501653    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
    1651         rc = VBOXSTRICTRC_TODO(IEMR3ProcessForceFlag(pVM, pVCpu, rc));
     1654        rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
    16521655
    16531656    /* IOM has pending work (committing an I/O or MMIO write). */
    16541657    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
    1655         rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
     1658        rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
    16561659
    16571660#ifdef VBOX_WITH_RAW_MODE
     
    18291832        if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
    18301833        {
     1834            CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
    18311835            rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
    18321836            UPDATE_RC();
     
    18761880            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
    18771881        {
     1882            CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
    18781883            rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
    18791884            UPDATE_RC();
     
    18851890        if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
    18861891        {
     1892            CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
    18871893            rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
    18881894            UPDATE_RC();
     
    18961902            &&  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
    18971903        {
    1898             PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    1899 
    19001904            /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
    19011905            Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
    1902 
     1906            CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
     1907            PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    19031908            CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
    19041909            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
     
    19451950        if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
    19461951        {
     1952            CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
    19471953            rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
    19481954            UPDATE_RC();
     
    19651971        if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
    19661972        {
     1973            CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
    19671974            rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
    19681975            if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
     
    20192026        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
    20202027        {
     2028            CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
    20212029            rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
    20222030            if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
     
    20792087            &&  !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
    20802088        {
     2089            CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
    20812090            if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
    20822091            {
     
    21082117                    {
    21092118                        fWakeupPending = true;
    2110 #ifdef VBOX_STRICT
     2119# ifdef VBOX_STRICT
    21112120                        rcIrq = rc2;
    2112 #endif
     2121# endif
    21132122                    }
    21142123                    if (fResched)
     
    21182127#endif
    21192128                {
     2129                    CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
    21202130                    if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
    21212131#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     
    21302140                        /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
    21312141                        /** @todo this really isn't nice, should properly handle this */
     2142                        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
    21322143                        rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
    21332144                        Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
     
    21652176            && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
    21662177        {
     2178            CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
    21672179            rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
    21682180            UPDATE_RC();
     
    21752187            && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
    21762188        {
     2189            CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
    21772190            rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
    21782191            UPDATE_RC();
  • trunk/src/VBox/VMM/VMMR3/EMHM.cpp

    r70979 r72488  
    2121*********************************************************************************************************************************/
    2222#define LOG_GROUP LOG_GROUP_EM
     23#define VMCPU_INCL_CPUM_GST_CTX
    2324#include <VBox/vmm/em.h>
    2425#include <VBox/vmm/vmm.h>
     
    129130            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
    130131        {
    131             rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
     132            rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
    132133            LogFlow(("EMR3HmSingleInstruction: FFs after -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    133134        }
     
    468469        if (    VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
    469470            ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
    470             rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
     471            rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
    471472
    472473        /*
  • trunk/src/VBox/VMM/VMMR3/EMR3Nem.cpp

    r72207 r72488  
    2121*********************************************************************************************************************************/
    2222#define LOG_GROUP LOG_GROUP_EM
     23#define VMCPU_INCL_CPUM_GST_CTX
    2324#include <VBox/vmm/em.h>
    2425#include <VBox/vmm/vmm.h>
     
    7475
    7576/**
    76  * Executes instruction in HM mode if we can.
     77 * Executes instruction in NEM mode if we can.
    7778 *
    7879 * This is somewhat comparable to REMR3EmulateInstruction.
     
    9091VBOXSTRICTRC emR3NemSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
    9192{
    92     PCPUMCTX pCtx = pVCpu->em.s.pCtx;
     93    Assert(pVCpu->em.s.pCtx == &pVCpu->cpum.GstCtx);
    9394    Assert(!(fFlags & ~EM_ONE_INS_FLAGS_MASK));
    9495
    95     if (!NEMR3CanExecuteGuest(pVM, pVCpu, pCtx))
     96    if (!NEMR3CanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
    9697        return VINF_EM_RESCHEDULE;
    9798
    98     uint64_t const uOldRip = pCtx->rip;
     99    uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip;
    99100    for (;;)
    100101    {
     
    105106            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
    106107        {
    107             VBOXSTRICTRC rcStrict = emR3NemForcedActions(pVM, pVCpu, pCtx);
     108            VBOXSTRICTRC rcStrict = emR3NemForcedActions(pVM, pVCpu, &pVCpu->cpum.GstCtx);
    108109            if (rcStrict != VINF_SUCCESS)
    109110            {
     
    129130            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
    130131        {
    131             rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
     132            rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
    132133            LogFlow(("emR3NemSingleInstruction: FFs after -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    133134        }
     
    135136        if (rcStrict != VINF_SUCCESS && (rcStrict < VINF_EM_FIRST || rcStrict > VINF_EM_LAST))
    136137        {
    137             rcStrict = emR3NemHandleRC(pVM, pVCpu, pCtx, VBOXSTRICTRC_TODO(rcStrict));
     138            rcStrict = emR3NemHandleRC(pVM, pVCpu, &pVCpu->cpum.GstCtx, VBOXSTRICTRC_TODO(rcStrict));
    138139            Log(("emR3NemSingleInstruction: emR3NemHandleRC -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    139140        }
     
    142143         * Done?
    143144         */
     145        CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
    144146        if (   (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)
    145147            || !(fFlags & EM_ONE_INS_FLAGS_RIP_CHANGE)
    146             || pCtx->rip != uOldRip)
    147         {
    148             if (rcStrict == VINF_SUCCESS && pCtx->rip != uOldRip)
     148            || pVCpu->cpum.GstCtx.rip != uOldRip)
     149        {
     150            if (rcStrict == VINF_SUCCESS && pVCpu->cpum.GstCtx.rip != uOldRip)
    149151                rcStrict = VINF_EM_DBG_STEPPED;
    150             Log(("emR3NemSingleInstruction: returns %Rrc (rip %llx -> %llx)\n", VBOXSTRICTRC_VAL(rcStrict), uOldRip, pCtx->rip));
     152            Log(("emR3NemSingleInstruction: returns %Rrc (rip %llx -> %llx)\n",
     153                 VBOXSTRICTRC_VAL(rcStrict), uOldRip, pVCpu->cpum.GstCtx.rip));
     154            CPUM_IMPORT_EXTRN_RET(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK);
    151155            return rcStrict;
    152156        }
     
    172176#endif
    173177{
    174 #ifdef LOG_ENABLED
     178#if defined(LOG_ENABLED)
    175179    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    176180#endif
     
    195199     */
    196200    STAM_PROFILE_START(&pVCpu->em.s.StatIEMEmu, a);
     201    CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    197202    rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu));
    198203    STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMEmu, a);
     
    203208#ifdef VBOX_WITH_REM
    204209        STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, b);
     210        CPUM_IMPORT_EXTRN_RET(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK);
    205211        EMRemLock(pVM);
    206212        /* Flush the recompiler TLB if the VCPU has changed. */
     
    216222#endif /* !VBOX_WITH_REM */
    217223    }
    218 
    219 #ifdef EM_NOTIFY_HM
    220     if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
    221         HMR3NotifyEmulated(pVCpu);
    222 #endif
    223224    return rc;
    224225}
     
    278279     * Hand it over to the interpreter.
    279280     */
     281    CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    280282    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    281283    LogFlow(("emR3NemExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     
    443445         */
    444446        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
    445         if (    VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
    446             ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
    447             rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
     447        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
     448            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
     449            rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
    448450
    449451        /*
     
    478480
    479481    /*
    480      * Return to outer loop.
    481      */
      482     * Return to outer loop, making sure we fetch all the state as we leave.
     483     *
     484     * Note! Not using CPUM_IMPORT_EXTRN_RET here, to prioritize an rcStrict error
     485     *       status over import errors.
     486     */
     487    if (pCtx->fExtrn)
     488    {
     489        int rcImport = NEMImportStateOnDemand(pVCpu, pCtx, pCtx->fExtrn);
     490        AssertReturn(RT_SUCCESS(rcImport) || RT_FAILURE_NP(rcStrict), rcImport);
     491    }
    482492#if defined(LOG_ENABLED) && defined(DEBUG)
    483493    RTLogFlush(NULL);
  • trunk/src/VBox/VMM/VMMR3/EMRaw.cpp

    r69111 r72488  
    2121*********************************************************************************************************************************/
    2222#define LOG_GROUP LOG_GROUP_EM
     23#define VMCPU_INCL_CPUM_GST_CTX
    2324#include <VBox/vmm/em.h>
    2425#include <VBox/vmm/vmm.h>
     
    140141     * Deal with the return code.
    141142     */
    142     rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
     143    rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
    143144    rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
    144145    rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
     
    220221     * Deal with the return codes.
    221222     */
    222     rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
     223    rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
    223224    rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
    224225    rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
     
    14261427        if (    VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
    14271428            ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
    1428             rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
     1429            rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
    14291430
    14301431#ifdef VBOX_STRICT
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp

    r72484 r72488  
    12351235                            STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus,       STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks",           "/NEM/CPU%u/BreakOnStatus", iCpu);
    12361236                            STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand,      STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports",      "/NEM/CPU%u/ImportOnDemand", iCpu);
     1237                            STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn,      STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", iCpu);
     1238                            STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", iCpu);
    12371239                        }
    12381240
  • trunk/src/VBox/VMM/include/EMHandleRCTmpl.h

    r72208 r72488  
    4646#endif
    4747{
     48    NOREF(pCtx);
     49
    4850    switch (rc)
    4951    {
     
    111113            AssertReleaseMsgFailed(("%Rrc handling is not yet implemented\n", rc));
    112114            break;
    113 #endif /* EMHANDLERC_WITH_PATM */
    114 
    115 #ifdef EMHANDLERC_WITH_PATM
     115
    116116        /*
    117117         * Memory mapped I/O access - attempt to patch the instruction
     
    166166         */
    167167        case VINF_PGM_CHANGE_MODE:
     168            CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
    168169            rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
    169170            if (rc == VINF_SUCCESS)
     
    240241         */
    241242        case VINF_GIM_R3_HYPERCALL:
    242         {
    243             /*
    244              * Currently hypercall instruction (vmmcall) emulation is compiled and
    245              * implemented only when nested hw. virt feature is enabled in IEM.
    246              *
    247              * On Intel or when nested hardware virtualization support isn't compiled
    248              * we still need to implement hypercalls rather than throw a #UD.
    249              */
    250 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    251             if (pVM->cpum.ro.GuestFeatures.fSvm)
    252             {
    253                 rc = emR3ExecuteInstruction(pVM, pVCpu, "Hypercall");
    254                 break;
    255             }
    256 #endif
    257             /** @todo IEM/REM need to handle VMCALL/VMMCALL, see
    258              *        @bugref{7270#c168}. */
    259             uint8_t cbInstr = 0;
    260             VBOXSTRICTRC rcStrict = GIMExecHypercallInstr(pVCpu, pCtx, &cbInstr);
    261             if (rcStrict == VINF_SUCCESS)
    262             {
    263                 Assert(cbInstr);
    264                 pCtx->rip += cbInstr;
    265                 /* Update interrupt inhibition. */
    266                 if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    267                     && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
    268                     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    269                 rc = VINF_SUCCESS;
    270             }
    271             else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
    272                 rc = VINF_SUCCESS;
    273             else
    274             {
    275                 Assert(rcStrict != VINF_GIM_R3_HYPERCALL);
    276                 rc = VBOXSTRICTRC_VAL(rcStrict);
    277             }
    278             break;
    279         }
     243            rc = emR3ExecuteInstruction(pVM, pVCpu, "Hypercall");
     244            break;
    280245
    281246#ifdef EMHANDLERC_WITH_HM
     
    308273            rc = emR3ExecuteInstruction(pVM, pVCpu, "TSS FAULT: ");
    309274            break;
    310 #endif
    311 
    312 #ifdef EMHANDLERC_WITH_PATM
     275
    313276        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
    314277            rc = emR3ExecuteInstruction(pVM, pVCpu, "EMUL: ", VINF_PATM_PENDING_IRQ_AFTER_IRET);
     
    324287
    325288        case VINF_EM_RAW_INJECT_TRPM_EVENT:
     289            CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
    326290            rc = VBOXSTRICTRC_VAL(IEMInjectTrpmEvent(pVCpu));
    327291            /* The following condition should be removed when IEM_IMPLEMENTS_TASKSWITCH becomes true. */
  • trunk/src/VBox/VMM/include/EMInternal.h

    r72462 r72488  
    474474EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    475475int     emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
    476 int     emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
     476VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
    477477
    478478int     emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r72485 r72488  
    813813        else \
    814814        { \
    815             int rcCtxImport = iemCtxImport(a_pVCpu, a_pCtx, a_fExtrnImport); \
     815            int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
    816816            AssertRCReturn(rcCtxImport, rcCtxImport); \
    817817        } \
     
    833833        else \
    834834        { \
    835             int rcCtxImport = iemCtxImport(a_pVCpu, a_pCtx, a_fExtrnImport); \
     835            int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
    836836            AssertLogRelRC(rcCtxImport); \
    837837        } \
     
    855855        else \
    856856        { \
    857             int rcCtxImport = iemCtxImport(a_pVCpu, a_pCtx, a_fExtrnImport); \
     857            int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
    858858            AssertRCStmt(rcCtxImport, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), rcCtxImport)); \
    859859        } \
    860860    } while (0)
    861861
    862 int iemCtxImport(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fExtrnImport);
    863862
    864863
  • trunk/src/VBox/VMM/include/NEMInternal.h

    r72484 r72488  
    272272    STAMCOUNTER                 StatBreakOnStatus;
    273273    STAMCOUNTER                 StatImportOnDemand;
     274    STAMCOUNTER                 StatImportOnReturn;
     275    STAMCOUNTER                 StatImportOnReturnSkipped;
    274276    /** @} */
    275277#endif /* RT_OS_WINDOWS */