VirtualBox

Changeset 13883 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Nov 5, 2008 5:04:48 PM (16 years ago)
Author:
vboxsync
Message:

Moved more data around.

Location:
trunk/src/VBox/VMM
Files:
9 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/HWACCM.cpp

    r13879 r13883  
    103103    pVM->hwaccm.s.fActive        = false;
    104104    pVM->hwaccm.s.fNestedPaging  = false;
    105 
    106     /* On first entry we'll sync everything. */
    107     pVM->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
    108105
    109106    /*
     
    874871        hwaccmR3DisableRawMode(pVM);
    875872
    876     /* On first entry we'll sync everything. */
    877     pVM->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
    878 
    879873    for (unsigned i=0;i<pVM->cCPUs;i++)
    880874    {
     875        /* On first entry we'll sync everything. */
     876        pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
     877
    881878        pVM->aCpus[i].hwaccm.s.vmx.cr0_mask = 0;
    882879        pVM->aCpus[i].hwaccm.s.vmx.cr4_mask = 0;
    883     }
    884 
    885     pVM->hwaccm.s.Event.fPending = false;
     880
     881        pVM->aCpus[i].hwaccm.s.Event.fPending = false;
     882    }
    886883
    887884    /* Reset state information for real-mode emulation in VT-x. */
     
    10501047VMMR3DECL(bool) HWACCMR3IsEventPending(PVM pVM)
    10511048{
    1052     return HWACCMIsEnabled(pVM) && pVM->hwaccm.s.Event.fPending;
     1049    /* @todo SMP */
     1050    return HWACCMIsEnabled(pVM) && pVM->aCpus[0].hwaccm.s.Event.fPending;
    10531051}
    10541052
     
    10701068
    10711069        case VERR_VMX_INVALID_VMCS_PTR:
    1072             LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
    1073             LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->hwaccm.s.vmx.lasterror.ulVMCSRevision));
     1070            LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
     1071            LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulVMCSRevision));
    10741072            break;
    10751073
    10761074        case VERR_VMX_UNABLE_TO_START_VM:
    1077             LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->hwaccm.s.vmx.lasterror.ulLastInstrError));
    1078             LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason       %x\n", i, pVM->hwaccm.s.vmx.lasterror.ulLastExitReason));
     1075            LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulLastInstrError));
     1076            LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason       %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulLastExitReason));
    10791077            break;
    10801078
    10811079        case VERR_VMX_UNABLE_TO_RESUME_VM:
    1082             LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->hwaccm.s.vmx.lasterror.ulLastInstrError));
    1083             LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason       %x\n", i, pVM->hwaccm.s.vmx.lasterror.ulLastExitReason));
     1080            LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulLastInstrError));
     1081            LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason       %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulLastExitReason));
    10841082            break;
    10851083
     
    11031101    Log(("hwaccmR3Save:\n"));
    11041102
    1105     /*
    1106      * Save the basic bits - fortunately all the other things can be resynced on load.
    1107      */
    1108     rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.Event.fPending);
    1109     AssertRCReturn(rc, rc);
    1110     rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.Event.errCode);
    1111     AssertRCReturn(rc, rc);
    1112     rc = SSMR3PutU64(pSSM, pVM->hwaccm.s.Event.intInfo);
    1113     AssertRCReturn(rc, rc);
     1103    for (unsigned i=0;i<pVM->cCPUs;i++)
     1104    {
     1105        /*
     1106         * Save the basic bits - fortunately all the other things can be resynced on load.
     1107         */
     1108        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.fPending);
     1109        AssertRCReturn(rc, rc);
     1110        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.errCode);
     1111        AssertRCReturn(rc, rc);
     1112        rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hwaccm.s.Event.intInfo);
     1113        AssertRCReturn(rc, rc);
     1114    }
    11141115
    11151116    return VINF_SUCCESS;
     
    11381139        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    11391140    }
    1140     rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.Event.fPending);
    1141     AssertRCReturn(rc, rc);
    1142     rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.Event.errCode);
    1143     AssertRCReturn(rc, rc);
    1144     rc = SSMR3GetU64(pSSM, &pVM->hwaccm.s.Event.intInfo);
    1145     AssertRCReturn(rc, rc);
    1146 
     1141    for (unsigned i=0;i<pVM->cCPUs;i++)
     1142    {
     1143        rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.fPending);
     1144        AssertRCReturn(rc, rc);
     1145        rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.errCode);
     1146        AssertRCReturn(rc, rc);
     1147        rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hwaccm.s.Event.intInfo);
     1148        AssertRCReturn(rc, rc);
     1149    }
    11471150    return VINF_SUCCESS;
    11481151}
  • trunk/src/VBox/VMM/HWACCMInternal.h

    r13880 r13883  
    188188    bool                        fAllowVPID;
    189189
    190     /** Set if we need to flush the TLB during the world switch. */
    191     bool                        fForceTLBFlush;
    192 
    193     /** Old style FPU reporting trap mask override performed (optimization) */
    194     bool                        fFPUOldStyleOverride;
    195 
    196190    /** Explicit alignment padding to make 32-bit gcc align u64RegisterMask
    197191     *  naturally. */
    198192    bool                        padding[1];
    199193
    200     /** HWACCM_CHANGED_* flags. */
    201     RTUINT                      fContextUseFlags;
    202 
    203     /* Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
    204     RTCPUID                     idLastCpu;
    205 
    206     /* TLB flush count */
    207     RTUINT                      cTLBFlushes;
    208 
    209     /* Current ASID in use by the VM */
    210     RTUINT                      uCurrentASID;
    211 
    212194    /** Maximum ASID allowed. */
    213195    RTUINT                      uMaxASID;
     
    222204        /** Set when we've enabled VMX. */
    223205        bool                        fEnabled;
    224 
    225         /** Set if we can use VMXResume to execute guest code. */
    226         bool                        fResumeVM;
    227206
    228207        /** Set if VPID is supported. */
     
    271250
    272251        /** Ring 0 handlers for VT-x. */
    273         DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTLB, (PVM pVM));
     252        DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTLB, (PVM pVM, PVMCPU pVCpu));
    274253
    275254        /** Host CR4 value (set by ring-0 VMX init) */
     
    304283        VMX_FLUSH                   enmFlushPage;
    305284        VMX_FLUSH                   enmFlushContext;
    306 
    307         /** Real-mode emulation state. */
    308         struct
    309         {
    310             X86EFLAGS                   eflags;
    311             uint32_t                    fValid;
    312         } RealMode;
    313 
    314         struct
    315         {
    316             uint64_t                u64VMCSPhys;
    317             uint32_t                ulVMCSRevision;
    318             uint32_t                ulLastInstrError;
    319             uint32_t                ulLastExitReason;
    320             uint32_t                padding;
    321         } lasterror;
    322285    } vmx;
    323286
     
    328291        /** Set when we've enabled SVM. */
    329292        bool                        fEnabled;
    330         /** Set if we don't have to flush the TLB on VM entry. */
    331         bool                        fResumeVM;
    332293        /** Set if erratum 170 affects the AMD cpu. */
    333294        bool                        fAlwaysFlushTLB;
     295        /** Explicit alignment padding to make 32-bit gcc align u64RegisterMask
     296         *  naturally. */
     297        bool                        padding[1];
    334298
    335299        /** R0 memory object for the host VM control block (VMCB). */
     
    366330        uint32_t                    u32AMDFeatureEDX;
    367331    } cpuid;
    368 
    369 #if HC_ARCH_BITS == 32
    370     uint32_t                        Alignment1;
    371 #endif
    372 
    373     /** Event injection state. */
    374     struct
    375     {
    376         uint32_t                    fPending;
    377         uint32_t                    errCode;
    378         uint64_t                    intInfo;
    379     } Event;
    380332
    381333    /** Saved error from detection */
     
    469421typedef struct HWACCMCPU
    470422{
    471     /** Offset to the VM structure.
    472      * See HWACCMCPU2VM(). */
    473     RTUINT                      offVMCPU;
     423    /** Old style FPU reporting trap mask override performed (optimization) */
     424    bool                        fFPUOldStyleOverride;
     425
     426    /** Set if we don't have to flush the TLB on VM entry. */
     427    bool                        fResumeVM;
     428
     429    /** Set if we need to flush the TLB during the world switch. */
     430    bool                        fForceTLBFlush;
     431
     432    /** Explicit alignment padding to make 32-bit gcc align u64RegisterMask
     433     *  naturally. */
     434    bool                        padding[1];
     435
     436    /** HWACCM_CHANGED_* flags. */
     437    RTUINT                      fContextUseFlags;
     438
     439    /* Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
     440    RTCPUID                     idLastCpu;
     441
     442    /* TLB flush count */
     443    RTUINT                      cTLBFlushes;
     444
     445    /* Current ASID in use by the VM */
     446    RTUINT                      uCurrentASID;
    474447
    475448    struct
     
    495468        /** Current EPTP. */
    496469        RTHCPHYS                    GCPhysEPTP;
     470
     471        /** Real-mode emulation state. */
     472        struct
     473        {
     474            X86EFLAGS                   eflags;
     475            uint32_t                    fValid;
     476        } RealMode;
     477
     478        struct
     479        {
     480            uint64_t                u64VMCSPhys;
     481            uint32_t                ulVMCSRevision;
     482            uint32_t                ulLastInstrError;
     483            uint32_t                ulLastExitReason;
     484            uint32_t                padding;
     485        } lasterror;
     486
    497487    } vmx;
    498488
     
    510500
    511501    } svm;
     502
     503    /** Event injection state. */
     504    struct
     505    {
     506        uint32_t                    fPending;
     507        uint32_t                    errCode;
     508        uint64_t                    intInfo;
     509    } Event;
    512510
    513511} HWACCMCPU;
  • trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp

    r13832 r13883  
    5454{
    5555#ifdef IN_RING0
     56    PVMCPU pVCpu = &pVM->aCpus[HWACCMGetVMCPUId(pVM)];
    5657    if (pVM->hwaccm.s.vmx.fSupported)
    57         return VMXR0InvalidatePage(pVM, GCVirt);
     58        return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);
    5859
    5960    Assert(pVM->hwaccm.s.svm.fSupported);
    60     return SVMR0InvalidatePage(pVM, GCVirt);
     61    return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);
    6162#endif
    6263
     
    7475    LogFlow(("HWACCMFlushTLB\n"));
    7576
    76     pVM->hwaccm.s.fForceTLBFlush = true;
     77    pVM->aCpus[HWACCMGetVMCPUId(pVM)].hwaccm.s.fForceTLBFlush = true;
    7778    STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBManual);
    7879    return VINF_SUCCESS;
     
    120121
    121122#ifdef IN_RING0
     123    PVMCPU pVCpu = &pVM->aCpus[HWACCMGetVMCPUId(pVM)];
    122124    if (pVM->hwaccm.s.vmx.fSupported)
    123         return VMXR0InvalidatePhysPage(pVM, GCPhys);
     125        return VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
    124126
    125127    Assert(pVM->hwaccm.s.svm.fSupported);
    126     SVMR0InvalidatePhysPage(pVM, GCPhys);
     128    SVMR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
    127129#else
    128130    HWACCMFlushTLB(pVM);
     
    139141VMMDECL(bool) HWACCMHasPendingIrq(PVM pVM)
    140142{
    141     return !!pVM->hwaccm.s.Event.fPending;
     143    /* @todo SMP */
     144    return !!pVM->aCpus[0].hwaccm.s.Event.fPending;
    142145}
    143146
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp

    r13879 r13883  
    780780    pVM->hwaccm.s.uMaxASID                  = HWACCMR0Globals.uMaxASID;
    781781
    782     /* Invalidate the last cpu we were running on. */
    783     pVM->hwaccm.s.idLastCpu                 = NIL_RTCPUID;
    784 
    785     /* we'll always increment this the first time (host uses ASID 0) */
    786     pVM->hwaccm.s.uCurrentASID              = 0;
     782    for (unsigned i=0;i<pVM->cCPUs;i++)
     783    {
     784        /* Invalidate the last cpu we were running on. */
     785        pVM->aCpus[i].hwaccm.s.idLastCpu                 = NIL_RTCPUID;
     786
     787        /* we'll always increment this the first time (host uses ASID 0) */
     788        pVM->aCpus[i].hwaccm.s.uCurrentASID              = 0;
     789    }
    787790
    788791    ASMAtomicWriteBool(&pCpu->fInUse, true);
     
    850853
    851854    ASMAtomicWriteBool(&pCpu->fInUse, true);
     855
     856    for (unsigned i=0;i<pVM->cCPUs;i++)
     857    {
     858        /* On first entry we'll sync everything. */
     859        pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
     860    }
    852861
    853862    /* Setup VT-x or AMD-V. */
     
    887896
    888897    /* Always reload the host context and the guest's CR0 register. (!!!!) */
    889     pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;
     898    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;
    890899
    891900    /* Setup the register and mask according to the current execution mode. */
     
    943952        CPUMR0SaveGuestFPU(pVM, pCtx);
    944953
    945         pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     954        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    946955    }
    947956
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r13879 r13883  
    391391 * @returns VBox status code.
    392392 * @param   pVM         The VM to operate on.
     393 * @param   pVCpu       The VM CPU to operate on.
    393394 * @param   pVMCB       SVM control block
    394395 * @param   pCtx        CPU Context
    395396 */
    396 static int SVMR0CheckPendingInterrupt(PVM pVM, SVM_VMCB *pVMCB, CPUMCTX *pCtx)
     397static int SVMR0CheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, SVM_VMCB *pVMCB, CPUMCTX *pCtx)
    397398{
    398399    int rc;
    399400
    400401    /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
    401     if (pVM->hwaccm.s.Event.fPending)
     402    if (pVCpu->hwaccm.s.Event.fPending)
    402403    {
    403404        SVM_EVENT Event;
    404405
    405         Log(("Reinjecting event %08x %08x at %RGv\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip));
     406        Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip));
    406407        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
    407         Event.au64[0] = pVM->hwaccm.s.Event.intInfo;
     408        Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
    408409        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
    409410
    410         pVM->hwaccm.s.Event.fPending = false;
     411        pVCpu->hwaccm.s.Event.fPending = false;
    411412        return VINF_SUCCESS;
    412413    }
     
    525526 * @returns VBox status code.
    526527 * @param   pVM         The VM to operate on.
    527  * @param   pVMCPU      The VM CPU to operate on.
     528 * @param   pVCpu       The VM CPU to operate on.
    528529 */
    529530VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
     
    542543 * @returns VBox status code.
    543544 * @param   pVM         The VM to operate on.
    544  * @param   pVMCPU      The VM CPU to operate on.
     545 * @param   pVCpu       The VM CPU to operate on.
    545546 * @param   pCtx        Guest context
    546547 */
     
    560561
    561562    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    562     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
     563    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
    563564    {
    564565        SVM_WRITE_SELREG(CS, cs);
     
    571572
    572573    /* Guest CPU context: LDTR. */
    573     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
     574    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
    574575    {
    575576        SVM_WRITE_SELREG(LDTR, ldtr);
     
    577578
    578579    /* Guest CPU context: TR. */
    579     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
     580    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
    580581    {
    581582        SVM_WRITE_SELREG(TR, tr);
     
    583584
    584585    /* Guest CPU context: GDTR. */
    585     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
     586    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
    586587    {
    587588        pVMCB->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
     
    590591
    591592    /* Guest CPU context: IDTR. */
    592     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
     593    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
    593594    {
    594595        pVMCB->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
     
    604605
    605606    /* Control registers */
    606     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
     607    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
    607608    {
    608609        val = pCtx->cr0;
     
    620621
    621622                /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
    622                 if (!pVM->hwaccm.s.fFPUOldStyleOverride)
     623                if (!pVCpu->hwaccm.s.fFPUOldStyleOverride)
    623624                {
    624625                    pVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
    625                     pVM->hwaccm.s.fFPUOldStyleOverride = true;
     626                    pVCpu->hwaccm.s.fFPUOldStyleOverride = true;
    626627                }
    627628            }
     
    643644    pVMCB->guest.u64CR2 = pCtx->cr2;
    644645
    645     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
     646    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
    646647    {
    647648        /* Save our shadow CR3 register. */
     
    659660    }
    660661
    661     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
     662    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
    662663    {
    663664        val = pCtx->cr4;
     
    698699
    699700    /* Debug registers. */
    700     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
     701    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
    701702    {
    702703        pCtx->dr[6] |= X86_DR6_INIT_VAL;                                          /* set all reserved bits to 1. */
     
    789790
    790791    /* Done. */
    791     pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
     792    pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
    792793
    793794    return VINF_SUCCESS;
     
    800801 * @returns VBox status code.
    801802 * @param   pVM         The VM to operate on.
    802  * @param   pVMCPU      The VM CPU to operate on.
     803 * @param   pVCpu       The VM CPU to operate on.
    803804 * @param   pCtx        Guest context
    804805 */
     
    882883    /* When external interrupts are pending, we should exit the VM when IF is set. */
    883884    /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */
    884     rc = SVMR0CheckPendingInterrupt(pVM, pVMCB, pCtx);
     885    rc = SVMR0CheckPendingInterrupt(pVM, pVCpu, pVMCB, pCtx);
    885886    if (RT_FAILURE(rc))
    886887    {
     
    923924#ifdef LOG_ENABLED
    924925    pCpu = HWACCMR0GetCurrentCpu();
    925     if (    pVM->hwaccm.s.idLastCpu   != pCpu->idCpu
    926         ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    927     {
    928         if (pVM->hwaccm.s.idLastCpu != pCpu->idCpu)
    929             Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVM->hwaccm.s.idLastCpu, pCpu->idCpu));
     926    if (    pVCpu->hwaccm.s.idLastCpu   != pCpu->idCpu
     927        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
     928    {
     929        if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
     930            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu, pCpu->idCpu));
    930931        else
    931             Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
     932            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    932933    }
    933934    if (pCpu->fFlushTLB)
     
    955956    /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
    956957    /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
    957     if (    pVM->hwaccm.s.idLastCpu != pCpu->idCpu
     958    if (    pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
    958959            /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
    959         ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
     960        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    960961    {
    961962        /* Force a TLB flush on VM entry. */
    962         pVM->hwaccm.s.fForceTLBFlush = true;
     963        pVCpu->hwaccm.s.fForceTLBFlush = true;
    963964    }
    964965    else
    965966        Assert(!pCpu->fFlushTLB || pVM->hwaccm.s.svm.fAlwaysFlushTLB);
    966967
    967     pVM->hwaccm.s.idLastCpu = pCpu->idCpu;
     968    pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
    968969
    969970    /* Make sure we flush the TLB when required. Switch ASID to achieve the same thing, but without actually flushing the whole TLB (which is expensive). */
    970     if (    pVM->hwaccm.s.fForceTLBFlush
     971    if (    pVCpu->hwaccm.s.fForceTLBFlush
    971972        && !pVM->hwaccm.s.svm.fAlwaysFlushTLB)
    972973    {
     
    982983            STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushASID);
    983984
    984         pVM->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
    985         pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
     985        pVCpu->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
     986        pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
    986987    }
    987988    else
     
    990991
    991992        /* We never increase uCurrentASID in the fAlwaysFlushTLB (erratum 170) case. */
    992         if (!pCpu->uCurrentASID || !pVM->hwaccm.s.uCurrentASID)
    993             pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
    994 
    995         Assert(!pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVM->hwaccm.s.fForceTLBFlush);
    996         pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVM->hwaccm.s.fForceTLBFlush;
    997     }
    998     AssertMsg(pVM->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
     993        if (!pCpu->uCurrentASID || !pVCpu->hwaccm.s.uCurrentASID)
     994            pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
     995
     996        Assert(!pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVCpu->hwaccm.s.fForceTLBFlush);
     997        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVCpu->hwaccm.s.fForceTLBFlush;
     998    }
     999    AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    9991000    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    1000     AssertMsg(pVM->hwaccm.s.uCurrentASID >= 1 && pVM->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVM->hwaccm.s.uCurrentASID));
    1001     pVMCB->ctrl.TLBCtrl.n.u32ASID = pVM->hwaccm.s.uCurrentASID;
     1001    AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
     1002    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hwaccm.s.uCurrentASID;
    10021003
    10031004#ifdef VBOX_WITH_STATISTICS
     
    10091010
    10101011    /* In case we execute a goto ResumeExecution later on. */
    1011     pVM->hwaccm.s.svm.fResumeVM      = true;
    1012     pVM->hwaccm.s.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;
     1012    pVCpu->hwaccm.s.fResumeVM      = true;
     1013    pVCpu->hwaccm.s.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;
    10131014
    10141015    Assert(sizeof(pVCpu->hwaccm.s.svm.pVMCBPhys) == 8);
     
    12371238
    12381239    /* Check if an injected event was interrupted prematurely. */
    1239     pVM->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
     1240    pVCpu->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
    12401241    if (    pVMCB->ctrl.ExitIntInfo.n.u1Valid
    12411242        &&  pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT /* we don't care about 'int xx' as the instruction will be restarted. */)
    12421243    {
    1243         Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVM->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));
     1244        Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));
    12441245
    12451246#ifdef LOG_ENABLED
    12461247        SVM_EVENT Event;
    1247         Event.au64[0] = pVM->hwaccm.s.Event.intInfo;
     1248        Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
    12481249
    12491250        if (    exitCode == SVM_EXIT_EXCEPTION_E
     
    12541255#endif
    12551256
    1256         pVM->hwaccm.s.Event.fPending = true;
     1257        pVCpu->hwaccm.s.Event.fPending = true;
    12571258        /* Error code present? (redundant) */
    12581259        if (pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid)
    12591260        {
    1260             pVM->hwaccm.s.Event.errCode  = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
     1261            pVCpu->hwaccm.s.Event.errCode  = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
    12611262        }
    12621263        else
    1263             pVM->hwaccm.s.Event.errCode  = 0;
     1264            pVCpu->hwaccm.s.Event.errCode  = 0;
    12641265    }
    12651266#ifdef VBOX_WITH_STATISTICS
     
    13361337                /* Continue execution. */
    13371338                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
    1338                 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     1339                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    13391340
    13401341                goto ResumeExecution;
     
    16411642        {
    16421643        case 0:
    1643             pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     1644            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    16441645            break;
    16451646        case 2:
     
    16471648        case 3:
    16481649            Assert(!pVM->hwaccm.s.fNestedPaging);
    1649             pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
     1650            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
    16501651            break;
    16511652        case 4:
    1652             pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
     1653            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
    16531654            break;
    16541655        case 8:
     
    16671668
    16681669            /* Must be set by PGMSyncCR3 */
    1669             Assert(PGMGetGuestMode(pVM) <= PGMMODE_PROTECTED || pVM->hwaccm.s.fForceTLBFlush);
     1670            Assert(PGMGetGuestMode(pVM) <= PGMMODE_PROTECTED || pVCpu->hwaccm.s.fForceTLBFlush);
    16701671        }
    16711672        if (rc == VINF_SUCCESS)
     
    17331734        {
    17341735            /* EIP has been updated already. */
    1735             pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     1736            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
    17361737
    17371738            /* Only resume if successful. */
     
    20362037        STAM_COUNTER_INC(&pVM->hwaccm.s.StatPendingHostIrq);
    20372038        /* On the next entry we'll only sync the host context. */
    2038         pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
     2039        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
    20392040    }
    20402041    else
     
    20432044        /** @todo we can do better than this */
    20442045        /* Not in the VINF_PGM_CHANGE_MODE though! */
    2045         pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
     2046        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
    20462047    }
    20472048
     
    20662067    Assert(pVM->hwaccm.s.svm.fSupported);
    20672068
    2068     LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVM->hwaccm.s.idLastCpu, pVM->hwaccm.s.uCurrentASID));
    2069     pVM->hwaccm.s.svm.fResumeVM = false;
     2069    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hwaccm.s.idLastCpu, pVCpu->hwaccm.s.uCurrentASID));
     2070    pVCpu->hwaccm.s.fResumeVM = false;
    20702071
    20712072    /* Force to reload LDTR, so we'll execute VMLoad to load additional guest state. */
    2072     pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;
     2073    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;
    20732074
    20742075    return VINF_SUCCESS;
     
    21002101
    21012102        /* Resync the debug registers the next time. */
    2102         pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     2103        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
    21032104    }
    21042105    else
     
    21992200 * @returns VBox status code.
    22002201 * @param   pVM         The VM to operate on.
     2202 * @param   pVCpu       The VM CPU to operate on.
    22012203 * @param   GCVirt      Page to invalidate
    22022204 */
    2203 VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
     2205VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
    22042206{
    2205     bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVM->hwaccm.s.fForceTLBFlush;
     2207    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVCpu->hwaccm.s.fForceTLBFlush;
    22062208
    22072209    /* Skip it if a TLB flush is already pending. */
     
    22302232 * @returns VBox status code.
    22312233 * @param   pVM         The VM to operate on.
     2234 * @param   pVCpu       The VM CPU to operate on.
    22322235 * @param   GCPhys      Page to invalidate
    22332236 */
    2234 VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
     2237VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
    22352238{
    22362239    Assert(pVM->hwaccm.s.fNestedPaging);
    22372240    /* invlpga only invalidates TLB entries for guest virtual addresses; we have no choice but to force a TLB flush here. */
    2238     pVM->hwaccm.s.fForceTLBFlush = true;
     2241    pVCpu->hwaccm.s.fForceTLBFlush = true;
    22392242    STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBInvlpga);
    22402243    return VINF_SUCCESS;
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.h

    r13879 r13883  
    4747 * @returns VBox status code.
    4848 * @param   pVM         The VM to operate on.
    49  * @param   pVCpu      VPCPU id.
     49 * @param   pVCpu       The VMCPU to operate on.
    5050 * @param   pCpu        CPU info struct
    5151 */
     
    5757 * @returns VBox status code.
    5858 * @param   pVM         The VM to operate on.
    59  * @param   pVCpu      VPCPU id.
     59 * @param   pVCpu       The VMCPU to operate on.
    6060 * @param   pCtx        CPU context
    6161 */
     
    113113 * @returns VBox status code.
    114114 * @param   pVM         The VM to operate on.
    115  * @param   pVCpu      VPCPU id.
     115 * @param   pVCpu       The VMCPU to operate on.
    116116 * @param   pCtx        Guest context
    117117 */
     
    124124 * @returns VBox status code.
    125125 * @param   pVM         The VM to operate on.
    126  * @param   pVCpu      VPCPU id.
     126 * @param   pVCpu       The VMCPU to operate on.
    127127 */
    128128VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu);
     
    133133 * @returns VBox status code.
    134134 * @param   pVM         The VM to operate on.
    135  * @param   pVCpu      VPCPU id.
     135 * @param   pVCpu       The VMCPU to operate on.
    136136 * @param   pCtx        Guest context
    137137 */
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r13879 r13883  
    5151*   Local Functions                                                            *
    5252*******************************************************************************/
    53 static void VMXR0ReportWorldSwitchError(PVM pVM, int rc, PCPUMCTX pCtx);
    54 static void vmxR0SetupTLBEPT(PVM pVM);
    55 static void vmxR0SetupTLBVPID(PVM pVM);
    56 static void vmxR0SetupTLBDummy(PVM pVM);
    57 static void vmxR0FlushEPT(PVM pVM, VMX_FLUSH enmFlush, RTGCPHYS GCPhys);
    58 static void vmxR0FlushVPID(PVM pVM, VMX_FLUSH enmFlush, RTGCPTR GCPtr);
    59 static void vmxR0UpdateExceptionBitmap(PVM pVM, PCPUMCTX pCtx);
     53static void VMXR0ReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTX pCtx);
     54static void vmxR0SetupTLBEPT(PVM pVM, PVMCPU pVCpu);
     55static void vmxR0SetupTLBVPID(PVM pVM, PVMCPU pVCpu);
     56static void vmxR0SetupTLBDummy(PVM pVM, PVMCPU pVCpu);
     57static void vmxR0FlushEPT(PVM pVM, PVMCPU pVCpu, VMX_FLUSH enmFlush, RTGCPHYS GCPhys);
     58static void vmxR0FlushVPID(PVM pVM, PVMCPU pVCpu, VMX_FLUSH enmFlush, RTGCPTR GCPtr);
     59static void vmxR0UpdateExceptionBitmap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    6060
    6161
     
    533533 * @returns VBox status code.
    534534 * @param   pVM         The VM to operate on.
     535 * @param   pVCpu       The VMCPU to operate on.
    535536 * @param   pCtx        CPU Context
    536537 * @param   intInfo     VMX interrupt info
     
    538539 * @param   errCode     Error code (optional)
    539540 */
    540 static int VMXR0InjectEvent(PVM pVM, CPUMCTX *pCtx, uint32_t intInfo, uint32_t cbInstr, uint32_t errCode)
     541static int VMXR0InjectEvent(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t intInfo, uint32_t cbInstr, uint32_t errCode)
    541542{
    542543    int         rc;
     
    583584                intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
    584585
    585                 return VMXR0InjectEvent(pVM, pCtx, intInfo, 0, 0 /* no error code according to the Intel docs */);
     586                return VMXR0InjectEvent(pVM, pVCpu, pCtx, intInfo, 0, 0 /* no error code according to the Intel docs */);
    586587            }
    587588            Log(("Triple fault -> reset the VM!\n"));
     
    622623        pCtx->eflags.u     &= ~(X86_EFL_IF|X86_EFL_TF|X86_EFL_RF|X86_EFL_AC);
    623624
    624         pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_SEGMENT_REGS;
     625        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_SEGMENT_REGS;
    625626        return VINF_SUCCESS;
    626627    }
     
    643644 * @returns VBox status code.
    644645 * @param   pVM         The VM to operate on.
    645  * @param   idVCpu      VMCPU id.
     646 * @param   pVCpu       The VMCPU to operate on.
    646647 * @param   pCtx        CPU Context
    647648 */
     
    651652
    652653    /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
    653     if (pVM->hwaccm.s.Event.fPending)
    654     {
    655         Log(("Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));
     654    if (pVCpu->hwaccm.s.Event.fPending)
     655    {
     656        Log(("Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));
    656657        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
    657         rc = VMXR0InjectEvent(pVM, pCtx, pVM->hwaccm.s.Event.intInfo, 0, pVM->hwaccm.s.Event.errCode);
    658         AssertRC(rc);
    659 
    660         pVM->hwaccm.s.Event.fPending = false;
     658        rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, pVCpu->hwaccm.s.Event.intInfo, 0, pVCpu->hwaccm.s.Event.errCode);
     659        AssertRC(rc);
     660
     661        pVCpu->hwaccm.s.Event.fPending = false;
    661662        return VINF_SUCCESS;
    662663    }
     
    759760
    760761        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntInject);
    761         rc = VMXR0InjectEvent(pVM, pCtx, intInfo, 0, errCode);
     762        rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, intInfo, 0, errCode);
    762763        AssertRC(rc);
    763764    } /* if (interrupts can be dispatched) */
     
    771772 * @returns VBox status code.
    772773 * @param   pVM         The VM to operate on.
    773  * @param   idVCpu      VPCPU id.
     774 * @param   pVCpu       The VMCPU to operate on.
    774775 */
    775776VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
     
    780781     * Host CPU Context
    781782     */
    782     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
     783    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
    783784    {
    784785        RTIDTR      idtr;
     
    870871        AssertRC(rc);
    871872
    872         pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
     873        pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
    873874    }
    874875    return rc;
     
    903904 *
    904905 * @param   pVM         The VM to operate on.
     906 * @param   pVCpu       The VMCPU to operate on.
    905907 * @param   pCtx        Guest context
    906908 */
    907 static void vmxR0UpdateExceptionBitmap(PVM pVM, PCPUMCTX pCtx)
     909static void vmxR0UpdateExceptionBitmap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    908910{
    909911    uint32_t u32TrapMask;
     
    919921    if (    CPUMIsGuestFPUStateActive(pVM) == true
    920922        && !(pCtx->cr0 & X86_CR0_NE)
    921         && !pVM->hwaccm.s.fFPUOldStyleOverride)
     923        && !pVCpu->hwaccm.s.fFPUOldStyleOverride)
    922924    {
    923925        u32TrapMask |= RT_BIT(X86_XCPT_MF);
    924         pVM->hwaccm.s.fFPUOldStyleOverride = true;
     926        pVCpu->hwaccm.s.fFPUOldStyleOverride = true;
    925927    }
    926928
     
    952954 * @returns VBox status code.
    953955 * @param   pVM         The VM to operate on.
    954  * @param   idVCpu      VPCPU id.
     956 * @param   pVCpu       The VMCPU to operate on.
    955957 * @param   pCtx        Guest context
    956958 */
     
    962964
    963965    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    964     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
     966    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
    965967    {
    966968#ifdef HWACCM_VMX_EMULATE_REALMODE
     
    10341036
    10351037    /* Guest CPU context: LDTR. */
    1036     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
     1038    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
    10371039    {
    10381040        if (pCtx->ldtr == 0)
     
    10541056    }
    10551057    /* Guest CPU context: TR. */
    1056     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
     1058    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
    10571059    {
    10581060#ifdef HWACCM_VMX_EMULATE_REALMODE
     
    10981100    }
    10991101    /* Guest CPU context: GDTR. */
    1100     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
     1102    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
    11011103    {
    11021104        rc  = VMXWriteVMCS(VMX_VMCS_GUEST_GDTR_LIMIT,       pCtx->gdtr.cbGdt);
     
    11051107    }
    11061108    /* Guest CPU context: IDTR. */
    1107     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
     1109    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
    11081110    {
    11091111        rc  = VMXWriteVMCS(VMX_VMCS_GUEST_IDTR_LIMIT,       pCtx->idtr.cbIdt);
     
    11211123
    11221124    /* Control registers */
    1123     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
     1125    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
    11241126    {
    11251127        val = pCtx->cr0;
     
    11871189        AssertRC(rc);
    11881190    }
    1189     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
     1191    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
    11901192    {
    11911193        /* CR4 */
     
    12581260    }
    12591261
    1260     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
     1262    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
    12611263    {
    12621264        if (pVM->hwaccm.s.fNestedPaging)
     
    13091311
    13101312    /* Debug registers. */
    1311     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
     1313    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
    13121314    {
    13131315        pCtx->dr[6] |= X86_DR6_INIT_VAL;                                          /* set all reserved bits to 1. */
     
    13631365    if (CPUMIsGuestInRealModeEx(pCtx))
    13641366    {
    1365         pVM->hwaccm.s.vmx.RealMode.eflags = eflags;
     1367        pVCpu->hwaccm.s.vmx.RealMode.eflags = eflags;
    13661368
    13671369        eflags.Bits.u1VM   = 1;
     
    14331435    }
    14341436
    1435     vmxR0UpdateExceptionBitmap(pVM, pCtx);
     1437    vmxR0UpdateExceptionBitmap(pVM, pVCpu, pCtx);
    14361438
    14371439    /* Done. */
    1438     pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
     1440    pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
    14391441
    14401442    return rc;
     
    14461448 * @returns VBox status code.
    14471449 * @param   pVM         The VM to operate on.
    1448  * @param   idVCpu      VMCPU id.
     1450 * @param   pVCpu       The VMCPU to operate on.
    14491451 * @param   pCtx        Guest context
    14501452 */
     
    15491551        /* Hide our emulation flags */
    15501552        pCtx->eflags.Bits.u1VM   = 0;
    1551         pCtx->eflags.Bits.u2IOPL = pVM->hwaccm.s.vmx.RealMode.eflags.Bits.u2IOPL;
     1553        pCtx->eflags.Bits.u2IOPL = pVCpu->hwaccm.s.vmx.RealMode.eflags.Bits.u2IOPL;
    15521554
    15531555        /* Force a TR resync every time in case we switch modes. */
    1554         pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_TR;
     1556        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_TR;
    15551557    }
    15561558    else
     
    15671569 *
    15681570 * @param   pVM         The VM to operate on.
     1571 * @param   pVCpu       The VMCPU to operate on.
    15691572 */
    1570 static void vmxR0SetupTLBDummy(PVM pVM)
     1573static void vmxR0SetupTLBDummy(PVM pVM, PVMCPU pVCpu)
    15711574{
     1575    NOREF(pVM);
     1576    NOREF(pVCpu);
    15721577    return;
    15731578}
     
    15781583 * @returns VBox status code.
    15791584 * @param   pVM         The VM to operate on.
     1585 * @param   pVCpu       The VMCPU to operate on.
    15801586 */
    1581 static void vmxR0SetupTLBEPT(PVM pVM)
     1587static void vmxR0SetupTLBEPT(PVM pVM, PVMCPU pVCpu)
    15821588{
    15831589    PHWACCM_CPUINFO pCpu;
     
    15901596    /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
    15911597    /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
    1592     if (    pVM->hwaccm.s.idLastCpu != pCpu->idCpu
     1598    if (    pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
    15931599            /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
    1594         ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
     1600        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    15951601    {
    15961602        /* Force a TLB flush on VM entry. */
    1597         pVM->hwaccm.s.fForceTLBFlush = true;
     1603        pVCpu->hwaccm.s.fForceTLBFlush = true;
    15981604    }
    15991605    else
    16001606        Assert(!pCpu->fFlushTLB);
    16011607
    1602     pVM->hwaccm.s.idLastCpu = pCpu->idCpu;
     1608    pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
    16031609    pCpu->fFlushTLB         = false;
    16041610
    1605     if (pVM->hwaccm.s.fForceTLBFlush)
    1606         vmxR0FlushEPT(pVM, pVM->hwaccm.s.vmx.enmFlushContext, 0);
     1611    if (pVCpu->hwaccm.s.fForceTLBFlush)
     1612        vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, 0);
    16071613
    16081614#ifdef VBOX_WITH_STATISTICS
    1609     if (pVM->hwaccm.s.fForceTLBFlush)
     1615    if (pVCpu->hwaccm.s.fForceTLBFlush)
    16101616        STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBWorldSwitch);
    16111617    else
     
    16201626 * @returns VBox status code.
    16211627 * @param   pVM         The VM to operate on.
     1628 * @param   pVCpu       The VMCPU to operate on.
    16221629 */
    1623 static void vmxR0SetupTLBVPID(PVM pVM)
     1630static void vmxR0SetupTLBVPID(PVM pVM, PVMCPU pVCpu)
    16241631{
    16251632    PHWACCM_CPUINFO pCpu;
     
    16321639    /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
    16331640    /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
    1634     if (    pVM->hwaccm.s.idLastCpu != pCpu->idCpu
     1641    if (    pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
    16351642            /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
    1636         ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
     1643        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    16371644    {
    16381645        /* Force a TLB flush on VM entry. */
    1639         pVM->hwaccm.s.fForceTLBFlush = true;
     1646        pVCpu->hwaccm.s.fForceTLBFlush = true;
    16401647    }
    16411648    else
    16421649        Assert(!pCpu->fFlushTLB);
    16431650
    1644     pVM->hwaccm.s.idLastCpu = pCpu->idCpu;
     1651    pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
    16451652
    16461653    /* Make sure we flush the TLB when required. Switch ASID to achieve the same thing, but without actually flushing the whole TLB (which is expensive). */
    1647     if (pVM->hwaccm.s.fForceTLBFlush)
     1654    if (pVCpu->hwaccm.s.fForceTLBFlush)
    16481655    {
    16491656        if (    ++pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID
     
    16571664        {
    16581665            STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushASID);
    1659             pVM->hwaccm.s.fForceTLBFlush     = false;
    1660         }
    1661 
    1662         pVM->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
    1663         pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
     1666            pVCpu->hwaccm.s.fForceTLBFlush     = false;
     1667        }
     1668
     1669        pVCpu->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
     1670        pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
    16641671    }
    16651672    else
     
    16671674        Assert(!pCpu->fFlushTLB);
    16681675
    1669         if (!pCpu->uCurrentASID || !pVM->hwaccm.s.uCurrentASID)
    1670             pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
    1671     }
    1672     AssertMsg(pVM->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
     1676        if (!pCpu->uCurrentASID || !pVCpu->hwaccm.s.uCurrentASID)
     1677            pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
     1678    }
     1679    AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    16731680    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    1674     AssertMsg(pVM->hwaccm.s.uCurrentASID >= 1 && pVM->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVM->hwaccm.s.uCurrentASID));
    1675 
    1676     int rc  = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_VPID, pVM->hwaccm.s.uCurrentASID);
     1681    AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
     1682
     1683    int rc  = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_VPID, pVCpu->hwaccm.s.uCurrentASID);
    16771684    AssertRC(rc);
    16781685
    1679     if (pVM->hwaccm.s.fForceTLBFlush)
    1680         vmxR0FlushVPID(pVM, pVM->hwaccm.s.vmx.enmFlushContext, 0);
     1686    if (pVCpu->hwaccm.s.fForceTLBFlush)
     1687        vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, 0);
    16811688
    16821689#ifdef VBOX_WITH_STATISTICS
    1683     if (pVM->hwaccm.s.fForceTLBFlush)
     1690    if (pVCpu->hwaccm.s.fForceTLBFlush)
    16841691        STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBWorldSwitch);
    16851692    else
     
    16941701 * @returns VBox status code.
    16951702 * @param   pVM         The VM to operate on.
    1696  * @param   idVCpu      VPCPU id.
     1703 * @param   pVCpu       The VMCPU to operate on.
    16971704 * @param   pCtx        Guest context
    16981705 */
     
    18791886    {
    18801887        pCpu = HWACCMR0GetCurrentCpu();
    1881         if (    pVM->hwaccm.s.idLastCpu   != pCpu->idCpu
    1882             ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    1883         {
    1884             if (pVM->hwaccm.s.idLastCpu != pCpu->idCpu)
    1885                 Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVM->hwaccm.s.idLastCpu, pCpu->idCpu));
     1888        if (    pVCpu->hwaccm.s.idLastCpu   != pCpu->idCpu
     1889            ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
     1890        {
     1891            if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
     1892                Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu, pCpu->idCpu));
    18861893            else
    1887                 Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
     1894                Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    18881895        }
    18891896        if (pCpu->fFlushTLB)
    18901897            Log(("Force TLB flush: first time cpu %d is used -> flush\n", pCpu->idCpu));
    18911898        else
    1892         if (pVM->hwaccm.s.fForceTLBFlush)
     1899        if (pVCpu->hwaccm.s.fForceTLBFlush)
    18931900            LogFlow(("Manual TLB flush\n"));
    18941901    }
     
    19181925
    19191926    /* Deal with tagged TLB setup and invalidation. */
    1920     pVM->hwaccm.s.vmx.pfnSetupTaggedTLB(pVM);
     1927    pVM->hwaccm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu);
    19211928
    19221929    /* Non-register state Guest Context */
     
    19451952#endif
    19461953    TMNotifyStartOfExecution(pVM);
    1947     rc = pVCpu->hwaccm.s.vmx.pfnStartVM(pVM->hwaccm.s.vmx.fResumeVM, pCtx);
     1954    rc = pVCpu->hwaccm.s.vmx.pfnStartVM(pVCpu->hwaccm.s.fResumeVM, pCtx);
    19481955    TMNotifyEndOfExecution(pVM);
    19491956
    19501957    /* In case we execute a goto ResumeExecution later on. */
    1951     pVM->hwaccm.s.vmx.fResumeVM  = true;
    1952     pVM->hwaccm.s.fForceTLBFlush = false;
     1958    pVCpu->hwaccm.s.fResumeVM  = true;
     1959    pVCpu->hwaccm.s.fForceTLBFlush = false;
    19531960
    19541961    /*
     
    19631970    if (rc != VINF_SUCCESS)
    19641971    {
    1965         VMXR0ReportWorldSwitchError(pVM, rc, pCtx);
     1972        VMXR0ReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
    19661973        goto end;
    19671974    }
     
    19952002    rc = VMXReadVMCS(VMX_VMCS_RO_IDT_INFO,            &val);
    19962003    AssertRC(rc);
    1997     pVM->hwaccm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);
    1998     if (    VMX_EXIT_INTERRUPTION_INFO_VALID(pVM->hwaccm.s.Event.intInfo)
    1999         &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVM->hwaccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW)
    2000     {
    2001         pVM->hwaccm.s.Event.fPending = true;
     2004    pVCpu->hwaccm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);
     2005    if (    VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hwaccm.s.Event.intInfo)
     2006        &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hwaccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW)
     2007    {
     2008        pVCpu->hwaccm.s.Event.fPending = true;
    20022009        /* Error code present? */
    2003         if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVM->hwaccm.s.Event.intInfo))
     2010        if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hwaccm.s.Event.intInfo))
    20042011        {
    20052012            rc = VMXReadVMCS(VMX_VMCS_RO_IDT_ERRCODE, &val);
    20062013            AssertRC(rc);
    2007             pVM->hwaccm.s.Event.errCode  = val;
    2008             Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%08x pending error=%RX64\n", pVM->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));
     2014            pVCpu->hwaccm.s.Event.errCode  = val;
     2015            Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%08x pending error=%RX64\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));
    20092016        }
    20102017        else
    20112018        {
    2012             Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%08x\n", pVM->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
    2013             pVM->hwaccm.s.Event.errCode  = 0;
     2019            Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%08x\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
     2020            pVCpu->hwaccm.s.Event.errCode  = 0;
    20142021        }
    20152022    }
     
    20812088                    /* Continue execution. */
    20822089                    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
    2083                     pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     2090                    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    20842091
    20852092                    goto ResumeExecution;
     
    20882095                Log(("Forward #NM fault to the guest\n"));
    20892096                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNM);
    2090                 rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, 0);
     2097                rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, 0);
    20912098                AssertRC(rc);
    20922099                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
     
    21092116                    /* Now we must update CR2. */
    21102117                    pCtx->cr2 = exitQualification;
    2111                     rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     2118                    rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
    21122119                    AssertRC(rc);
    21132120
     
    21522159                    /* Now we must update CR2. */
    21532160                    pCtx->cr2 = exitQualification;
    2154                     rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     2161                    rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
    21552162                    AssertRC(rc);
    21562163
     
    21782185                }
    21792186                Log(("Trap %x at %04X:%RGv\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip));
    2180                 rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     2187                rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
    21812188                AssertRC(rc);
    21822189
     
    22262233
    22272234                    Log(("Trap %x (debug) at %RGv exit qualification %RX64\n", vector, (RTGCPTR)pCtx->rip, exitQualification));
    2228                     rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     2235                    rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
    22292236                    AssertRC(rc);
    22302237
     
    22452252                {
    22462253                    Log(("Trap %x at %04X:%RGv errorCode=%x\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip, errCode));
    2247                     rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     2254                    rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
    22482255                    AssertRC(rc);
    22492256                    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
     
    22602267
    22612268                    /* lidt, lgdt can end up here. In the future crx changes as well. Just reload the whole context to be done with it. */
    2262                     pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
     2269                    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
    22632270
    22642271                    /* Only resume if successful. */
     
    22932300
    22942301                Log(("Trap %x at %04X:%RGv\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip));
    2295                 rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     2302                rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
    22962303                AssertRC(rc);
    22972304
     
    23052312                {
    23062313                    Log(("Real Mode Trap %x at %04x:%04X error code %x\n", vector, pCtx->cs, pCtx->eip, errCode));
    2307                     rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     2314                    rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
    23082315                    AssertRC(rc);
    23092316
     
    25012508            {
    25022509            case 0:
    2503                 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3;
     2510                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3;
    25042511                break;
    25052512            case 2:
     
    25072514            case 3:
    25082515                Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));
    2509                 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
     2516                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
    25102517                break;
    25112518            case 4:
    2512                 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
     2519                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
    25132520                break;
    25142521            case 8:
     
    25482555            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCLTS);
    25492556            rc = EMInterpretCLTS(pVM);
    2550             pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     2557            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    25512558            break;
    25522559
     
    25552562            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitLMSW);
    25562563            rc = EMInterpretLMSW(pVM, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
    2557             pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     2564            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    25582565            break;
    25592566        }
     
    26062613                                     VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
    26072614                                     VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification));
    2608             pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     2615            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
    26092616            Log2(("DR7=%08x\n", pCtx->dr[7]));
    26102617        }
     
    27562763
    27572764                            Log(("Inject IO debug trap at %RGv\n", (RTGCPTR)pCtx->rip));
    2758                             rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 0, 0);
     2765                            rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 0, 0);
    27592766                            AssertRC(rc);
    27602767
     
    29472954        STAM_COUNTER_INC(&pVM->hwaccm.s.StatPendingHostIrq);
    29482955        /* On the next entry we'll only sync the host context. */
    2949         pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
     2956        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
    29502957    }
    29512958    else
     
    29542961        /** @todo we can do better than this */
    29552962        /* Not in the VINF_PGM_CHANGE_MODE though! */
    2956         pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
     2963        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
    29572964    }
    29582965
     
    29642971    if (rc == VERR_VMX_INVALID_VMCS_PTR)
    29652972    {
    2966         VMXGetActivateVMCS(&pVM->hwaccm.s.vmx.lasterror.u64VMCSPhys);
    2967         pVM->hwaccm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hwaccm.s.vmx.pVMCS;
     2973        VMXGetActivateVMCS(&pVCpu->hwaccm.s.vmx.lasterror.u64VMCSPhys);
     2974        pVCpu->hwaccm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hwaccm.s.vmx.pVMCS;
    29682975    }
    29692976
     
    29802987 * @returns VBox status code.
    29812988 * @param   pVM         The VM to operate on.
    2982  * @param   idVCpu      VPCPU id.
     2989 * @param   pVCpu       The VMCPU to operate on.
    29832990 * @param   pCpu        CPU info struct
    29842991 */
     
    29993006        return rc;
    30003007
    3001     pVM->hwaccm.s.vmx.fResumeVM = false;
     3008    pVCpu->hwaccm.s.fResumeVM = false;
    30023009    return VINF_SUCCESS;
    30033010}
     
    30093016 * @returns VBox status code.
    30103017 * @param   pVM         The VM to operate on.
    3011  * @param   idVCpu      VPCPU id.
     3018 * @param   pVCpu       The VMCPU to operate on.
    30123019 * @param   pCtx        CPU context
    30133020 */
     
    30273034
    30283035        /* Resync the debug registers the next time. */
    3029         pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     3036        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
    30303037    }
    30313038    else
     
    30443051 * @returns VBox status code.
    30453052 * @param   pVM         The VM to operate on.
     3053 * @param   pVCpu       The VM CPU to operate on.
    30463054 * @param   enmFlush    Type of flush
    30473055 * @param   GCPhys      Physical address of the page to flush
    30483056 */
    3049 static void vmxR0FlushEPT(PVM pVM, VMX_FLUSH enmFlush, RTGCPHYS GCPhys)
     3057static void vmxR0FlushEPT(PVM pVM, PVMCPU pVCpu, VMX_FLUSH enmFlush, RTGCPHYS GCPhys)
    30503058{
    30513059    uint64_t descriptor[2];
     
    30533061    LogFlow(("vmxR0FlushEPT %d %RGv\n", enmFlush, GCPhys));
    30543062    Assert(pVM->hwaccm.s.fNestedPaging);
    3055     /* @todo SMP */
    3056     descriptor[0] = pVM->aCpus[0].hwaccm.s.vmx.GCPhysEPTP;
     3063    descriptor[0] = pVCpu->hwaccm.s.vmx.GCPhysEPTP;
    30573064    descriptor[1] = GCPhys;
    30583065    int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
     
    30663073 * @returns VBox status code.
    30673074 * @param   pVM         The VM to operate on.
     3075 * @param   pVCpu       The VM CPU to operate on.
    30683076 * @param   enmFlush    Type of flush
    30693077 * @param   GCPtr       Virtual address of the page to flush
    30703078 */
    3071 static void vmxR0FlushVPID(PVM pVM, VMX_FLUSH enmFlush, RTGCPTR GCPtr)
     3079static void vmxR0FlushVPID(PVM pVM, PVMCPU pVCpu, VMX_FLUSH enmFlush, RTGCPTR GCPtr)
    30723080{
    30733081    uint64_t descriptor[2];
    30743082
    30753083    Assert(pVM->hwaccm.s.vmx.fVPID);
    3076     descriptor[0] = pVM->hwaccm.s.uCurrentASID;
     3084    descriptor[0] = pVCpu->hwaccm.s.uCurrentASID;
    30773085    descriptor[1] = GCPtr;
    30783086    int rc = VMXR0InvVPID(enmFlush, &descriptor[0]);
     
    30863094 * @returns VBox status code.
    30873095 * @param   pVM         The VM to operate on.
     3096 * @param   pVCpu       The VM CPU to operate on.
    30883097 * @param   GCVirt      Page to invalidate
    30893098 */
    3090 VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
     3099VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
    30913100{
    3092     bool fFlushPending = pVM->hwaccm.s.fForceTLBFlush;
     3101    bool fFlushPending = pVCpu->hwaccm.s.fForceTLBFlush;
    30933102
    30943103    LogFlow(("VMXR0InvalidatePage %RGv\n", GCVirt));
     
    31023111    if (   !fFlushPending
    31033112        && pVM->hwaccm.s.vmx.fVPID)
    3104         vmxR0FlushVPID(pVM, pVM->hwaccm.s.vmx.enmFlushPage, GCVirt);
     3113        vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushPage, GCVirt);
    31053114#endif /* HWACCM_VTX_WITH_VPID */
    31063115
     
    31153124 * @returns VBox status code.
    31163125 * @param   pVM         The VM to operate on.
     3126 * @param   pVCpu       The VM CPU to operate on.
    31173127 * @param   GCPhys      Page to invalidate
    31183128 */
    3119 VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
     3129VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
    31203130{
    3121     bool fFlushPending = pVM->hwaccm.s.fForceTLBFlush;
     3131    bool fFlushPending = pVCpu->hwaccm.s.fForceTLBFlush;
    31223132
    31233133    Assert(pVM->hwaccm.s.fNestedPaging);
     
    31273137    /* Skip it if a TLB flush is already pending. */
    31283138    if (!fFlushPending)
    3129         vmxR0FlushEPT(pVM, pVM->hwaccm.s.vmx.enmFlushPage, GCPhys);
     3139        vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushPage, GCPhys);
    31303140
    31313141    return VINF_SUCCESS;
     
    31363146 *
    31373147 * @param   pVM         The VM to operate on.
     3148 * @param   pVCpu       The VMCPU to operate on.
    31383149 * @param   rc          Return code
    31393150 * @param   pCtx        Current CPU context (not updated)
    31403151 */
    3141 static void VMXR0ReportWorldSwitchError(PVM pVM, int rc, PCPUMCTX pCtx)
     3152static void VMXR0ReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTX pCtx)
    31423153{
    31433154    switch (rc)
     
    31613172            Log(("Current stack %08x\n", &rc));
    31623173
    3163             pVM->hwaccm.s.vmx.lasterror.ulLastInstrError = instrError;
    3164             pVM->hwaccm.s.vmx.lasterror.ulLastExitReason = exitReason;
     3174            pVCpu->hwaccm.s.vmx.lasterror.ulLastInstrError = instrError;
     3175            pVCpu->hwaccm.s.vmx.lasterror.ulLastExitReason = exitReason;
    31653176
    31663177#ifdef VBOX_STRICT
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.h

    r13879 r13883  
    4747 * @returns VBox status code.
    4848 * @param   pVM         The VM to operate on.
    49  * @param   pVCpu      VPCPU id.
     49 * @param   pVCpu       The VMCPU to operate on.
    5050 * @param   pCpu        CPU info struct
    5151 */
     
    5757 * @returns VBox status code.
    5858 * @param   pVM         The VM to operate on.
    59  * @param   pVCpu      VPCPU id.
     59 * @param   pVCpu       The VMCPU to operate on.
    6060 * @param   pCtx        CPU context
    6161 */
     
    114114 * @returns VBox status code.
    115115 * @param   pVM         The VM to operate on.
    116  * @param   pVCpu      VPCPU id.
     116 * @param   pVCpu       The VMCPU to operate on.
    117117 */
    118118VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu);
     
    123123 * @returns VBox status code.
    124124 * @param   pVM         The VM to operate on.
    125  * @param   pVCpu      VPCPU id.
     125 * @param   pVCpu       The VMCPU to operate on.
    126126 * @param   pCtx        Guest context
    127127 */
     
    134134 * @returns VBox status code.
    135135 * @param   pVM         The VM to operate on.
    136  * @param   pVCpu      VPCPU id.
     136 * @param   pVCpu       The VMCPU to operate on.
    137137 * @param   pCtx        Guest context
    138138 */
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r13796 r13883  
    264264    CHECK_MEMBER_ALIGNMENT(HWACCM, u64RegisterMask, 8);
    265265    CHECK_MEMBER_ALIGNMENT(HWACCM, vmx.hostCR4, 8);
    266     CHECK_MEMBER_ALIGNMENT(HWACCM, Event.intInfo, 8);
     266    CHECK_MEMBER_ALIGNMENT(HWACCMCPU, Event.intInfo, 8);
    267267    CHECK_MEMBER_ALIGNMENT(HWACCM, StatEntry, 8);
    268268
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette