VirtualBox

Changeset 72805 in vbox for trunk/src/VBox/VMM


Timestamp: Jul 3, 2018 4:05:43 AM (7 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 123348
Message:

VMM/HM: bugref:9193 Fix honoring of pending VMCPU_FF_HM_UPDATE_CR3 and VMCPU_FF_HM_UPDATE_PAE_PDPES
before re-entering guest execution with the VT-x R0 code.
Avoid a couple of VMWRITEs, because we already have caches of the values (CR0 mask, CR4 mask).
Parameter cleanup (see the sketches interleaved with the diffs below).

Location: trunk/src/VBox/VMM
Files: 8 edited
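
The first point of the message is the actual bug fix: before resuming guest execution, the VT-x ring-0 code must consume any CR3/PAE-PDPE update requests that PGM queued as force-flags while the guest state was live. The hunk implementing this lands in HMVMXR0.cpp, beyond what the excerpt below shows, so the following is only a minimal, self-contained sketch of the pattern. The flag bits, struct members, and the stubs standing in for PGMUpdateCR3()/PGMGstUpdatePaePdpes() are simplified assumptions, not the real VBox definitions.

    #include <assert.h>
    #include <stdint.h>

    /* Stand-ins: real code tests pVCpu->fLocalForcedActions via VMCPU_FF_IS_PENDING()
     * and calls the PGM helpers, which clear the force-flags themselves. */
    #define VMCPU_FF_HM_UPDATE_CR3        (1u << 0)
    #define VMCPU_FF_HM_UPDATE_PAE_PDPES  (1u << 1)

    typedef struct VMCPU { uint32_t fLocalForcedActions; } VMCPU;

    static void pgmUpdateCr3Stub(VMCPU *pVCpu)       /* stands in for PGMUpdateCR3() */
    {
        pVCpu->fLocalForcedActions &= ~VMCPU_FF_HM_UPDATE_CR3;
    }

    static void pgmUpdatePaePdpesStub(VMCPU *pVCpu)  /* stands in for PGMGstUpdatePaePdpes() */
    {
        pVCpu->fLocalForcedActions &= ~VMCPU_FF_HM_UPDATE_PAE_PDPES;
    }

    /* Consume pending page-table update requests before re-entering the guest. */
    static void hmConsumePendingUpdates(VMCPU *pVCpu)
    {
        if (pVCpu->fLocalForcedActions & VMCPU_FF_HM_UPDATE_CR3)
            pgmUpdateCr3Stub(pVCpu);                 /* reload the guest CR3 from PGM */
        if (pVCpu->fLocalForcedActions & VMCPU_FF_HM_UPDATE_PAE_PDPES)
            pgmUpdatePaePdpesStub(pVCpu);            /* refresh the four PAE PDPTEs */
        assert(!(pVCpu->fLocalForcedActions
                 & (VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_HM_UPDATE_PAE_PDPES)));
    }

    int main(void)
    {
        VMCPU VCpu = { VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_HM_UPDATE_PAE_PDPES };
        hmConsumePendingUpdates(&VCpu);              /* must run before every VM-entry */
        return 0;
    }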

Legend (each diff line is prefixed with the old and/or new source line number):

Unmodified: both the old-revision and r72805 line numbers are shown
Added: only the new (r72805) line number is shown
Removed: only the old-revision line number is shown
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r72772 r72805  
    8888    /** @name Ring-0 method table for AMD-V and VT-x specific operations.
    8989     * @{ */
    90     DECLR0CALLBACKMEMBER(int,  pfnEnterSession, (PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu));
     90    DECLR0CALLBACKMEMBER(int,  pfnEnterSession, (PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu));
    9191    DECLR0CALLBACKMEMBER(void, pfnThreadCtxCallback, (RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit));
    9292    DECLR0CALLBACKMEMBER(int,  pfnExportHostState, (PVMCPU pVCpu));
    93     DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     93    DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVMCPU pVCpu, PCPUMCTX pCtx));
    9494    DECLR0CALLBACKMEMBER(int,  pfnEnableCpu, (PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
    9595                                              bool fEnabledByHost, void *pvArg));
     
    234234 * @{ */
    235235
    236 static DECLCALLBACK(int) hmR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
    237 {
    238     RT_NOREF3(pVM, pVCpu, pCpu);
     236static DECLCALLBACK(int) hmR0DummyEnter(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
     237{
     238    RT_NOREF2(pVCpu, pCpu);
    239239    return VINF_SUCCESS;
    240240}
     
    276276}
    277277
    278 static DECLCALLBACK(VBOXSTRICTRC) hmR0DummyRunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    279 {
    280     RT_NOREF3(pVM, pVCpu, pCtx);
     278static DECLCALLBACK(VBOXSTRICTRC) hmR0DummyRunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx)
     279{
     280    RT_NOREF2(pVCpu, pCtx);
    281281    return VINF_SUCCESS;
    282282}
     
    13891389 * @remarks No-long-jump zone!!!
    13901390 */
    1391 VMMR0_INT_DECL(int) HMR0EnterCpu(PVMCPU pVCpu)
     1391VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPU pVCpu)
    13921392{
    13931393    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
    14181418 *
    14191419 * @returns VBox status code.
    1420  * @param   pVM        The cross context VM structure.
    14211420 * @param   pVCpu      The cross context virtual CPU structure.
    14221421 *
    14231422 * @remarks This is called with preemption disabled.
    14241423 */
    1425 VMMR0_INT_DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu)
     1424VMMR0_INT_DECL(int) HMR0Enter(PVMCPU pVCpu)
    14261425{
    14271426    /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
     
    14301429
    14311430    /* Load the bare minimum state required for entering HM. */
    1432     int rc = HMR0EnterCpu(pVCpu);
     1431    int rc = hmR0EnterCpu(pVCpu);
    14331432    AssertRCReturn(rc, rc);
    14341433
     
    14521451    }
    14531452
    1454     rc = g_HmR0.pfnEnterSession(pVM, pVCpu, pCpu);
     1453    rc = g_HmR0.pfnEnterSession(pVCpu, pCpu);
    14551454    AssertMsgRCReturn(rc, ("rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
    14561455
     
    15371536VMMR0_INT_DECL(int) HMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
    15381537{
     1538    RT_NOREF(pVM);
     1539
    15391540#ifdef VBOX_STRICT
    15401541    /* With thread-context hooks we would be running this code with preemption enabled. */
     
    15541555#endif
    15551556
    1556     VBOXSTRICTRC rcStrict = g_HmR0.pfnRunGuestCode(pVM, pVCpu, &pVCpu->cpum.GstCtx);
     1557    VBOXSTRICTRC rcStrict = g_HmR0.pfnRunGuestCode(pVCpu, &pVCpu->cpum.GstCtx);
    15571558
    15581559#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     
    16691670    PVM pVM = pVCpu->CTX_SUFF(pVM);
    16701671    if (pVM->hm.s.vmx.fSupported)
    1671         return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);
    1672     return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);
     1672        return VMXR0InvalidatePage(pVCpu, GCVirt);
     1673    return SVMR0InvalidatePage(pVCpu, GCVirt);
    16731674}
    16741675
     
    17281729
    17291730#ifdef VBOX_WITH_RAW_MODE
    1730 
    17311731/**
    17321732 * Raw-mode switcher hook - disable VT-x if it's active *and* the current
     
    18231823    }
    18241824}
    1825 
    18261825#endif /* VBOX_WITH_RAW_MODE */
     1826
     1827
    18271828#ifdef VBOX_STRICT
    1828 
    18291829/**
    18301830 * Dumps a descriptor.
     
    19531953 * Formats a full register dump.
    19541954 *
    1955  * @param   pVM         The cross context VM structure.
    19561955 * @param   pVCpu       The cross context virtual CPU structure.
    19571956 * @param   pCtx        Pointer to the CPU context.
    19581957 */
    1959 VMMR0_INT_DECL(void) hmR0DumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    1960 {
    1961     NOREF(pVM);
    1962 
     1958VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
     1959{
    19631960    /*
    19641961     * Format the flags.
     
    20982095    NOREF(pFpuCtx);
    20992096}
    2100 
    21012097#endif /* VBOX_STRICT */
    21022098
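
The HMR0.cpp hunks above show the two mechanical halves of the "parameter cleanup": pVM disappears from signatures that also take pVCpu (the VM pointer is recovered via pVCpu->CTX_SUFF(pVM) where still needed, and HMR0EnterCpu becomes the internal hmR0EnterCpu), while the dummy method-table callbacks drop from RT_NOREF3 to RT_NOREF2 to match. Below is a standalone sketch of both idioms; the types and macro expansions are simplified stand-ins for the real definitions in the VBox/vmm and iprt headers.

    #include <stdio.h>

    /* Simplified stand-ins; the real CTX_SUFF() picks the ring-0/ring-3/raw-mode
     * member of a structure at compile time. */
    typedef struct VM    { unsigned fGlobalForcedActions; } VM, *PVM;
    typedef struct VMCPU { PVM pVMR0; } VMCPU, *PVMCPU;
    #define CTX_SUFF(a)     a##R0                      /* ring-0 flavour for this sketch */
    #define RT_NOREF2(a, b) ((void)(a), (void)(b))     /* silence unused-parameter warnings */

    /* Old shape: int HMR0Enter(PVM pVM, PVMCPU pVCpu).  The new shape derives pVM. */
    static int HMR0Enter(PVMCPU pVCpu)
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);                /* expands to pVCpu->pVMR0 */
        printf("global FFs: %#x\n", pVM->fGlobalForcedActions);
        return 0;                                      /* VINF_SUCCESS */
    }

    /* Dummy method-table entry: nothing to do, but keep strict compilers quiet. */
    static int hmR0DummyEnter(PVMCPU pVCpu, void *pHostCpu)
    {
        RT_NOREF2(pVCpu, pHostCpu);
        return 0;                                      /* VINF_SUCCESS */
    }

    int main(void)
    {
        VM    Vm   = { 0 };
        VMCPU VCpu = { &Vm };
        hmR0DummyEnter(&VCpu, NULL);
        return HMR0Enter(&VCpu);
    }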
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r72786 r72805  
    7373# endif
    7474#endif /* !VBOX_WITH_STATISTICS */
    75 
    7675
    7776/** If we decide to use a function table approach this can be useful to
     
    528527 *
    529528 * @returns VBox status code.
    530  * @param   pCpu            Pointer to the CPU info struct.
     529 * @param   pHostCpu        Pointer to the CPU info struct.
    531530 * @param   pVM             The cross context VM structure. Can be
    532531 *                          NULL after a resume!
     
    536535 * @param   pvArg           Unused on AMD-V.
    537536 */
    538 VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
     537VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
    539538                              void *pvArg)
    540539{
     
    560559        if (   pVM
    561560            && pVM->hm.s.svm.fIgnoreInUseError)
    562             pCpu->fIgnoreAMDVInUseError = true;
    563 
    564         if (!pCpu->fIgnoreAMDVInUseError)
     561            pHostCpu->fIgnoreAMDVInUseError = true;
     562
     563        if (!pHostCpu->fIgnoreAMDVInUseError)
    565564        {
    566565            ASMSetFlags(fEFlags);
     
    584583     * entirely with before executing any guest code.
    585584     */
    586     pCpu->fFlushAsidBeforeUse = true;
     585    pHostCpu->fFlushAsidBeforeUse = true;
    587586
    588587    /*
    589588     * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
    590589     */
    591     ++pCpu->cTlbFlushes;
     590    ++pHostCpu->cTlbFlushes;
    592591
    593592    return VINF_SUCCESS;
     
    599598 *
    600599 * @returns VBox status code.
    601  * @param   pCpu            Pointer to the CPU info struct.
     600 * @param   pHostCpu        Pointer to the CPU info struct.
    602601 * @param   pvCpuPage       Pointer to the global CPU page.
    603602 * @param   HCPhysCpuPage   Physical address of the global CPU page.
    604603 */
    605 VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     604VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    606605{
    607606    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
    609608                 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    610609    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
    611     NOREF(pCpu);
     610    RT_NOREF(pHostCpu);
    612611
    613612    /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
     
    11691168 *
    11701169 * @returns VBox status code.
    1171  * @param   pVM         The cross context VM structure.
    11721170 * @param   pVCpu       The cross context virtual CPU structure.
    11731171 * @param   GCVirt      Guest virtual address of the page to invalidate.
    11741172 */
    1175 VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
    1176 {
    1177     AssertReturn(pVM, VERR_INVALID_PARAMETER);
    1178     Assert(pVM->hm.s.svm.fSupported);
    1179 
    1180     bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
     1173VMMR0DECL(int) SVMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
     1174{
     1175    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
     1176
     1177    bool const fFlushPending = pVCpu->CTX_SUFF(pVM)->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
    11811178
    11821179    /* Skip it if a TLB flush is already pending. */
     
    16101607    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    16111608
    1612     Assert(RT_HI_U32(uShadowCr0) == 0);
     1609    Assert(!RT_HI_U32(uShadowCr0));
    16131610    if (pVmcb->guest.u64CR0 != uShadowCr0)
    16141611    {
     
    17231720
    17241721    /* Update VMCB with the shadow CR4 the appropriate VMCB clean bits. */
    1725     Assert(RT_HI_U32(uShadowCr4) == 0);
     1722    Assert(!RT_HI_U32(uShadowCr4));
    17261723    pVmcb->guest.u64CR4 = uShadowCr4;
    17271724    pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS);
     
    23372334 *
    23382335 * @returns VBox status code.
    2339  * @param   pVM         The cross context VM structure.
    23402336 * @param   pVCpu       The cross context virtual CPU structure.
    2341  * @param   pCpu        Pointer to the CPU info struct.
    2342  */
    2343 VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
    2344 {
    2345     AssertPtr(pVM);
     2337 * @param   pHostCpu    Pointer to the CPU info struct.
     2338 */
     2339VMMR0DECL(int) SVMR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
     2340{
    23462341    AssertPtr(pVCpu);
    2347     Assert(pVM->hm.s.svm.fSupported);
     2342    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
    23482343    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    2349     NOREF(pVM); NOREF(pCpu);
    2350 
    2351     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
     2344    RT_NOREF(pHostCpu);
     2345
     2346    LogFlowFunc(("pVCpu=%p\n", pVCpu));
    23522347    Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
    23532348                                   == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
     
    24102405             * initializing AMD-V if necessary (onlined CPUs, local init etc.)
    24112406             */
    2412             int rc = HMR0EnterCpu(pVCpu);
     2407            int rc = hmR0EnterCpu(pVCpu);
    24132408            AssertRC(rc); NOREF(rc);
    24142409            Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
     
    31263121    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
    31273122    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
    3128     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
    3129     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
     3123    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
     3124    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
    31303125    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
    31313126
     
    32553250 *
    32563251 * @returns VBox status code.
    3257  * @param   pVM         The cross context VM structure.
    32583252 * @param   pVCpu       The cross context virtual CPU structure.
    32593253 * @param   pCtx        Pointer to the guest-CPU context.
     
    32613255 *                      VINF_VMM_UNKNOWN_RING3_CALL.
    32623256 */
    3263 static int hmR0SvmExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
    3264 {
    3265     Assert(pVM);
     3257static int hmR0SvmExitToRing3(PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
     3258{
    32663259    Assert(pVCpu);
    32673260    Assert(pCtx);
     
    32703263    /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
    32713264    VMMRZCallRing3Disable(pVCpu);
    3272     Log4Func(("rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", rcExit, pVCpu->fLocalForcedActions, pVM->fGlobalForcedActions));
     3265    Log4Func(("rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", rcExit, pVCpu->fLocalForcedActions,
     3266              pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions));
    32733267
    32743268    /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
     
    32903284                               | CPUM_CHANGED_TR
    32913285                               | CPUM_CHANGED_HIDDEN_SEL_REGS);
    3292     if (   pVM->hm.s.fNestedPaging
     3286    if (   pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
    32933287        && CPUMIsGuestPagingEnabledEx(pCtx))
    32943288    {
     
    33313325 * intercepts.
    33323326 *
    3333  * @param   pVM         The cross context VM structure.
    3334  * @param   pVCpu       The cross context virtual CPU structure.
    33353327 * @param   pCtx        Pointer to the guest-CPU or nested-guest-CPU context.
    33363328 * @param   pVmcb       Pointer to the VM control block.
     
    33383330 * @remarks No-long-jump zone!!!
    33393331 */
    3340 static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb)
     3332static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb)
    33413333{
    33423334    /*
     
    33473339    bool       fParavirtTsc;
    33483340    uint64_t   uTscOffset;
    3349     bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
     3341    bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu->CTX_SUFF(pVM), pVCpu, &uTscOffset, &fParavirtTsc);
    33503342
    33513343    bool fIntercept;
     
    40794071 * Reports world-switch error and dumps some useful debug info.
    40804072 *
    4081  * @param   pVM             The cross context VM structure.
    40824073 * @param   pVCpu           The cross context virtual CPU structure.
    40834074 * @param   rcVMRun         The return code from VMRUN (or
     
    40864077 * @param   pCtx            Pointer to the guest-CPU context.
    40874078 */
    4088 static void hmR0SvmReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
    4089 {
    4090     NOREF(pCtx);
     4079static void hmR0SvmReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
     4080{
    40914081    HMSVM_ASSERT_PREEMPT_SAFE();
    40924082    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
    40934083    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    40944084
    4095     PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    40964085    if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
    40974086    {
    4098         hmR0DumpRegs(pVM, pVCpu, pCtx); NOREF(pVM);
    4099         /** @todo We probably don't need to dump this anymore or we can expand
    4100          *        hmR0DumpRegs()? */
    41014087#ifdef VBOX_STRICT
     4088        hmR0DumpRegs(pVCpu, pCtx);
     4089        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
    41024090        Log4(("ctrl.u32VmcbCleanBits                 %#RX32\n",   pVmcb->ctrl.u32VmcbCleanBits));
    41034091        Log4(("ctrl.u16InterceptRdCRx                %#x\n",      pVmcb->ctrl.u16InterceptRdCRx));
     
    42254213        Log4Func(("rcVMRun=%d\n", rcVMRun));
    42264214
    4227     NOREF(pVmcb);
     4215    NOREF(pCtx);
    42284216}
    42294217
     
    42454233 *         to the EM loop.
    42464234 *
    4247  * @param   pVM         The cross context VM structure.
    42484235 * @param   pVCpu       The cross context virtual CPU structure.
    42494236 * @param   pCtx        Pointer to the guest-CPU context.
    42504237 */
    4251 static int hmR0SvmCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     4238static int hmR0SvmCheckForceFlags(PVMCPU pVCpu, PCPUMCTX pCtx)
    42524239{
    42534240    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     
    42624249        APICUpdatePendingInterrupts(pVCpu);
    42634250
     4251    PVM pVM = pVCpu->CTX_SUFF(pVM);
    42644252    if (   VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
    42654253                            ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
     
    43244312 * @retval VINF_* scheduling changes, we have to go back to ring-3.
    43254313 *
    4326  * @param   pVM             The cross context VM structure.
    43274314 * @param   pVCpu           The cross context virtual CPU structure.
    43284315 * @param   pCtx            Pointer to the nested-guest-CPU context.
     
    43324319 * @sa      hmR0SvmPreRunGuest.
    43334320 */
    4334 static int hmR0SvmPreRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
     4321static int hmR0SvmPreRunGuestNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    43354322{
    43364323    HMSVM_ASSERT_PREEMPT_SAFE();
     
    43434330
    43444331    /* Check force flag actions that might require us to go back to ring-3. */
    4345     int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
     4332    int rc = hmR0SvmCheckForceFlags(pVCpu, pCtx);
    43464333    if (rc != VINF_SUCCESS)
    43474334        return rc;
     
    43644351     * NB: If we could continue a task switch exit we wouldn't need to do this.
    43654352     */
     4353    PVM pVM = pVCpu->CTX_SUFF(pVM);
    43664354    if (RT_UNLIKELY(   !pVM->hm.s.svm.u32Features
    43674355                    &&  pVCpu->hm.s.Event.fPending
     
    44464434 * @retval VINF_* scheduling changes, we have to go back to ring-3.
    44474435 *
    4448  * @param   pVM             The cross context VM structure.
    44494436 * @param   pVCpu           The cross context virtual CPU structure.
    44504437 * @param   pCtx            Pointer to the guest-CPU context.
    44514438 * @param   pSvmTransient   Pointer to the SVM transient structure.
    44524439 */
    4453 static int hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
     4440static int hmR0SvmPreRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    44544441{
    44554442    HMSVM_ASSERT_PREEMPT_SAFE();
     
    44574444
    44584445    /* Check force flag actions that might require us to go back to ring-3. */
    4459     int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
     4446    int rc = hmR0SvmCheckForceFlags(pVCpu, pCtx);
    44604447    if (rc != VINF_SUCCESS)
    44614448        return rc;
     
    44714458     * NB: If we could continue a task switch exit we wouldn't need to do this.
    44724459     */
     4460    PVM pVM = pVCpu->CTX_SUFF(pVM);
    44734461    if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending && (((pVCpu->hm.s.Event.u64IntInfo >> 8) & 7) == SVM_EVENT_NMI)))
    44744462        if (RT_UNLIKELY(!pVM->hm.s.svm.u32Features))
     
    46044592        || fMigratedHostCpu)
    46054593    {
    4606         hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pCtx, pVmcb);
     4594        hmR0SvmUpdateTscOffsetting(pVCpu, pCtx, pVmcb);
    46074595        pSvmTransient->fUpdateTscOffsetting = false;
    46084596    }
     
    46874675
    46884676/**
    4689  * Wrapper for running the guest code in AMD-V.
     4677 * Wrapper for running the guest (or nested-guest) code in AMD-V.
    46904678 *
    46914679 * @returns VBox strict status code.
    4692  * @param   pVM         The cross context VM structure.
    46934680 * @param   pVCpu       The cross context virtual CPU structure.
    46944681 * @param   pCtx        Pointer to the guest-CPU context.
     4682 * @param   HCPhysVmcb  The host physical address of the VMCB.
    46954683 *
    46964684 * @remarks No-long-jump zone!!!
    46974685 */
    4698 DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     4686DECLINLINE(int) hmR0SvmRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, RTHCPHYS HCPhysVmcb)
    46994687{
    47004688    /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
     
    47084696     * Refer MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
    47094697     */
     4698    PVM pVM = pVCpu->CTX_SUFF(pVM);
    47104699#ifdef VBOX_WITH_KERNEL_USING_XMM
    4711     return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
    4712                              pVCpu->hm.s.svm.pfnVMRun);
     4700    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu, pVCpu->hm.s.svm.pfnVMRun);
    47134701#else
    4714     return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
    4715 #endif
    4716 }
    4717 
    4718 
    4719 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    4720 /**
    4721  * Wrapper for running the nested-guest code in AMD-V.
    4722  *
    4723  * @returns VBox strict status code.
    4724  * @param   pVM         The cross context VM structure.
    4725  * @param   pVCpu       The cross context virtual CPU structure.
    4726  * @param   pCtx        Pointer to the guest-CPU context.
    4727  *
    4728  * @remarks No-long-jump zone!!!
    4729  */
    4730 DECLINLINE(int) hmR0SvmRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    4731 {
    4732     /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
    4733     pCtx->fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
    4734 
    4735     /*
    4736      * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses
    4737      * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are
    4738      * callee-saved and thus the need for this XMM wrapper.
    4739      *
    4740      * Refer MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
    4741      */
    4742 #ifdef VBOX_WITH_KERNEL_USING_XMM
    4743     return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
    4744                              pVCpu->hm.s.svm.pfnVMRun);
    4745 #else
    4746     return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
     4702    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu);
    47474703#endif
    47484704}
     
    47684724    return uTicks - pVmcbNstGstCache->u64TSCOffset;
    47694725}
    4770 #endif
     4726
    47714727
    47724728/**
     
    48194775    }
    48204776
    4821     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
     4777    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
    48224778    TMNotifyEndOfExecution(pVCpu);                              /* Notify TM that the guest is no longer running. */
    48234779    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     
    49084864 *
    49094865 * @returns VBox status code.
    4910  * @param   pVM         The cross context VM structure.
    49114866 * @param   pVCpu       The cross context virtual CPU structure.
    49124867 * @param   pCtx        Pointer to the guest-CPU context.
    49134868 * @param   pcLoops     Pointer to the number of executed loops.
    49144869 */
    4915 static int hmR0SvmRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
    4916 {
    4917     uint32_t const cMaxResumeLoops = pVM->hm.s.cMaxResumeLoops;
     4870static int hmR0SvmRunGuestCodeNormal(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
     4871{
     4872    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
    49184873    Assert(pcLoops);
    49194874    Assert(*pcLoops <= cMaxResumeLoops);
     
    49334888           ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
    49344889        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    4935         rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
     4890        rc = hmR0SvmPreRunGuest(pVCpu, pCtx, &SvmTransient);
    49364891        if (rc != VINF_SUCCESS)
    49374892            break;
     
    49444899         */
    49454900        hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
    4946         rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
     4901        rc = hmR0SvmRunGuest(pVCpu, pCtx, pVCpu->hm.s.svm.HCPhysVmcb);
    49474902
    49484903        /* Restore any residual host-state and save any bits shared between host and guest
     
    49554910            if (rc == VINF_SUCCESS)
    49564911                rc = VERR_SVM_INVALID_GUEST_STATE;
    4957             STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
    4958             hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
     4912            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
     4913            hmR0SvmReportWorldSwitchError(pVCpu, rc, pCtx);
    49594914            break;
    49604915        }
     
    49624917        /* Handle the #VMEXIT. */
    49634918        HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
    4964         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
     4919        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
    49654920        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
    49664921        rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
    4967         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
     4922        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
    49684923        if (rc != VINF_SUCCESS)
    49694924            break;
     
    49854940 *
    49864941 * @returns VBox status code.
    4987  * @param   pVM         The cross context VM structure.
    49884942 * @param   pVCpu       The cross context virtual CPU structure.
    49894943 * @param   pCtx        Pointer to the guest-CPU context.
    49904944 * @param   pcLoops     Pointer to the number of executed loops.
    49914945 */
    4992 static int hmR0SvmRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
    4993 {
    4994     uint32_t const cMaxResumeLoops = pVM->hm.s.cMaxResumeLoops;
     4946static int hmR0SvmRunGuestCodeStep(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
     4947{
     4948    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
    49954949    Assert(pcLoops);
    49964950    Assert(*pcLoops <= cMaxResumeLoops);
     
    50154969           ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
    50164970        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    5017         rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
     4971        rc = hmR0SvmPreRunGuest(pVCpu, pCtx, &SvmTransient);
    50184972        if (rc != VINF_SUCCESS)
    50194973            break;
     
    50294983        hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
    50304984
    5031         rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
     4985        rc = hmR0SvmRunGuest(pVCpu, pCtx, pVCpu->hm.s.svm.HCPhysVmcb);
    50324986
    50334987        /* Restore any residual host-state and save any bits shared between host and guest
     
    50404994            if (rc == VINF_SUCCESS)
    50414995                rc = VERR_SVM_INVALID_GUEST_STATE;
    5042             STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
    5043             hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
     4996            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
     4997            hmR0SvmReportWorldSwitchError(pVCpu, rc, pCtx);
    50444998            return rc;
    50454999        }
     
    50475001        /* Handle the #VMEXIT. */
    50485002        HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
    5049         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
     5003        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
    50505004        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
    50515005        rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
    5052         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
     5006        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
    50535007        if (rc != VINF_SUCCESS)
    50545008            break;
     
    50915045 *
    50925046 * @returns VBox status code.
    5093  * @param   pVM         The cross context VM structure.
    50945047 * @param   pVCpu       The cross context virtual CPU structure.
    50955048 * @param   pCtx        Pointer to the guest-CPU context.
     
    50985051 *                      execution loop pass the remainder value, else pass 0.
    50995052 */
    5100 static int hmR0SvmRunGuestCodeNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
     5053static int hmR0SvmRunGuestCodeNested(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
    51015054{
    51025055    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
    51035056    Assert(pcLoops);
    5104     Assert(*pcLoops <= pVM->hm.s.cMaxResumeLoops);
     5057    Assert(*pcLoops <= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops);
    51055058
    51065059    SVMTRANSIENT SvmTransient;
     
    51195072           ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
    51205073        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    5121         rc = hmR0SvmPreRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient);
     5074        rc = hmR0SvmPreRunGuestNested(pVCpu, pCtx, &SvmTransient);
    51225075        if (    rc != VINF_SUCCESS
    51235076            || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     
    51345087        hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
    51355088
    5136         rc = hmR0SvmRunGuestNested(pVM, pVCpu, pCtx);
     5089        rc = hmR0SvmRunGuest(pVCpu, pCtx, pCtx->hwvirt.svm.HCPhysVmcb);
    51375090
    51385091        /* Restore any residual host-state and save any bits shared between host and guest
     
    51585111        /* Handle the #VMEXIT. */
    51595112        HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
    5160         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
     5113        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
    51615114        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pCtx->hwvirt.svm.CTX_SUFF(pVmcb));
    51625115        rc = hmR0SvmHandleExitNested(pVCpu, pCtx, &SvmTransient);
    5163         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
     5116        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
    51645117        if (    rc != VINF_SUCCESS
    51655118            || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    51665119            break;
    5167         if (++(*pcLoops) >= pVM->hm.s.cMaxResumeLoops)
     5120        if (++(*pcLoops) >= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
    51685121        {
    51695122            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
     
    51855138 *
    51865139 * @returns Strict VBox status code.
    5187  * @param   pVM         The cross context VM structure.
    51885140 * @param   pVCpu       The cross context virtual CPU structure.
    51895141 * @param   pCtx        Pointer to the guest-CPU context.
    51905142 */
    5191 VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     5143VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx)
    51925144{
    51935145    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     
    52025154    {
    52035155        if (!pVCpu->hm.s.fSingleInstruction)
    5204             rc = hmR0SvmRunGuestCodeNormal(pVM, pVCpu, pCtx, &cLoops);
     5156            rc = hmR0SvmRunGuestCodeNormal(pVCpu, pCtx, &cLoops);
    52055157        else
    5206             rc = hmR0SvmRunGuestCodeStep(pVM, pVCpu, pCtx, &cLoops);
     5158            rc = hmR0SvmRunGuestCodeStep(pVCpu, pCtx, &cLoops);
    52075159    }
    52085160#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     
    52165168    if (rc == VINF_SVM_VMRUN)
    52175169    {
    5218         rc = hmR0SvmRunGuestCodeNested(pVM, pVCpu, pCtx, &cLoops);
     5170        rc = hmR0SvmRunGuestCodeNested(pVCpu, pCtx, &cLoops);
    52195171        if (rc == VINF_SVM_VMEXIT)
    52205172            rc = VINF_SUCCESS;
     
    52295181
    52305182    /* Prepare to return to ring-3. This will remove longjmp notifications. */
    5231     rc = hmR0SvmExitToRing3(pVM, pVCpu, pCtx, rc);
     5183    rc = hmR0SvmExitToRing3(pVCpu, pCtx, rc);
    52325184    Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
    52335185    return rc;
     
    59545906 * @retval  VERR_*                  Fatal errors.
    59555907 *
    5956  * @param   pVM         The cross context VM structure.
    59575908 * @param   pVCpu       The cross context virtual CPU structure.
    59585909 * @param   pCtx        The guest CPU context.
     
    59605911 * @remarks Updates the RIP if the instruction was executed successfully.
    59615912 */
    5962 static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     5913static int hmR0SvmInterpretInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx)
    59635914{
    59645915    /* Only allow 32 & 64 bit code. */
     
    59665917    {
    59675918        PDISSTATE pDis = &pVCpu->hm.s.DisState;
    5968         int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
     5919        int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, NULL /* pcbInstr */);
    59695920        if (   RT_SUCCESS(rc)
    59705921            && pDis->pCurInstr->uOpcode == OP_INVLPG)
     
    64476398{
    64486399    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    6449     PVM pVM = pVCpu->CTX_SUFF(pVM);
    6450     Assert(!pVM->hm.s.fNestedPaging);
     6400    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
    64516401
    64526402    bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu, pCtx);
     
    64656415
    64666416    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    6467     int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, pCtx);    /* Updates RIP if successful. */
     6417    int rc = hmR0SvmInterpretInvlpg(pVCpu, pCtx);    /* Updates RIP if successful. */
    64686418    Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
    64696419    HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
     
    69976947    static uint32_t const s_aIOSize[8]  = { 0, 1, 2, 0, 4, 0, 0, 0 };                   /* Size of the I/O accesses in bytes. */
    69986948    static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 };  /* AND masks for saving
    6999                                                                                             the result (in AL/AX/EAX). */
     6949                                                                                           the result (in AL/AX/EAX). */
    70006950    Log4Func(("CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
    70016951
     
    72437193     */
    72447194    if (   pVM->hm.s.fTprPatchingAllowed
    7245         && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
    72467195        && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == XAPIC_OFF_TPR
    72477196        && (   !(u32ErrCode & X86_TRAP_PF_P)                                                             /* Not present */
    72487197            || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD))  /* MMIO page. */
     7198        && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
    72497199        && !CPUMIsGuestInLongModeEx(pCtx)
    72507200        && !CPUMGetGuestCPL(pVCpu)
     
    80497999    return VINF_SUCCESS;
    80508000}
    8051 
    80528001#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
    80538002
    8054 
    80558003/** @} */
    80568004
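
A noteworthy simplification in the HMSVMR0.cpp hunks above: hmR0SvmRunGuestNested() is deleted outright. The guest and nested-guest wrappers differed only in which VMCB physical address they hard-coded, so r72805 makes that address a parameter of hmR0SvmRunGuest() and the nested path simply passes pCtx->hwvirt.svm.HCPhysVmcb. A reduced sketch of the resulting shape, using stand-in types and omitting the XMM-saving wrapper:

    #include <stdint.h>
    typedef uint64_t RTHCPHYS;                          /* host-physical address */

    /* Stand-in for pVCpu->hm.s.svm.pfnVMRun, the assembly VMRUN thunk. */
    static int svmVMRunStub(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb)
    {
        (void)HCPhysVmcbHost; (void)HCPhysVmcb;
        return 0;                                       /* VINF_SUCCESS */
    }

    /* One wrapper serves both modes now that the VMCB address is a parameter. */
    static int hmR0SvmRunGuest(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb)
    {
        return svmVMRunStub(HCPhysVmcbHost, HCPhysVmcb);
    }

    int main(void)
    {
        RTHCPHYS const HCPhysVmcbHost   = 0x1000;       /* host-state VMCB */
        RTHCPHYS const HCPhysGuestVmcb  = 0x2000;       /* pVCpu->hm.s.svm.HCPhysVmcb */
        RTHCPHYS const HCPhysNstGstVmcb = 0x3000;       /* pCtx->hwvirt.svm.HCPhysVmcb */
        int rc  = hmR0SvmRunGuest(HCPhysVmcbHost, HCPhysGuestVmcb);   /* normal guest */
        rc     |= hmR0SvmRunGuest(HCPhysVmcbHost, HCPhysNstGstVmcb);  /* nested guest */
        return rc;
    }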
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.h

    r72744 r72805  
    4040VMMR0DECL(int)          SVMR0GlobalInit(void);
    4141VMMR0DECL(void)         SVMR0GlobalTerm(void);
    42 VMMR0DECL(int)          SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);
     42VMMR0DECL(int)          SVMR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu);
    4343VMMR0DECL(void)         SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
    44 VMMR0DECL(int)          SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage,
     44VMMR0DECL(int)          SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage,
    4545                                       bool fEnabledBySystem, void *pvArg);
    46 VMMR0DECL(int)          SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     46VMMR0DECL(int)          SVMR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
    4747VMMR0DECL(int)          SVMR0InitVM(PVM pVM);
    4848VMMR0DECL(int)          SVMR0TermVM(PVM pVM);
    4949VMMR0DECL(int)          SVMR0SetupVM(PVM pVM);
    50 VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
     50VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx);
    5151VMMR0DECL(int)          SVMR0ExportHostState(PVMCPU pVCpu);
    5252VMMR0DECL(int)          SVMR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat);
    53 VMMR0DECL(int)          SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);
     53VMMR0DECL(int)          SVMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
    5454
    5555#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r72802 r72805  
    371371*********************************************************************************************************************************/
    372372static void               hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush);
    373 static void               hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
     373static void               hmR0VmxFlushVpid(PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
    374374static void               hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu);
    375375static int                hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat);
     
    377377                                                 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState);
    378378#if HC_ARCH_BITS == 32
    379 static int                hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
     379static int                hmR0VmxInitVmcsReadCache(PVMCPU pVCpu);
    380380#endif
    381381#ifndef HMVMX_USE_FUNCTION_TABLE
     
    446446static int          hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
    447447static int          hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
    448 static uint32_t     hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
     448static uint32_t     hmR0VmxCheckGuestState(PVMCPU pVCpu, PCPUMCTX pCtx);
    449449
    450450
     
    570570 * updates VCPU's last error record as well.
    571571 *
    572  * @param   pVM     The cross context VM structure.
    573572 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    574573 *                  Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
     
    576575 * @param   rc      The error code.
    577576 */
    578 static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
    579 {
    580     AssertPtr(pVM);
     577static void hmR0VmxUpdateErrorRecord(PVMCPU pVCpu, int rc)
     578{
    581579    if (   rc == VERR_VMX_INVALID_VMCS_FIELD
    582580        || rc == VERR_VMX_UNABLE_TO_START_VM)
     
    585583        VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
    586584    }
    587     pVM->hm.s.lLastError = rc;
     585    pVCpu->CTX_SUFF(pVM)->hm.s.lLastError = rc;
    588586}
    589587
     
    10891087 *
    10901088 * @returns VBox status code.
    1091  * @param   pCpu            Pointer to the global CPU info struct.
     1089 * @param   pHostCpu        Pointer to the global CPU info struct.
    10921090 * @param   pVM             The cross context VM structure.  Can be
    10931091 *                          NULL after a host resume operation.
     
    11001098 * @param   pvMsrs          Opaque pointer to VMXMSRS struct.
    11011099 */
    1102 VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
     1100VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
    11031101                              void *pvMsrs)
    11041102{
    1105     Assert(pCpu);
     1103    Assert(pHostCpu);
    11061104    Assert(pvMsrs);
    11071105    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
    11241122    {
    11251123        hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
    1126         pCpu->fFlushAsidBeforeUse = false;
     1124        pHostCpu->fFlushAsidBeforeUse = false;
    11271125    }
    11281126    else
    1129         pCpu->fFlushAsidBeforeUse = true;
     1127        pHostCpu->fFlushAsidBeforeUse = true;
    11301128
    11311129    /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
    1132     ++pCpu->cTlbFlushes;
     1130    ++pHostCpu->cTlbFlushes;
    11331131
    11341132    return VINF_SUCCESS;
     
    11401138 *
    11411139 * @returns VBox status code.
    1142  * @param   pCpu            Pointer to the global CPU info struct.
     1140 * @param   pHostCpu        Pointer to the global CPU info struct.
    11431141 * @param   pvCpuPage       Pointer to the VMXON region.
    11441142 * @param   HCPhysCpuPage   Physical address of the VMXON region.
     
    11471145 *          similar was used to enable VT-x on the host.
    11481146 */
    1149 VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    1150 {
    1151     NOREF(pCpu);
    1152     NOREF(pvCpuPage);
    1153     NOREF(HCPhysCpuPage);
     1147VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     1148{
     1149    RT_NOREF3(pHostCpu, pvCpuPage, HCPhysCpuPage);
    11541150
    11551151    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
    17871783 *
    17881784 * @returns VBox status code.
    1789  * @param   pVM         The cross context VM structure.
    17901785 * @param   pVCpu       The cross context virtual CPU structure of the calling
    17911786 *                      EMT.  Can be NULL depending on @a enmFlush.
     
    17961791 * @remarks Can be called with interrupts disabled.
    17971792 */
    1798 static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
    1799 {
    1800     NOREF(pVM);
    1801     AssertPtr(pVM);
    1802     Assert(pVM->hm.s.vmx.fVpid);
     1793static void hmR0VmxFlushVpid(PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
     1794{
     1795    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid);
    18031796
    18041797    uint64_t au64Descriptor[2];
     
    18171810    }
    18181811
    1819     int rc = VMXR0InvVPID(enmFlush, &au64Descriptor[0]); NOREF(rc);
    1820     AssertMsg(rc == VINF_SUCCESS,
    1821               ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
     1812    int rc = VMXR0InvVPID(enmFlush, &au64Descriptor[0]);
     1813    AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvVPID %#x %u %RGv failed with %Rrc\n", enmFlush,
     1814                                  pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
    18221815    if (   RT_SUCCESS(rc)
    18231816        && pVCpu)
     
    18251818        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
    18261819    }
     1820    NOREF(rc);
    18271821}
    18281822
     
    18331827 *
    18341828 * @returns VBox status code.
    1835  * @param   pVM         The cross context VM structure.
    18361829 * @param   pVCpu       The cross context virtual CPU structure.
    18371830 * @param   GCVirt      Guest virtual address of the page to invalidate.
    18381831 */
    1839 VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
    1840 {
    1841     AssertPtr(pVM);
     1832VMMR0DECL(int) VMXR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
     1833{
    18421834    AssertPtr(pVCpu);
    1843     LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
     1835    LogFlowFunc(("pVCpu=%p GCVirt=%RGv\n", pVCpu, GCVirt));
    18441836
    18451837    bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
     
    18531845         * as this function maybe called in a loop with individual addresses.
    18541846         */
     1847        PVM pVM = pVCpu->CTX_SUFF(pVM);
    18551848        if (pVM->hm.s.vmx.fVpid)
    18561849        {
     
    18691862            if (fVpidFlush)
    18701863            {
    1871                 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
     1864                hmR0VmxFlushVpid(pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
    18721865                STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
    18731866            }
     
    18871880 * case where neither EPT nor VPID is supported by the CPU.
    18881881 *
    1889  * @param   pVM             The cross context VM structure.
    18901882 * @param   pVCpu           The cross context virtual CPU structure.
    18911883 * @param   pCpu            Pointer to the global HM struct.
     
    18931885 * @remarks Called with interrupts disabled.
    18941886 */
    1895 static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
     1887static void hmR0VmxFlushTaggedTlbNone(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
    18961888{
    18971889    AssertPtr(pVCpu);
    18981890    AssertPtr(pCpu);
    1899     NOREF(pVM);
    19001891
    19011892    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
     
    19121903 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
    19131904 *
    1914  * @param    pVM            The cross context VM structure.
    19151905 * @param    pVCpu          The cross context virtual CPU structure.
    19161906 * @param    pCpu           Pointer to the global HM CPU struct.
    1917  * @remarks All references to "ASID" in this function pertains to "VPID" in
    1918  *          Intel's nomenclature. The reason is, to avoid confusion in compare
    1919  *          statements since the host-CPU copies are named "ASID".
    1920  *
    1921  * @remarks Called with interrupts disabled.
    1922  */
    1923 static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
     1907 *
     1908 * @remarks  All references to "ASID" in this function pertains to "VPID" in Intel's
     1909 *           nomenclature. The reason is, to avoid confusion in compare statements
     1910 *           since the host-CPU copies are named "ASID".
     1911 *
     1912 * @remarks  Called with interrupts disabled.
     1913 */
     1914static void hmR0VmxFlushTaggedTlbBoth(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
    19241915{
    19251916#ifdef VBOX_WITH_STATISTICS
     
    19351926#endif
    19361927
    1937     AssertPtr(pVM);
    19381928    AssertPtr(pCpu);
    19391929    AssertPtr(pVCpu);
    19401930    Assert(pCpu->idCpu != NIL_RTCPUID);
    19411931
     1932    PVM pVM = pVCpu->CTX_SUFF(pVM);
    19421933    AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
    19431934              ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
     
    20172008 *
    20182009 * @returns VBox status code.
    2019  * @param   pVM         The cross context VM structure.
    20202010 * @param   pVCpu       The cross context virtual CPU structure.
    20212011 * @param   pCpu        Pointer to the global HM CPU struct.
     
    20232013 * @remarks Called with interrupts disabled.
    20242014 */
    2025 static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
    2026 {
    2027     AssertPtr(pVM);
     2015static void hmR0VmxFlushTaggedTlbEpt(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
     2016{
    20282017    AssertPtr(pVCpu);
    20292018    AssertPtr(pCpu);
    20302019    Assert(pCpu->idCpu != NIL_RTCPUID);
    2031     AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
    2032     AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
     2020    AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked without NestedPaging."));
     2021    AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID."));
    20332022
    20342023    /*
     
    20552044    if (pVCpu->hm.s.fForceTLBFlush)
    20562045    {
    2057         hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
     2046        hmR0VmxFlushEpt(pVCpu, pVCpu->CTX_SUFF(pVM)->hm.s.vmx.enmFlushEpt);
    20582047        pVCpu->hm.s.fForceTLBFlush = false;
    20592048    }
     
    20652054 *
    20662055 * @returns VBox status code.
    2067  * @param   pVM         The cross context VM structure.
    20682056 * @param   pVCpu       The cross context virtual CPU structure.
    20692057 * @param   pCpu        Pointer to the global HM CPU struct.
     
    20712059 * @remarks Called with interrupts disabled.
    20722060 */
    2073 static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
    2074 {
    2075     AssertPtr(pVM);
     2061static void hmR0VmxFlushTaggedTlbVpid(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
     2062{
    20762063    AssertPtr(pVCpu);
    20772064    AssertPtr(pCpu);
    20782065    Assert(pCpu->idCpu != NIL_RTCPUID);
    2079     AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
    2080     AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
     2066    AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked without VPID."));
     2067    AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging"));
    20812068
    20822069    /*
    2083      * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
    2084      * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
    2085      * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
     2070     * Force a TLB flush for the first world switch if the current CPU differs from the one we
     2071     * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
     2072     * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
     2073     * cannot reuse the current ASID anymore.
    20862074     */
    20872075    if (   pVCpu->hm.s.idLastCpu   != pCpu->idCpu
     
    20962084    {
    20972085        /*
    2098          * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
    2099          * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
    2100          * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush's too) - an obscure corner case.
     2086         * If we ever support VPID flush combinations other than ALL or SINGLE-context (see
     2087         * hmR0VmxSetupTaggedTlb()) we would need to explicitly flush in this case (add an
     2088         * fExplicitFlush = true here and change the pCpu->fFlushAsidBeforeUse check below to
     2089         * include fExplicitFlush's too) - an obscure corner case.
    21012090         */
    21022091        pVCpu->hm.s.fForceTLBFlush = true;
     
    21042093    }
    21052094
     2095    PVM pVM = pVCpu->CTX_SUFF(pVM);
    21062096    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
    21072097    if (pVCpu->hm.s.fForceTLBFlush)
     
    21212111        {
    21222112            if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
    2123                 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
     2113                hmR0VmxFlushVpid(pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
    21242114            else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
    21252115            {
    2126                 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
     2116                hmR0VmxFlushVpid(pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
    21272117                pCpu->fFlushAsidBeforeUse = false;
    21282118            }
     
    21622152    switch (pVM->hm.s.vmx.uFlushTaggedTlb)
    21632153    {
    2164         case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu, pCpu); break;
    2165         case HMVMX_FLUSH_TAGGED_TLB_EPT:      hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu, pCpu);  break;
    2166         case HMVMX_FLUSH_TAGGED_TLB_VPID:     hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu, pCpu); break;
    2167         case HMVMX_FLUSH_TAGGED_TLB_NONE:     hmR0VmxFlushTaggedTlbNone(pVM, pVCpu, pCpu); break;
     2154        case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVCpu, pCpu); break;
     2155        case HMVMX_FLUSH_TAGGED_TLB_EPT:      hmR0VmxFlushTaggedTlbEpt(pVCpu, pCpu);  break;
     2156        case HMVMX_FLUSH_TAGGED_TLB_VPID:     hmR0VmxFlushTaggedTlbVpid(pVCpu, pCpu); break;
     2157        case HMVMX_FLUSH_TAGGED_TLB_NONE:     hmR0VmxFlushTaggedTlbNone(pVCpu, pCpu); break;
    21682158        default:
    21692159            AssertMsgFailed(("Invalid flush-tag function identifier\n"));
     
    22792269 *
    22802270 * @returns VBox status code.
    2281  * @param   pVM         The cross context VM structure.
    22822271 * @param   pVCpu       The cross context virtual CPU structure.
    22832272 */
    2284 static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
    2285 {
    2286     AssertPtr(pVM);
     2273static int hmR0VmxSetupPinCtls(PVMCPU pVCpu)
     2274{
    22872275    AssertPtr(pVCpu);
    22882276
     2277    PVM pVM = pVCpu->CTX_SUFF(pVM);
    22892278    uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;         /* Bits set here must always be set. */
    22902279    uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;            /* Bits cleared here must always be cleared. */
     
    23332322 *
    23342323 * @returns VBox status code.
    2335  * @param   pVM         The cross context VM structure.
    23362324 * @param   pVCpu       The cross context virtual CPU structure.
    23372325 */
    2338 static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
    2339 {
    2340     AssertPtr(pVM);
     2326static int hmR0VmxSetupProcCtls(PVMCPU pVCpu)
     2327{
    23412328    AssertPtr(pVCpu);
    23422329
    23432330    int rc = VERR_INTERNAL_ERROR_5;
     2331    PVM pVM = pVCpu->CTX_SUFF(pVM);
    23442332    uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
    23452333    uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
     
    25552543 *
    25562544 * @returns VBox status code.
    2557  * @param   pVM         The cross context VM structure.
    25582545 * @param   pVCpu       The cross context virtual CPU structure.
    25592546 */
    2560 static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
    2561 {
    2562     NOREF(pVM);
    2563     AssertPtr(pVM);
     2547static int hmR0VmxSetupMiscCtls(PVMCPU pVCpu)
     2548{
    25642549    AssertPtr(pVCpu);
    25652550
     
    26272612 *
    26282613 * @returns VBox status code.
    2629  * @param   pVM         The cross context VM structure.
    26302614 * @param   pVCpu       The cross context virtual CPU structure.
    26312615 */
    2632 static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
    2633 {
    2634     AssertPtr(pVM);
     2616static int hmR0VmxInitXcptBitmap(PVMCPU pVCpu)
     2617{
    26352618    AssertPtr(pVCpu);
    26362619
    2637     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
    2638 
    2639     uint32_t u32XcptBitmap = 0;
     2620    uint32_t u32XcptBitmap;
    26402621
    26412622    /* Must always intercept #AC to prevent the guest from hanging the CPU. */
    2642     u32XcptBitmap |= RT_BIT_32(X86_XCPT_AC);
     2623    u32XcptBitmap = RT_BIT_32(X86_XCPT_AC);
    26432624
    26442625    /* Because we need to maintain the DR6 state even when intercepting DRx reads
     
    26482629
    26492630    /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
    2650     if (!pVM->hm.s.fNestedPaging)
     2631    if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
    26512632        u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
    26522633
    2653     pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
     2634    /* Commit it to the VMCS. */
    26542635    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
    26552636    AssertRCReturn(rc, rc);
    2656     return rc;
     2637
     2638    /* Update our cache of the exception bitmap. */
     2639    pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
     2640    return VINF_SUCCESS;
    26572641}
    26582642
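
The restructured hmR0VmxInitXcptBitmap above commits the bitmap to the VMCS first and updates the cached copy only on success, so the cache never claims a value the hardware doesn't hold. The same write-then-cache idiom in a hedged, standalone form (vmcsWrite32() and the cache struct are stand-ins; 0x4004 is the SDM encoding of the exception bitmap field):

    #include <stdint.h>

    extern int vmcsWrite32(uint32_t idxField, uint32_t uValue);     /* stand-in for VMXWriteVmcs32 */
    #define VMCS_XCPT_BITMAP_FIELD  0x4004u                         /* exception bitmap encoding */

    typedef struct XcptCache { uint32_t u32XcptBitmap; } XcptCache; /* stand-in for the HMCPU field */

    static int xcptBitmapCommit(XcptCache *pCache, uint32_t u32Bitmap)
    {
        int rc = vmcsWrite32(VMCS_XCPT_BITMAP_FIELD, u32Bitmap);
        if (rc != 0)
            return rc;                      /* leave the cache untouched on failure */
        pCache->u32XcptBitmap = u32Bitmap;  /* cache mirrors the VMCS only now */
        return 0;
    }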
     
    27672751        /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
    27682752        rc  = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    2769         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    2770                                     hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
     2753        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc\n", rc),
     2754                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
    27712755
    27722756        /* Load this VMCS as the current VMCS. */
    27732757        rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    2774         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    2775                                     hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
    2776 
    2777         rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
    2778         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    2779                                     hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
    2780 
    2781         rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
    2782         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    2783                                     hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
    2784 
    2785         rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
    2786         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    2787                                     hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
    2788 
    2789         rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
    2790         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    2791                                     hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
     2758        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc\n", rc),
     2759                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
     2760
     2761        rc = hmR0VmxSetupPinCtls(pVCpu);
     2762        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc\n", rc),
     2763                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
     2764
     2765        rc = hmR0VmxSetupProcCtls(pVCpu);
     2766        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc\n", rc),
     2767                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
     2768
     2769        rc = hmR0VmxSetupMiscCtls(pVCpu);
     2770        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc\n", rc),
     2771                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
     2772
     2773        rc = hmR0VmxInitXcptBitmap(pVCpu);
     2774        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc\n", rc),
     2775                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
    27922776
    27932777#if HC_ARCH_BITS == 32
    2794         rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
    2795         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    2796                                     hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
     2778        rc = hmR0VmxInitVmcsReadCache(pVCpu);
     2779        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc\n", rc),
     2780                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
    27972781#endif
    27982782
    27992783        /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
    28002784        rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    2801         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    2802                                     hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
     2785        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc\n", rc),
     2786                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
    28032787
    28042788        pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
    28052789
    2806         hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
     2790        hmR0VmxUpdateErrorRecord(pVCpu, rc);
    28072791    }
    28082792
     
    29812965
    29822966    /*
    2983      * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT"
    2984      * and Intel spec. 6.2 "Exception and Interrupt Vectors".)  Therefore if the host has the
    2985      * limit as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU
    2986      * behavior.  However, several hosts either insists on 0xfff being the limit (Windows
    2987      * Patch Guard) or uses the limit for other purposes (darwin puts the CPU ID in there
    2988      * but botches sidt alignment in at least one consumer).  So, we're only allowing the
    2989      * IDTR.LIMIT to be left at 0xffff on hosts where we are sure it won't cause trouble.
     2967     * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT" and
     2968     * Intel spec. 6.2 "Exception and Interrupt Vectors".)  Therefore if the host has the limit
     2969     * as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU behavior.
      2970     * However, several hosts either insist on 0xfff being the limit (Windows Patch Guard) or
      2971     * use the limit for other purposes (darwin puts the CPU ID in there but botches sidt
     2972     * alignment in at least one consumer).  So, we're only allowing the IDTR.LIMIT to be left
     2973     * at 0xffff on hosts where we are sure it won't cause trouble.
    29902974     */
    29912975# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
     
    35983582        /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
    35993583           Let us assert it as such and use 32-bit VMWRITE. */
    3600         Assert(!(pMixedCtx->rflags.u64 >> 32));
     3584        Assert(!RT_HI_U32(pMixedCtx->rflags.u64));
    36013585        X86EFLAGS fEFlags = pMixedCtx->eflags;
    36023586        Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
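
RT_HI_U32, which replaces the open-coded '>> 32' tests in this and the following hunks, extracts the high 32 bits of a 64-bit value, so Assert(!RT_HI_U32(x)) asserts that x fits in 32 bits. For illustration only (the real macro is IPRT's and may differ in detail):

    #include <stdint.h>

    /* Hedged equivalent of RT_HI_U32. */
    #define HI_U32_DEMO(a)  ((uint32_t)((uint64_t)(a) >> 32))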
     
    37703754            uCR0Mask &= ~X86_CR0_PE;
    37713755#endif
    3772         /* Update the HMCPU's copy of the CR0 mask. */
    3773         pVCpu->hm.s.vmx.u32CR0Mask = uCR0Mask;
    3774 
    37753756        /*
    37763757         * Finally, update VMCS fields with the CR0 values.
     
    37783759        int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, uGuestCR0);
    37793760        rc    |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, uShadowCR0);
    3780         rc    |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, uCR0Mask);
     3761        if (uCR0Mask != pVCpu->hm.s.vmx.u32CR0Mask)
     3762            rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, uCR0Mask);
    37813763        if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)
    37823764            rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
    37833765        AssertRCReturn(rc, rc);
     3766
     3767        pVCpu->hm.s.vmx.u32CR0Mask  = uCR0Mask;
    37843768        pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
    37853769
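
This CR0 hunk drops a redundant VMWRITE: VMX_VMCS_CTRL_CR0_MASK is rewritten only when it differs from the cached u32CR0Mask, and the cache is refreshed once the writes succeed. The pattern boils down to a write-if-changed helper; a hedged sketch with hypothetical names:

    #include <stdint.h>

    extern int vmcsWrite32(uint32_t idxField, uint32_t uValue);  /* stand-in for VMXWriteVmcs32 */

    /* Skip the comparatively expensive VMWRITE when our cache says the VMCS
       already holds the value; refresh the cache only after success. */
    static int vmcsWrite32IfChanged(uint32_t idxField, uint32_t uNew, uint32_t *puCache)
    {
        if (uNew == *puCache)
            return 0;
        int rc = vmcsWrite32(idxField, uNew);
        if (rc == 0)
            *puCache = uNew;
        return rc;
    }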
     
    39223906    {
    39233907        Assert(!RT_HI_U32(pMixedCtx->cr4));
    3924         uint32_t uGuestCR4 = pMixedCtx->cr4;
    3925 
    3926         /* The guest's view of its CR4 is unblemished. */
    3927         rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, uGuestCR4);
    3928         AssertRCReturn(rc, rc);
    3929         Log4Func(("uShadowCR4=%#RX32\n", uGuestCR4));
     3908        uint32_t       uGuestCR4  = pMixedCtx->cr4;
     3909        uint32_t const uShadowCR4 = pMixedCtx->cr4;
    39303910
    39313911        /*
     
    39973977        uGuestCR4 &= fZapCR4;
    39983978
    3999         /* Write VT-x's view of the guest CR4 into the VMCS. */
    4000         Log4Func(("uGuestCR4=%#RX32 (fSetCR4=%#RX32 fZapCR4=%#RX32)\n", uGuestCR4, fSetCR4, fZapCR4));
    4001         rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, uGuestCR4);
    4002         AssertRCReturn(rc, rc);
    4003 
    40043979        /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM-exit. */
    40053980        uint32_t u32CR4Mask = X86_CR4_VME
     
    40123987        if (pVM->cpum.ro.GuestFeatures.fPcid)
    40133988            u32CR4Mask |= X86_CR4_PCIDE;
     3989
     3990        /* Write VT-x's view of the guest CR4, the CR4 modify mask and the read-only CR4 shadow into the VMCS. */
     3991        rc  = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, uGuestCR4);
     3992        rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, uShadowCR4);
     3993        if (pVCpu->hm.s.vmx.u32CR4Mask != u32CR4Mask)
     3994            rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
     3995        AssertRCReturn(rc, rc);
    40143996        pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
    4015         rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
    4016         AssertRCReturn(rc, rc);
    40173997
    40183998        /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
     
    40204000
    40214001        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
     4002
     4003        Log4Func(("uGuestCR4=%#RX32 uShadowCR4=%#RX32 (fSetCR4=%#RX32 fZapCR4=%#RX32)\n", uGuestCR4, uShadowCR4, fSetCR4,
     4004                  fZapCR4));
    40224005    }
    40234006    return rc;
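
The uGuestCR4/uShadowCR4 pair above mirrors VT-x CR4 virtualization: for bits set in the CR4 guest/host mask the guest reads the shadow value and guest writes cause VM-exits, while unmasked bits come straight from the live guest CR4. A conceptual model of the guest-visible read (not VirtualBox code):

    #include <stdint.h>

    /* Guest-visible CR4: host-owned (masked) bits come from the read shadow,
       everything else from the CR4 the CPU is actually running with. */
    static uint32_t guestVisibleCr4(uint32_t uLiveCr4, uint32_t uShadowCr4, uint32_t fCr4Mask)
    {
        return (uShadowCr4 & fCr4Mask) | (uLiveCr4 & ~fCr4Mask);
    }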
     
    41854168 * Strict function to validate segment registers.
    41864169 *
     4170 * @param   pVCpu       The cross context virtual CPU structure.
     4171 * @param   pCtx        Pointer to the guest-CPU context.
     4172 *
    41874173 * @remarks Will import guest CR0 on strict builds during validation of
    41884174 *          segments.
    41894175 */
    4190 static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx)
     4176static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PCCPUMCTX pCtx)
    41914177{
    41924178    /*
     
    41974183     * and doesn't change the guest-context value.
    41984184     */
     4185    PVM pVM = pVCpu->CTX_SUFF(pVM);
    41994186    hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
    42004187    if (   !pVM->hm.s.vmx.fUnrestrictedGuest
     
    42984285        /* 64-bit capable CPUs. */
    42994286# if HC_ARCH_BITS == 64
    4300         Assert(!(pCtx->cs.u64Base >> 32));
    4301         Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
    4302         Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
    4303         Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
     4287        Assert(!RT_HI_U32(pCtx->cs.u64Base));
     4288        Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
     4289        Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
     4290        Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
    43044291# endif
    43054292    }
     
    43474334        /* 64-bit capable CPUs. */
    43484335# if HC_ARCH_BITS == 64
    4349         Assert(!(pCtx->cs.u64Base >> 32));
    4350         Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
    4351         Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
    4352         Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
     4336        Assert(!RT_HI_U32(pCtx->cs.u64Base));
     4337        Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
     4338        Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
     4339        Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
    43534340# endif
    43544341    }
     
    44754462
    44764463#ifdef VBOX_STRICT
    4477         /* Validate. */
    4478         hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
     4464        hmR0VmxValidateSegmentRegs(pVCpu, pMixedCtx);
    44794465#endif
    44804466
     
    44944480    {
    44954481        /*
    4496          * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
    4497          * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
    4498          * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
     4482         * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
     4483         * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
      4484         * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is set up.
    44994485         */
    45004486        uint16_t u16Sel          = 0;
     
    48824868 *
    48834869 * @returns VBox status code, no informational status codes.
    4884  * @param   pVM         The cross context VM structure.
    48854870 * @param   pVCpu       The cross context virtual CPU structure.
    48864871 * @param   pCtx        Pointer to the guest-CPU context.
     
    48884873 * @remarks No-long-jump zone!!!
    48894874 */
    4890 DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     4875DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx)
    48914876{
    48924877    /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
     
    49024887    bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
    49034888    /** @todo Add stats for resume vs launch. */
     4889    PVM pVM = pVCpu->CTX_SUFF(pVM);
    49044890#ifdef VBOX_WITH_KERNEL_USING_XMM
    49054891    int rc = hmR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
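
hmR0VmxRunGuest derives fResumeVM from the cached VMCS launch state: a "clear" VMCS must be entered with VMLAUNCH, a "launched" one with VMRESUME. A hedged sketch of that decision with hypothetical wrappers around the two instructions:

    #include <stdbool.h>
    #include <stdint.h>

    #define VMCS_STATE_LAUNCHED 0x2u    /* illustrative, mirrors HMVMX_VMCS_STATE_LAUNCHED */

    extern int cpuVmlaunch(void);       /* stand-ins for the VMLAUNCH/VMRESUME entry paths */
    extern int cpuVmresume(void);

    static int runGuestOnce(uint32_t *puVmcsState)
    {
        bool const fResume = (*puVmcsState & VMCS_STATE_LAUNCHED) != 0;
        int rc = fResume ? cpuVmresume() : cpuVmlaunch();
        if (rc == 0 && !fResume)
            *puVmcsState |= VMCS_STATE_LAUNCHED;    /* first successful entry */
        return rc;
    }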
     
    49154901 * Reports world-switch error and dumps some useful debug info.
    49164902 *
    4917  * @param   pVM             The cross context VM structure.
    49184903 * @param   pVCpu           The cross context virtual CPU structure.
    49194904 * @param   rcVMRun         The return code from VMLAUNCH/VMRESUME.
     
    49224907 *                          exitReason updated).
    49234908 */
    4924 static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
    4925 {
    4926     Assert(pVM);
     4909static void hmR0VmxReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
     4910{
    49274911    Assert(pVCpu);
    49284912    Assert(pCtx);
     
    50074991                rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg);           AssertRC(rc);
    50084992                Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW           %#RHr\n", uHCReg));
    5009                 if (pVM->hm.s.fNestedPaging)
     4993                if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
    50104994                {
    50114995                    rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val);             AssertRC(rc);
     
    50205004                rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);         AssertRC(rc);
    50215005                Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
    5022                 if (pVM->hm.s.vmx.fVpid)
     5006                if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid)
    50235007                {
    50245008                    rc = VMXReadVmcs32(VMX_VMCS16_VPID, &u32Val);           AssertRC(rc);
     
    51265110            break;
    51275111    }
    5128     NOREF(pVM); NOREF(pCtx);
     5112    NOREF(pCtx);
    51295113}
    51305114
     
    53415325 *
    53425326 * @returns VBox status code.
    5343  * @param   pVM         The cross context VM structure.
    53445327 * @param   pVCpu       The cross context virtual CPU structure.
    53455328 */
    5346 static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
    5347 {
    5348 #define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField)       \
    5349 {                                                              \
    5350     Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0);    \
    5351     pCache->Read.aField[idxField##_CACHE_IDX] = idxField;      \
    5352     pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0;          \
    5353     ++cReadFields;                                             \
    5354 }
    5355 
    5356     AssertPtr(pVM);
    5357     AssertPtr(pVCpu);
     5329static int hmR0VmxInitVmcsReadCache(PVMCPU pVCpu)
     5330{
     5331#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField)           \
     5332    do {                                                           \
     5333        Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0);    \
     5334        pCache->Read.aField[idxField##_CACHE_IDX]    = idxField;   \
     5335        pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0;          \
     5336        ++cReadFields;                                             \
     5337    } while (0)
     5338
    53585339    PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
    53595340    uint32_t cReadFields = 0;
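
The macro rewrite above wraps the multi-statement body in do { ... } while (0) so that VMXLOCAL_INIT_READ_CACHE_FIELD(...) behaves as a single statement; the old brace-only form breaks if/else chains. A standalone illustration of the hazard:

    /* Brace-only multi-statement macros do not compose with if/else: */
    #define INIT_BAD(a)   { (a) = 0; }
    #define INIT_GOOD(a)  do { (a) = 0; } while (0)

    /* if (f) INIT_BAD(x);  else y = 1;   -- syntax error: the ';' ends the if   */
    /* if (f) INIT_GOOD(x); else y = 1;   -- parses as one statement, as intended */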
     
    54115392#endif
    54125393
    5413     if (pVM->hm.s.fNestedPaging)
     5394    if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
    54145395    {
    54155396        VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
     
    55695550 *
    55705551 * @returns VBox status code.
    5571  * @param   pVM             The cross context VM structure.
    55725552 * @param   pVCpu           The cross context virtual CPU structure.
    55735553 *
    55745554 * @remarks No-long-jump zone!!!
    55755555 */
    5576 static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVM pVM, PVMCPU pVCpu)
     5556static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
    55775557{
    55785558    int  rc;
    55795559    bool fOffsettedTsc;
    55805560    bool fParavirtTsc;
     5561    PVM  pVM = pVCpu->CTX_SUFF(pVM);
    55815562    if (pVM->hm.s.vmx.fUsePreemptTimer)
    55825563    {
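
hmR0VmxUpdateTscOffsettingAndPreemptTimer programs the VMCS TSC offset; with TSC offsetting active and RDTSC exiting disabled, a guest RDTSC observes the host TSC plus that two's-complement offset. A one-function model (names illustrative):

    #include <stdint.h>

    /* What the guest's RDTSC effectively returns under VT-x TSC offsetting. */
    static uint64_t guestTscFromHost(uint64_t uHostTsc, uint64_t u64TscOffset)
    {
        return uHostTsc + u64TscOffset;   /* wrap-around is intentional */
    }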
     
    63656346            {
    63666347                uint32_t u32Shadow;
    6367                 /* CR0 required for saving CR3 below, see CPUMIsGuestPagingEnabledEx(). */
    63686348                if (fWhat & CPUMCTX_EXTRN_CR0)
    63696349                {
     
    63786358                }
    63796359
    6380                 /* CR4 required for saving CR3 below, see CPUMIsGuestInPAEModeEx(). */
    63816360                if (fWhat & CPUMCTX_EXTRN_CR4)
    63826361                {
     
    63916370                if (fWhat & CPUMCTX_EXTRN_CR3)
    63926371                {
     6372                    /* CR0.PG bit changes are always intercepted, so it's up to date. */
    63936373                    if (   pVM->hm.s.vmx.fUnrestrictedGuest
    63946374                        || (   pVM->hm.s.fNestedPaging
    6395                             && CPUMIsGuestPagingEnabledEx(pCtx))) /* PG bit changes are always intercepted, so it's up to date. */
     6375                            && CPUMIsGuestPagingEnabledEx(pCtx)))
    63966376                    {
    63976377                        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
     
    64026382                        }
    64036383
    6404                         /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
     6384                        /* If the guest is in PAE mode, sync back the PDPEs into the guest state.
     6385                           Note: CR4.PAE, CR0.PG, EFER bit changes are always intercepted, so they're up to date. */
    64056386                        if (CPUMIsGuestInPAEModeEx(pCtx))
    64066387                        {
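
The comment rewrite above records why gating the sync-back on cached state is safe: CR4.PAE, CR0.PG and EFER changes always cause VM-exits. Under PAE paging VT-x carries the four PDPTEs as guest state in the VMCS, so they must be read back after a VM-exit; a hedged sketch of that loop (consecutive 64-bit VMCS field encodings differ by 2, but treat the constant as illustrative):

    #include <stdint.h>

    extern int VMXReadVmcs64(uint32_t idxField, uint64_t *pu64Val);  /* declared here for the sketch */
    #define GUEST_PDPTE0_FULL 0x280Au                                /* SDM encoding of guest PDPTE0 */

    /* Read the four guest PDPTEs back from the VMCS after a VM-exit. */
    static int readGuestPdpes(uint64_t aPdpes[4])
    {
        int rc = 0;
        for (unsigned i = 0; i < 4; i++)
            rc |= VMXReadVmcs64(GUEST_PDPTE0_FULL + i * 2, &aPdpes[i]);
        return rc;
    }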
     
    65016482 *         to the EM loop.
    65026483 *
    6503  * @param   pVM             The cross context VM structure.
    65046484 * @param   pVCpu           The cross context virtual CPU structure.
    65056485 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     
    65086488 * @param   fStepping       Running in hmR0VmxRunGuestCodeStep().
    65096489 */
    6510 static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
     6490static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
    65116491{
    65126492    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     6493
     6494    /* Pending HM CR3 sync. */
     6495    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
     6496    {
     6497        Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
     6498        int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
     6499        AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
     6500                        ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
     6501        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
     6502    }
     6503
     6504    /* Pending HM PAE PDPEs. */
     6505    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
     6506    {
     6507        PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
     6508        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
     6509    }
    65136510
    65146511    /*
    65156512     * Anything pending?  Should be more likely than not if we're doing a good job.
    65166513     */
     6514    PVM pVM = pVCpu->CTX_SUFF(pVM);
    65176515    if (  !fStepping
    65186516        ?    !VM_FF_IS_PENDING(pVM, VM_FF_HP_R0_PRE_HM_MASK)
     
    65216519          && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
    65226520        return VINF_SUCCESS;
    6523 
    6524 #if 0
    6525     /* We need the control registers now, make sure the guest-CPU context is updated. */
    6526     int rc3 = hmR0VmxImportGuestStatae(pVCpu, CPUMCTX_EXTRN_CR0);
    6527     AssertRCReturn(rc3, rc3);
    6528 
    6529     /** @todo  r=ramshankar: VMCPU_FF_HM_UPDATE_CR3 and VMCPU_FF_HM_UPDATE_PAE_PDPES
    6530      *         are not part of VMCPU_FF_HP_R0_PRE_HM_MASK. Hence, the two if
    6531      *         statements below won't ever be entered. Consider removing it or
    6532      *         determine if it is necessary to add these flags to VMCPU_FF_HP_R0_PRE_HM_MASK. */
    6533     /* Pending HM CR3 sync. */
    6534     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
    6535     {
    6536         int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
    6537         AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
    6538                         ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
    6539         Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    6540     }
    6541 
    6542     /* Pending HM PAE PDPEs. */
    6543     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
    6544     {
    6545         PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
    6546         Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
    6547     }
    6548 #endif
    65496521
    65506522    /* Pending PGM CR3 sync.
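
The two blocks added at the top of hmR0VmxCheckForceFlags are the heart of this hunk: pending VMCPU_FF_HM_UPDATE_CR3 and VMCPU_FF_HM_UPDATE_PAE_PDPES are now serviced in the live force-flag check before guest re-entry, replacing the dead '#if 0' block deleted further down. The shape is: test the flag, perform the deferred update, assert the handler cleared it. A hedged sketch with hypothetical names:

    #include <assert.h>
    #include <stdbool.h>

    struct VCpu;                                         /* opaque stand-in for PVMCPU */
    extern bool testForceFlag(struct VCpu *, unsigned);  /* stand-in for VMCPU_FF_IS_PENDING */
    extern int  pgmUpdateCr3(struct VCpu *, unsigned long long uCr3);
    extern unsigned long long getGuestCr3(struct VCpu *);
    #define FF_UPDATE_CR3 0x1u                           /* hypothetical flag bit */

    /* Service a deferred CR3 update before re-entering the guest; the PGM
       call is expected to clear the flag, which the assert double-checks. */
    static int serviceDeferredCr3(struct VCpu *pVCpu)
    {
        if (testForceFlag(pVCpu, FF_UPDATE_CR3))
        {
            int rc = pgmUpdateCr3(pVCpu, getGuestCr3(pVCpu));
            if (rc < 0)                                  /* hard failure only */
                return rc;
            assert(!testForceFlag(pVCpu, FF_UPDATE_CR3));
        }
        return 0;
    }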
     
    68136785    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
    68146786    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
    6815     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
    6816     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
     6787    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
     6788    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
    68176789    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
    68186790    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
     
    69156887 *
    69166888 * @returns VBox status code.
    6917  * @param   pVM         The cross context VM structure.
    69186889 * @param   pVCpu       The cross context virtual CPU structure.
    69196890 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
     
    69236894 *                      VINF_VMM_UNKNOWN_RING3_CALL.
    69246895 */
    6925 static int hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, VBOXSTRICTRC rcExit)
    6926 {
    6927     Assert(pVM);
     6896static int hmR0VmxExitToRing3(PVMCPU pVCpu, PCPUMCTX pMixedCtx, VBOXSTRICTRC rcExit)
     6897{
    69286898    Assert(pVCpu);
    69296899    Assert(pMixedCtx);
     
    69736943                              | CPUM_CHANGED_TR
    69746944                              | CPUM_CHANGED_HIDDEN_SEL_REGS);
    6975     if (   pVM->hm.s.fNestedPaging
     6945    if (   pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
    69766946        && CPUMIsGuestPagingEnabledEx(pMixedCtx))
    69776947    {
     
    73147284        }
    73157285#endif
    7316         Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
    7317               (uint8_t)uIntType));
     7286        Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
     7287              uIntType));
    73187288        rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
    73197289                                          pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping,
     
    75837553 *                              directly (register modified by us, not by
    75847554 *                              hardware on VM-entry).
    7585  *
    7586  * @remarks Requires CR0!
    75877555 */
    75887556static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
     
    75907558{
    75917559    /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
    7592     AssertMsg(u64IntInfo >> 32 == 0, ("%#RX64\n", u64IntInfo));
     7560    AssertMsg(!RT_HI_U32(u64IntInfo), ("%#RX64\n", u64IntInfo));
    75937561    Assert(pfIntrState);
    75947562
     
    76017569    /*
    76027570     * Validate the error-code-valid bit for hardware exceptions.
    7603      * No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling"
     7571     * No error codes for exceptions in real-mode.
     7572     *
     7573     * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
    76047574     */
    76057575    if (   uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
     
    76307600    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
    76317601
    7632     /* We require CR0 to check if the guest is in real-mode. */
    7633     /** @todo No we don't, since CR0.PE is always intercepted. */
    7634     int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
    7635     AssertRCReturn(rc, rc);
    7636 
    76377602    /*
    76387603     * Hardware interrupts & exceptions cannot be delivered through the software interrupt
     
    76437608     * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
    76447609     */
    7645     if (CPUMIsGuestInRealModeEx(pMixedCtx))
     7610    if (CPUMIsGuestInRealModeEx(pMixedCtx))     /* CR0.PE bit changes are always intercepted, so it's up to date. */
    76467611    {
    76477612        if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest)
    76487613        {
    76497614            /*
    7650              * For unrestricted execution enabled CPUs running real-mode guests, we must not                              .
    7651              * set the deliver-error-code bit                                                                             .
    7652              *                                                                                                            .
     7615             * For unrestricted execution enabled CPUs running real-mode guests, we must not
     7616             * set the deliver-error-code bit.
     7617             *
    76537618             * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
    76547619             */
     
    76627627
    76637628            /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
    7664             rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_SREG_MASK
    7665                                                 | CPUMCTX_EXTRN_TABLE_MASK
    7666                                                 | CPUMCTX_EXTRN_RIP
    7667                                                 | CPUMCTX_EXTRN_RSP
    7668                                                 | CPUMCTX_EXTRN_RFLAGS);
    7669             AssertRCReturn(rc, rc);
     7629            int rc2 = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_SREG_MASK
     7630                                                     | CPUMCTX_EXTRN_TABLE_MASK
     7631                                                     | CPUMCTX_EXTRN_RIP
     7632                                                     | CPUMCTX_EXTRN_RSP
     7633                                                     | CPUMCTX_EXTRN_RFLAGS);
     7634            AssertRCReturn(rc2, rc2);
    76707635
    76717636            /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
     
    76817646                    return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, pfIntrState);
    76827647
    7683                 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
    7684                 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
    7685                 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */,
    7686                                            fStepping, pfIntrState);
     7648                /*
     7649                 * If we're injecting an event with no valid IDT entry, inject a #GP.
     7650                 * No error codes for exceptions in real-mode.
     7651                 *
     7652                 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
     7653                 */
     7654                return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, fStepping,
     7655                                           pfIntrState);
    76877656            }
    76887657
     
    77017670            X86IDTR16 IdtEntry;
    77027671            RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
    7703             rc = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
    7704             AssertRCReturn(rc, rc);
     7672            rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
     7673            AssertRCReturn(rc2, rc2);
    77057674
    77067675            /* Construct the stack frame for the interrupt/exception handler. */
     
    77637732
    77647733    /* Inject. */
    7765     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
     7734    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
    77667735    if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo))
    77677736        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
     
    77697738    AssertRCReturn(rc, rc);
    77707739
     7740    /* Update CR2. */
    77717741    if (   VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
    77727742        && uVector == X86_XCPT_PF)
    77737743        pMixedCtx->cr2 = GCPtrFaultAddress;
    77747744
    7775     Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr,
    7776           pMixedCtx->cr2));
     7745    Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
    77777746
    77787747    return VINF_SUCCESS;
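
The injection path ends with a 32-bit VMWRITE of the VM-entry interruption-information field. Per Intel SDM 24.8.3 it packs the vector in bits 7:0, the event type in bits 10:8, a deliver-error-code flag in bit 11 and the valid bit in bit 31; a hedged sketch of composing such a value:

    #include <stdbool.h>
    #include <stdint.h>

    /* Build a VM-entry interruption-information value (Intel SDM 24.8.3). */
    static uint32_t makeEntryIntInfo(uint8_t uVector, uint8_t uType, bool fErrCodeValid)
    {
        uint32_t u = uVector;                  /* bits 7:0   vector             */
        u |= (uint32_t)(uType & 0x7) << 8;     /* bits 10:8  type               */
        if (fErrCodeValid)
            u |= UINT32_C(1) << 11;            /* bit 11     deliver error code */
        u |= UINT32_C(1) << 31;                /* bit 31     valid              */
        return u;
    }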
     
    78117780 *
    78127781 * @returns VBox status code.
    7813  * @param   pVM         The cross context VM structure.
    78147782 * @param   pVCpu       The cross context virtual CPU structure.
    7815  * @param   pCpu        Pointer to the CPU info struct.
    7816  */
    7817 VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
    7818 {
    7819     AssertPtr(pVM);
     7783 * @param   pHostCpu    Pointer to the global CPU info struct.
     7784 */
     7785VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
     7786{
    78207787    AssertPtr(pVCpu);
    7821     Assert(pVM->hm.s.vmx.fSupported);
     7788    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported);
    78227789    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    7823     NOREF(pCpu); NOREF(pVM);
    7824 
    7825     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
     7790    RT_NOREF(pHostCpu);
     7791
     7792    LogFlowFunc(("pVCpu=%p\n", pVCpu));
    78267793    Assert((pVCpu->hm.s.fCtxChanged &  (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
    78277794                                    == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
     
    79127879            /* Initialize the bare minimum state required for HM. This takes care of
    79137880               initializing VT-x if necessary (onlined CPUs, local init etc.) */
    7914             int rc = HMR0EnterCpu(pVCpu);
     7881            int rc = hmR0EnterCpu(pVCpu);
    79157882            AssertRC(rc);
    79167883            Assert((pVCpu->hm.s.fCtxChanged &  (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
     
    80097976 *          mapped (e.g. EFI32).
    80107977 *
    8011  * @param   pVM         The cross context VM structure.
    80127978 * @param   pVCpu       The cross context virtual CPU structure.
    80137979 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
     
    80177983 * @remarks No-long-jump zone!!!
    80187984 */
    8019 static VBOXSTRICTRC hmR0VmxExportGuestState(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
    8020 {
    8021     AssertPtr(pVM);
     7985static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
     7986{
    80227987    AssertPtr(pVCpu);
    80237988    AssertPtr(pMixedCtx);
    80247989    HMVMX_ASSERT_PREEMPT_SAFE();
    80257990
    8026     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
     7991    LogFlowFunc(("pVCpu=%p\n", pVCpu));
    80277992
    80287993    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
     
    80307995    /* Determine real-on-v86 mode. */
    80317996    pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
    8032     if (   !pVM->hm.s.vmx.fUnrestrictedGuest
    8033         && CPUMIsGuestInRealModeEx(pMixedCtx))
     7997    if (   !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
     7998        &&  CPUMIsGuestInRealModeEx(pMixedCtx))
    80347999    {
    80358000        pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
     
    81088073 * Exports the state shared between the host and guest into the VMCS.
    81098074 *
    8110  * @param   pVM         The cross context VM structure.
    81118075 * @param   pVCpu       The cross context virtual CPU structure.
    81128076 * @param   pCtx        Pointer to the guest-CPU context.
     
    81148078 * @remarks No-long-jump zone!!!
    81158079 */
    8116 static void hmR0VmxExportSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    8117 {
    8118     NOREF(pVM);
    8119 
     8080static void hmR0VmxExportSharedState(PVMCPU pVCpu, PCPUMCTX pCtx)
     8081{
    81208082    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    81218083    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     
    81548116 *          mapped (e.g. EFI32).
    81558117 *
    8156  * @param   pVM             The cross context VM structure.
    81578118 * @param   pVCpu           The cross context virtual CPU structure.
    81588119 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     
    81628123 * @remarks No-long-jump zone!!!
    81638124 */
    8164 static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
     8125static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
    81658126{
    81668127    HMVMX_ASSERT_PREEMPT_SAFE();
     
    81898150    else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
    81908151    {
    8191         rcStrict = hmR0VmxExportGuestState(pVM, pVCpu, pMixedCtx);
     8152        rcStrict = hmR0VmxExportGuestState(pVCpu, pMixedCtx);
    81928153        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    81938154        { /* likely */}
     
    82368197 * @retval  VINF_* scheduling changes, we have to go back to ring-3.
    82378198 *
    8238  * @param   pVM             The cross context VM structure.
    82398199 * @param   pVCpu           The cross context virtual CPU structure.
    82408200 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     
    82478207 *                          dispatching took place.
    82488208 */
    8249 static VBOXSTRICTRC hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
     8209static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
    82508210{
    82518211    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     
    82568216
    82578217    /* Check force flag actions that might require us to go back to ring-3. */
    8258     VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx, fStepping);
     8218    VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, pMixedCtx, fStepping);
    82598219    if (rcStrict == VINF_SUCCESS)
    82608220    { /* FFs doesn't get set all the time. */ }
     
    82708230     * This is the reason we do it here and not in hmR0VmxExportGuestState().
    82718231     */
     8232    PVM pVM = pVCpu->CTX_SUFF(pVM);
    82728233    if (   !pVCpu->hm.s.vmx.u64MsrApicBase
    82738234        && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
     
    82988259
    82998260    /*
    8300      * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus needs to be done with
    8301      * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
     8261     * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus
     8262     * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
     8263     * also result in triple-faulting the VM.
    83028264     */
    83038265    rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fIntrState, fStepping);
     
    83298291     * Hence, loading of the guest state needs to be done -after- injection of events.
    83308292     */
    8331     rcStrict = hmR0VmxExportGuestStateOptimal(pVM, pVCpu, pMixedCtx);
     8293    rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu, pMixedCtx);
    83328294    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    83338295    { /* likely */ }
     
    83398301
    83408302    /*
    8341      * We disable interrupts so that we don't miss any interrupts that would flag
    8342      * preemption (IPI/timers etc.) when thread-context hooks aren't used and we've
    8343      * been running with preemption disabled for a while.  Since this is purly to aid
    8344      * the RTThreadPreemptIsPending code, it doesn't matter that it may temporarily
    8345      * reenable and disable interrupt on NT.
     8303     * We disable interrupts so that we don't miss any interrupts that would flag preemption
     8304     * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
      8305     * preemption disabled for a while.  Since this is purely to aid the
     8306     * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and
      8307     * disable interrupts on NT.
    83468308     *
    8347      * We need to check for force-flags that could've possible been altered since we last checked them (e.g.
    8348      * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
      8309     * We need to check for force-flags that could've possibly been altered since we last
     8310     * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
     8311     * see @bugref{6398}).
    83498312     *
    8350      * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
    8351      * executing guest code.
     8313     * We also check a couple of other force-flags as a last opportunity to get the EMT back
     8314     * to ring-3 before executing guest code.
    83528315     */
    83538316    pVmxTransient->fEFlags = ASMIntDisableFlags();
     
    83928355 * point.
    83938356 *
    8394  * @param   pVM             The cross context VM structure.
    83958357 * @param   pVCpu           The cross context virtual CPU structure.
    83968358 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     
    84028364 * @remarks No-long-jump zone!!!
    84038365 */
    8404 static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     8366static void hmR0VmxPreRunGuestCommitted(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    84058367{
    84068368    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     
    84148376    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
    84158377
     8378    PVM pVM = pVCpu->CTX_SUFF(pVM);
    84168379    if (!CPUMIsGuestFPUStateActive(pVCpu))
    84178380    {
     
    84498412     */
    84508413    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)
    8451         hmR0VmxExportSharedState(pVM, pVCpu, pMixedCtx);
     8414        hmR0VmxExportSharedState(pVCpu, pMixedCtx);
    84528415    AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
    84538416
     
    84778440        || idCurrentCpu != pVCpu->hm.s.idLastCpu)
    84788441    {
    8479         hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVM, pVCpu);
     8442        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu);
    84808443        pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
    84818444    }
     
    85328495#endif
    85338496#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
    8534     uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
     8497    uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx);
    85358498    if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
    85368499        Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
     
    85678530        TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVCpu->hm.s.vmx.u64TSCOffset);
    85688531
    8569     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
     8532    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
    85708533    TMNotifyEndOfExecution(pVCpu);                                    /* Notify TM that the guest is no longer running. */
    85718534    Assert(!ASMIntAreEnabled());
     
    86648627 *
    86658628 * @returns VBox status code.
    8666  * @param   pVM         The cross context VM structure.
    86678629 * @param   pVCpu       The cross context virtual CPU structure.
    86688630 * @param   pCtx        Pointer to the guest-CPU context.
     
    86708632 * @note    Mostly the same as hmR0VmxRunGuestCodeStep().
    86718633 */
    8672 static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     8634static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu, PCPUMCTX pCtx)
    86738635{
    86748636    VMXTRANSIENT VmxTransient;
     
    86858647           to ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
    86868648        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    8687         rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, false /* fStepping */);
     8649        rcStrict = hmR0VmxPreRunGuest(pVCpu, pCtx, &VmxTransient, false /* fStepping */);
    86888650        if (rcStrict != VINF_SUCCESS)
    86898651            break;
    86908652
    8691         hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
    8692         int rcRun = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
     8653        hmR0VmxPreRunGuestCommitted(pVCpu, pCtx, &VmxTransient);
     8654        int rcRun = hmR0VmxRunGuest(pVCpu, pCtx);
    86938655        /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
    86948656
     
    87028664        else
    87038665        {
    8704             STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
    8705             hmR0VmxReportWorldSwitchError(pVM, pVCpu, rcRun, pCtx, &VmxTransient);
     8666            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
     8667            hmR0VmxReportWorldSwitchError(pVCpu, rcRun, pCtx, &VmxTransient);
    87068668            return rcRun;
    87078669        }
     
    87118673        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
    87128674        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
    8713         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
     8675        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
    87148676        HMVMX_START_EXIT_DISPATCH_PROF();
    87158677
     
    87228684        rcStrict = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
    87238685#endif
    8724         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
     8686        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
    87258687        if (rcStrict == VINF_SUCCESS)
    87268688        {
    8727             if (cLoops <= pVM->hm.s.cMaxResumeLoops)
     8689            if (cLoops <= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
    87288690                continue; /* likely */
    87298691            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
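
StatSwitchMaxResumeLoops fires when the inner ring-0 loop has performed cMaxResumeLoops VM-entries without returning to ring-3; the cap forces a periodic trip out so the EMT can service timers and pending requests. Schematically (simplified, hypothetical names):

    #include <stdint.h>

    struct VCpu;                                     /* opaque stand-in for PVMCPU */
    extern int runGuestAndHandleExit(struct VCpu *); /* stand-in for the loop body above */

    /* Capped inner run loop: bail out to ring-3 after cMaxResumeLoops entries. */
    static int runLoop(struct VCpu *pVCpu, uint32_t cMaxResumeLoops)
    {
        for (uint32_t cLoops = 0; ; cLoops++)
        {
            int rc = runGuestAndHandleExit(pVCpu);
            if (rc != 0)                             /* exit handler wants ring-3 */
                return rc;
            if (cLoops > cMaxResumeLoops)
                return 1;                            /* stand-in for VINF_EM_RAW_INTERRUPT */
        }
    }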
     
    92299191 *
    92309192 * @returns Strict VBox status code (i.e. informational status codes too).
    9231  * @param   pVM             The cross context VM structure.
    92329193 * @param   pVCpu           The cross context virtual CPU structure.
    92339194 * @param   pMixedCtx       Pointer to the guest-CPU context.
     
    92389199 *          and to the point. No longer than 33 chars long, please.
    92399200 */
    9240 static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx,
    9241                                                   PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
     9201static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
     9202                                                  uint32_t uExitReason)
    92429203{
    92439204    /*
     
    95869547     *       one, in order to avoid event nesting.
    95879548     */
     9549    PVM pVM = pVCpu->CTX_SUFF(pVM);
    95889550    if (   enmEvent1 != DBGFEVENT_END
    95899551        && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
     
    96139575 *
    96149576 * @returns Strict VBox status code (i.e. informational status codes too).
    9615  * @param   pVM             The cross context VM structure.
    96169577 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    96179578 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     
    96229583 * @param   pDbgState       The debug state.
    96239584 */
    9624 DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
     9585DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
    96259586                                                   uint32_t uExitReason, PVMXRUNDBGSTATE pDbgState)
    96269587{
     
    97489709        && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
    97499710    {
    9750         VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVM, pVCpu, pMixedCtx, pVmxTransient, uExitReason);
     9711        VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
    97519712        if (rcStrict != VINF_SUCCESS)
    97529713            return rcStrict;
     
    97689729 *
    97699730 * @returns Strict VBox status code (i.e. informational status codes too).
    9770  * @param   pVM         The cross context VM structure.
    97719731 * @param   pVCpu       The cross context virtual CPU structure.
    97729732 * @param   pCtx        Pointer to the guest-CPU context.
     
    97749734 * @note    Mostly the same as hmR0VmxRunGuestCodeNormal().
    97759735 */
    9776 static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     9736static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu, PCPUMCTX pCtx)
    97779737{
    97789738    VMXTRANSIENT VmxTransient;
     
    98069766        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    98079767        hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Set up execute controls the next to can respond to. */
    9808         rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, fStepping);
     9768        rcStrict = hmR0VmxPreRunGuest(pVCpu, pCtx, &VmxTransient, fStepping);
    98099769        if (rcStrict != VINF_SUCCESS)
    98109770            break;
    98119771
    9812         hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
     9772        hmR0VmxPreRunGuestCommitted(pVCpu, pCtx, &VmxTransient);
    98139773        hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Override any obnoxious code in the above two calls. */
    98149774
     
    98169776         * Now we can run the guest code.
    98179777         */
    9818         int rcRun = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
     9778        int rcRun = hmR0VmxRunGuest(pVCpu, pCtx);
    98199779
    98209780        /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
     
    98319791        else
    98329792        {
    9833             STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
    9834             hmR0VmxReportWorldSwitchError(pVM, pVCpu, rcRun, pCtx, &VmxTransient);
     9793            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
     9794            hmR0VmxReportWorldSwitchError(pVCpu, rcRun, pCtx, &VmxTransient);
    98359795            return rcRun;
    98369796        }
     
    98409800        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
    98419801        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
    9842         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
     9802        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
    98439803        HMVMX_START_EXIT_DISPATCH_PROF();
    98449804
     
    98489808         * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxHandleExitDebug().
    98499809         */
    9850         rcStrict = hmR0VmxRunDebugHandleExit(pVM, pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, &DbgState);
    9851         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
     9810        rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, &DbgState);
     9811        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
    98529812        if (rcStrict != VINF_SUCCESS)
    98539813            break;
    9854         if (cLoops > pVM->hm.s.cMaxResumeLoops)
     9814        if (cLoops > pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
    98559815        {
    98569816            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
     
    1005810018 *
    1005910019 * @returns Strict VBox status code (i.e. informational status codes too).
    10060  * @param   pVM         The cross context VM structure.
    1006110020 * @param   pVCpu       The cross context virtual CPU structure.
    1006210021 * @param   pCtx        Pointer to the guest-CPU context.
    1006310022 */
    10064 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     10023VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx)
    1006510024{
    1006610025    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     
    1007410033        && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled())
    1007510034        && !DBGFIsStepping(pVCpu)
    10076         && !pVM->dbgf.ro.cEnabledInt3Breakpoints)
    10077         rcStrict = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
     10035        && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
     10036        rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu, pCtx);
    1007810037    else
    10079         rcStrict = hmR0VmxRunGuestCodeDebug(pVM, pVCpu, pCtx);
     10038        rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu, pCtx);
    1008010039
    1008110040    if (rcStrict == VERR_EM_INTERPRETER)
     
    1008410043        rcStrict = VINF_EM_TRIPLE_FAULT;
    1008510044
    10086     int rc2 = hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rcStrict);
     10045    int rc2 = hmR0VmxExitToRing3(pVCpu, pCtx, rcStrict);
    1008710046    if (RT_FAILURE(rc2))
    1008810047    {
     
    1029710256 *         wrong with the guest state.
    1029810257 *
    10299  * @param   pVM     The cross context VM structure.
    1030010258 * @param   pVCpu   The cross context virtual CPU structure.
    1030110259 * @param   pCtx    Pointer to the guest-CPU state.
     
    1030410262 *          are valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
    1030510263 */
    10306 static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     10264static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu, PCPUMCTX pCtx)
    1030710265{
    1030810266#define HMVMX_ERROR_BREAK(err)              { uError = (err); break; }
     
    1031210270                                            } else do { } while (0)
    1031310271
    10314     int      rc;
    10315     uint32_t uError             = VMX_IGS_ERROR;
    10316     uint32_t u32Val;
    10317     bool     fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
     10272    int        rc;
     10273    PVM        pVM = pVCpu->CTX_SUFF(pVM);
     10274    uint32_t   uError = VMX_IGS_ERROR;
     10275    uint32_t   u32Val;
     10276    bool const fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
    1031810277
    1031910278    do
     
    1062810587            HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
    1062910588                              || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
    10630             HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
    10631             HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
     10589            HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
     10590            HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
    1063210591                              VMX_IGS_LONGMODE_SS_BASE_INVALID);
    10633             HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
     10592            HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
    1063410593                              VMX_IGS_LONGMODE_DS_BASE_INVALID);
    10635             HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
     10594            HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
    1063610595                              VMX_IGS_LONGMODE_ES_BASE_INVALID);
    1063710596#endif
     
    1068410643            HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
    1068510644                              || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
    10686             HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
    10687             HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
     10645            HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
     10646            HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
    1068810647                              VMX_IGS_LONGMODE_SS_BASE_INVALID);
    10689             HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
     10648            HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
    1069010649                              VMX_IGS_LONGMODE_DS_BASE_INVALID);
    10691             HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
     10650            HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
    1069210651                              VMX_IGS_LONGMODE_ES_BASE_INVALID);
    1069310652#endif
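
The guest-state checks above replace open-coded `>> 32` tests with RT_HI_U32() when validating long-mode segment bases; both forms test that the high 32 bits are zero, the macro just states the intent. A self-contained illustration, where MY_HI_U32 is a local stand-in for the IPRT macro:

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for RT_HI_U32(): the high 32 bits of a 64-bit value. */
    #define MY_HI_U32(a_u64) ((uint32_t)((a_u64) >> 32))

    int main(void)
    {
        uint64_t const uGoodBase = UINT64_C(0x00000000fee00000);
        uint64_t const uBadBase  = UINT64_C(0x00000001fee00000);
        printf("%#018llx -> %s\n", (unsigned long long)uGoodBase,
               !MY_HI_U32(uGoodBase) ? "ok" : "VMX_IGS_LONGMODE_CS_BASE_INVALID");
        printf("%#018llx -> %s\n", (unsigned long long)uBadBase,
               !MY_HI_U32(uBadBase) ? "ok" : "VMX_IGS_LONGMODE_CS_BASE_INVALID");
        return 0;
    }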
     
    1160711566    AssertRCReturn(rc, rc);
    1160811567
    11609     uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
     11568    uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx);
    1161011569    NOREF(uInvalidReason);
    1161111570
     
    1164011599    rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val);                 AssertRC(rc);
    1164111600    Log4(("VMX_VMCS64_CTRL_EPTP_FULL                  %#RX64\n", u64Val));
     11601
     11602    hmR0DumpRegs(pVCpu, pMixedCtx);
    1164211603#else
    1164311604    NOREF(pVmxTransient);
    1164411605#endif
    1164511606
    11646     hmR0DumpRegs(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
    1164711607    return VERR_VMX_INVALID_GUEST_STATE;
    1164811608}
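
A pattern running through this file's hunks is the parameter cleanup from the commit message: the PVM argument is dropped because the VMCPU structure already carries ring-specific pointers back to the VM, so pVM can be fetched as pVCpu->CTX_SUFF(pVM) where needed. A minimal runnable sketch of that idea, with hypothetical MYVM/MYVMCPU stand-ins for the real cross context structures:

    #include <stdio.h>

    /* Heavily trimmed stand-ins: in VirtualBox, pVCpu->CTX_SUFF(pVM) resolves
     * to pVCpu->pVMR0 in ring-0 builds and pVCpu->pVMR3 in ring-3 builds. */
    typedef struct MYVM    { unsigned cMaxResumeLoops; } MYVM;
    typedef struct MYVMCPU { MYVM *pVMR0; } MYVMCPU;
    #define MY_CTX_SUFF(a_Name) a_Name##R0   /* ring-0 flavour of the real macro */

    /* After the cleanup, functions take only pVCpu and fetch pVM on demand. */
    static int myLoopLimitHit(MYVMCPU *pVCpu, unsigned cLoops)
    {
        return cLoops > pVCpu->MY_CTX_SUFF(pVM)->cMaxResumeLoops;
    }

    int main(void)
    {
        MYVM    Vm   = { 1024 };
        MYVMCPU VCpu = { &Vm };
        printf("%d %d\n", myLoopLimitHit(&VCpu, 100), myLoopLimitHit(&VCpu, 2048));
        return 0;
    }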
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.h

    r72744 r72805  
    2929#ifdef IN_RING0
    3030
    31 VMMR0DECL(int)          VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);
     31VMMR0DECL(int)          VMXR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu);
    3232VMMR0DECL(void)         VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
    33 VMMR0DECL(int)          VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys,
     33VMMR0DECL(int)          VMXR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys,
    3434                                       bool fEnabledBySystem, void *pvMsrs);
    35 VMMR0DECL(int)          VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     35VMMR0DECL(int)          VMXR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
    3636VMMR0DECL(int)          VMXR0GlobalInit(void);
    3737VMMR0DECL(void)         VMXR0GlobalTerm(void);
     
    4040VMMR0DECL(int)          VMXR0SetupVM(PVM pVM);
    4141VMMR0DECL(int)          VMXR0ExportHostState(PVMCPU pVCpu);
    42 VMMR0DECL(int)          VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);
     42VMMR0DECL(int)          VMXR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
    4343VMMR0DECL(int)          VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat);
    44 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
     44VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx);
    4545DECLASM(int)            VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    4646DECLASM(int)            VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r72778 r72805  
    12371237                     * Enter HM context.
    12381238                     */
    1239                     rc = HMR0Enter(pVM, pVCpu);
     1239                    rc = HMR0Enter(pVCpu);
    12401240                    if (RT_SUCCESS(rc))
    12411241                    {
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r72785 r72805  
    140140    EXIT_REASON(VMX_EXIT_INVPCID                ,  58, "INVPCID instruction."),
    141141    EXIT_REASON(VMX_EXIT_VMFUNC                 ,  59, "VMFUNC instruction."),
    142     EXIT_REASON(VMX_EXIT_ENCLS                  ,  60, "ENCLS instrunction."),
     142    EXIT_REASON(VMX_EXIT_ENCLS                  ,  60, "ENCLS instruction."),
    143143    EXIT_REASON(VMX_EXIT_RDSEED                 ,  61, "RDSEED instruction."),
    144144    EXIT_REASON(VMX_EXIT_PML_FULL               ,  62, "Page-modification log full."),
     
    258258    EXIT_REASON(SVM_EXIT_INIT         ,   99, "Physical INIT signal (host)."),
    259259    EXIT_REASON(SVM_EXIT_VINTR        ,  100, "Virtual interrupt-window exit."),
    260     EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE,  101, "Write to CR0 that changed any bits other than CR0.TS or CR0.MP."),
    261     EXIT_REASON(SVM_EXIT_IDTR_READ    ,  102, "Read IDTR"),
    262     EXIT_REASON(SVM_EXIT_GDTR_READ    ,  103, "Read GDTR"),
     260    EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE,  101, "Selective CR0 Write (to bits other than CR0.TS and CR0.MP)."),
     261    EXIT_REASON(SVM_EXIT_IDTR_READ    ,  102, "Read IDTR."),
     262    EXIT_REASON(SVM_EXIT_GDTR_READ    ,  103, "Read GDTR."),
    263263    EXIT_REASON(SVM_EXIT_LDTR_READ    ,  104, "Read LDTR."),
    264264    EXIT_REASON(SVM_EXIT_TR_READ      ,  105, "Read TR."),
     
    280280    EXIT_REASON(SVM_EXIT_INVLPG       ,  121, "INVLPG instruction."),
    281281    EXIT_REASON(SVM_EXIT_INVLPGA      ,  122, "INVLPGA instruction."),
    282     EXIT_REASON(SVM_EXIT_IOIO         ,  123, "IN/OUT accessing protected port."),
     282    EXIT_REASON(SVM_EXIT_IOIO         ,  123, "IN/OUT/INS/OUTS instruction."),
    283283    EXIT_REASON(SVM_EXIT_MSR          ,  124, "RDMSR or WRMSR access to protected MSR."),
    284284    EXIT_REASON(SVM_EXIT_TASK_SWITCH  ,  125, "Task switch."),
    285     EXIT_REASON(SVM_EXIT_FERR_FREEZE  ,  126, "Legacy FPU handling enabled; CPU frozen in an x87/mmx instr. waiting for interrupt."),
     285    EXIT_REASON(SVM_EXIT_FERR_FREEZE  ,  126, "FERR Freeze; CPU frozen in an x87/mmx instruction waiting for interrupt."),
    286286    EXIT_REASON(SVM_EXIT_SHUTDOWN     ,  127, "Shutdown."),
    287287    EXIT_REASON(SVM_EXIT_VMRUN        ,  128, "VMRUN instruction."),
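
The hunks above only reword descriptions in the VMX and SVM exit-reason tables. For orientation, here is a miniature (hypothetical) version of that table pattern carrying three of the corrected SVM strings:

    #include <stdio.h>

    /* Each entry pairs an exit code with the human-readable text used by the
     * statistics and logging code. */
    typedef struct MYEXITREASON { unsigned uCode; const char *pszDesc; } MYEXITREASON;
    #define MY_EXIT_REASON(a_Code, a_Desc) { (a_Code), (a_Desc) }

    static const MYEXITREASON g_aMySvmExitReasons[] =
    {
        MY_EXIT_REASON(101, "Selective CR0 Write (to bits other than CR0.TS and CR0.MP)."),
        MY_EXIT_REASON(123, "IN/OUT/INS/OUTS instruction."),
        MY_EXIT_REASON(126, "FERR Freeze; CPU frozen in an x87/mmx instruction waiting for interrupt."),
    };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(g_aMySvmExitReasons) / sizeof(g_aMySvmExitReasons[0]); i++)
            printf("%3u: %s\n", g_aMySvmExitReasons[i].uCode, g_aMySvmExitReasons[i].pszDesc);
        return 0;
    }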
     
    883883                             "/PROF/CPU%d/HM/StatEntry", i);
    884884        AssertRC(rc);
    885         rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
    886                              "Profiling of VMXR0RunGuestCode exit part 1",
     885        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatPreExit, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
     886                             "Profiling of pre-exit processing after returning from GC",
    887887                             "/PROF/CPU%d/HM/SwitchFromGC_1", i);
    888888        AssertRC(rc);
    889         rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
    890                              "Profiling of VMXR0RunGuestCode exit part 2",
     889        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitHandling, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
     890                             "Profiling of exit handling (longjmps not included!)",
    891891                             "/PROF/CPU%d/HM/SwitchFromGC_2", i);
    892892        AssertRC(rc);
     
    958958        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXF,            "/HM/CPU%d/Exit/Trap/Gst/#XF", "Guest #XF (extended math fault, SIMD FPU) exception.");
    959959        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXcpUnk,        "/HM/CPU%d/Exit/Trap/Gst/Other", "Other guest exceptions.");
    960         HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt,                "/HM/CPU%d/Exit/Instr/Hlt", "Guest attempted to execute HLT.");
    961         HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr,              "/HM/CPU%d/Exit/Instr/Rdmsr", "Guest attempted to execute RDMSR.");
    962         HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr,              "/HM/CPU%d/Exit/Instr/Wrmsr", "Guest attempted to execute WRMSR.");
    963         HM_REG_COUNTER(&pVCpu->hm.s.StatExitMwait,              "/HM/CPU%d/Exit/Instr/Mwait", "Guest attempted to execute MWAIT.");
    964         HM_REG_COUNTER(&pVCpu->hm.s.StatExitMonitor,            "/HM/CPU%d/Exit/Instr/Monitor", "Guest attempted to execute MONITOR.");
    965         HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite,           "/HM/CPU%d/Exit/Instr/DR-Write", "Guest attempted to write a debug register.");
    966         HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead,            "/HM/CPU%d/Exit/Instr/DR-Read", "Guest attempted to read a debug register.");
    967         HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR0Read,            "/HM/CPU%d/Exit/Instr/CR-Read/CR0", "Guest attempted to read CR0.");
    968         HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR2Read,            "/HM/CPU%d/Exit/Instr/CR-Read/CR2", "Guest attempted to read CR2.");
    969         HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR3Read,            "/HM/CPU%d/Exit/Instr/CR-Read/CR3", "Guest attempted to read CR3.");
    970         HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR4Read,            "/HM/CPU%d/Exit/Instr/CR-Read/CR4", "Guest attempted to read CR4.");
    971         HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR8Read,            "/HM/CPU%d/Exit/Instr/CR-Read/CR8", "Guest attempted to read CR8.");
    972         HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR0Write,           "/HM/CPU%d/Exit/Instr/CR-Write/CR0", "Guest attempted to write CR0.");
    973         HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR2Write,           "/HM/CPU%d/Exit/Instr/CR-Write/CR2", "Guest attempted to write CR2.");
    974         HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR3Write,           "/HM/CPU%d/Exit/Instr/CR-Write/CR3", "Guest attempted to write CR3.");
    975         HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR4Write,           "/HM/CPU%d/Exit/Instr/CR-Write/CR4", "Guest attempted to write CR4.");
    976         HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR8Write,           "/HM/CPU%d/Exit/Instr/CR-Write/CR8", "Guest attempted to write CR8.");
    977         HM_REG_COUNTER(&pVCpu->hm.s.StatExitClts,               "/HM/CPU%d/Exit/Instr/CLTS", "Guest attempted to execute CLTS.");
    978         HM_REG_COUNTER(&pVCpu->hm.s.StatExitLmsw,               "/HM/CPU%d/Exit/Instr/LMSW", "Guest attempted to execute LMSW.");
    979         HM_REG_COUNTER(&pVCpu->hm.s.StatExitCli,                "/HM/CPU%d/Exit/Instr/Cli", "Guest attempted to execute CLI.");
    980         HM_REG_COUNTER(&pVCpu->hm.s.StatExitSti,                "/HM/CPU%d/Exit/Instr/Sti", "Guest attempted to execute STI.");
    981         HM_REG_COUNTER(&pVCpu->hm.s.StatExitPushf,              "/HM/CPU%d/Exit/Instr/Pushf", "Guest attempted to execute PUSHF.");
    982         HM_REG_COUNTER(&pVCpu->hm.s.StatExitPopf,               "/HM/CPU%d/Exit/Instr/Popf", "Guest attempted to execute POPF.");
    983         HM_REG_COUNTER(&pVCpu->hm.s.StatExitIret,               "/HM/CPU%d/Exit/Instr/Iret", "Guest attempted to execute IRET.");
    984         HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt,                "/HM/CPU%d/Exit/Instr/Int", "Guest attempted to execute INT.");
    985         HM_REG_COUNTER(&pVCpu->hm.s.StatExitXdtrAccess,         "/HM/CPU%d/Exit/Instr/XdtrAccess", "Guest attempted to access descriptor table register (GDTR, IDTR, LDTR).");
     960        HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt,                "/HM/CPU%d/Exit/Instr/Hlt", "HLT instruction.");
     961        HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr,              "/HM/CPU%d/Exit/Instr/Rdmsr", "RDMSR instruction.");
     962        HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr,              "/HM/CPU%d/Exit/Instr/Wrmsr", "WRMSR instruction.");
     963        HM_REG_COUNTER(&pVCpu->hm.s.StatExitMwait,              "/HM/CPU%d/Exit/Instr/Mwait", "MWAIT instruction.");
     964        HM_REG_COUNTER(&pVCpu->hm.s.StatExitMonitor,            "/HM/CPU%d/Exit/Instr/Monitor", "MONITOR instruction.");
     965        HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite,           "/HM/CPU%d/Exit/Instr/DR-Write", "Debug register write.");
     966        HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead,            "/HM/CPU%d/Exit/Instr/DR-Read", "Debug register read.");
     967        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR0Read,            "/HM/CPU%d/Exit/Instr/CR-Read/CR0", "CR0 read.");
     968        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR2Read,            "/HM/CPU%d/Exit/Instr/CR-Read/CR2", "CR2 read.");
     969        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR3Read,            "/HM/CPU%d/Exit/Instr/CR-Read/CR3", "CR3 read.");
     970        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR4Read,            "/HM/CPU%d/Exit/Instr/CR-Read/CR4", "CR4 read.");
     971        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR8Read,            "/HM/CPU%d/Exit/Instr/CR-Read/CR8", "CR8 read.");
     972        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR0Write,           "/HM/CPU%d/Exit/Instr/CR-Write/CR0", "CR0 write.");
     973        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR2Write,           "/HM/CPU%d/Exit/Instr/CR-Write/CR2", "CR2 write.");
     974        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR3Write,           "/HM/CPU%d/Exit/Instr/CR-Write/CR3", "CR3 write.");
     975        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR4Write,           "/HM/CPU%d/Exit/Instr/CR-Write/CR4", "CR4 write.");
     976        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR8Write,           "/HM/CPU%d/Exit/Instr/CR-Write/CR8", "CR8 write.");
     977        HM_REG_COUNTER(&pVCpu->hm.s.StatExitClts,               "/HM/CPU%d/Exit/Instr/CLTS", "CLTS instruction.");
     978        HM_REG_COUNTER(&pVCpu->hm.s.StatExitLmsw,               "/HM/CPU%d/Exit/Instr/LMSW", "LMSW instruction.");
     979        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCli,                "/HM/CPU%d/Exit/Instr/Cli", "CLI instruction.");
     980        HM_REG_COUNTER(&pVCpu->hm.s.StatExitSti,                "/HM/CPU%d/Exit/Instr/Sti", "STI instruction.");
     981        HM_REG_COUNTER(&pVCpu->hm.s.StatExitPushf,              "/HM/CPU%d/Exit/Instr/Pushf", "PUSHF instruction.");
     982        HM_REG_COUNTER(&pVCpu->hm.s.StatExitPopf,               "/HM/CPU%d/Exit/Instr/Popf", "POPF instruction.");
     983        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIret,               "/HM/CPU%d/Exit/Instr/Iret", "IRET instruction.");
     984        HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt,                "/HM/CPU%d/Exit/Instr/Int", "INT instruction.");
     985        HM_REG_COUNTER(&pVCpu->hm.s.StatExitXdtrAccess,         "/HM/CPU%d/Exit/Instr/XdtrAccess", "GDTR, IDTR, LDTR access.");
    986986        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite,            "/HM/CPU%d/Exit/IO/Write", "I/O write.");
    987987        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead,             "/HM/CPU%d/Exit/IO/Read", "I/O read.");
     
    989989        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead,       "/HM/CPU%d/Exit/IO/ReadString", "String I/O read.");
    990990        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIntWindow,          "/HM/CPU%d/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts again.");
    991         HM_REG_COUNTER(&pVCpu->hm.s.StatExitExtInt,             "/HM/CPU%d/Exit/ExtInt", "Host interrupt received.");
     991        HM_REG_COUNTER(&pVCpu->hm.s.StatExitExtInt,             "/HM/CPU%d/Exit/ExtInt", "Physical maskable interrupt (host).");
    992992#endif
    993993        HM_REG_COUNTER(&pVCpu->hm.s.StatExitHostNmiInGC,        "/HM/CPU%d/Exit/HostNmiInGC", "Host NMI received while in guest context.");
     
    995995        HM_REG_COUNTER(&pVCpu->hm.s.StatExitPreemptTimer,       "/HM/CPU%d/Exit/PreemptTimer", "VMX-preemption timer expired.");
    996996        HM_REG_COUNTER(&pVCpu->hm.s.StatExitTprBelowThreshold,  "/HM/CPU%d/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
    997         HM_REG_COUNTER(&pVCpu->hm.s.StatExitTaskSwitch,         "/HM/CPU%d/Exit/TaskSwitch", "Guest attempted a task switch.");
     997        HM_REG_COUNTER(&pVCpu->hm.s.StatExitTaskSwitch,         "/HM/CPU%d/Exit/TaskSwitch", "Task switch.");
    998998        HM_REG_COUNTER(&pVCpu->hm.s.StatExitMtf,                "/HM/CPU%d/Exit/MonitorTrapFlag", "Monitor Trap Flag.");
    999999        HM_REG_COUNTER(&pVCpu->hm.s.StatExitApicAccess,         "/HM/CPU%d/Exit/ApicAccess", "APIC access. Guest attempted to access memory at a physical address on the APIC-access page.");
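
The counter descriptions above are shortened to name the exit cause instead of repeating "Guest attempted to..."; the registration mechanism itself is unchanged. A toy model of the per-VCPU path expansion, with myRegisterCounter standing in for the real STAMR3RegisterF() call: the path format carries a %d that is filled in with the CPU index, so every virtual CPU gets its own node such as /HM/CPU0/Exit/Instr/Hlt.

    #include <stdio.h>

    /* Hypothetical stand-in for the registration call: expands the %d in the
     * path with the VCPU index and would hand the sample to the STAM tree. */
    static int myRegisterCounter(const char *pszPathFmt, const char *pszDesc, int iCpu)
    {
        char szPath[128];
        snprintf(szPath, sizeof(szPath), pszPathFmt, iCpu);
        printf("registered %-30s (%s)\n", szPath, pszDesc);
        return 0; /* VINF_SUCCESS */
    }

    int main(void)
    {
        for (int i = 0; i < 2; i++)
            myRegisterCounter("/HM/CPU%d/Exit/Instr/Hlt", "HLT instruction.", i);
        return 0;
    }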
     
    15901590
    15911591            /*
    1592              * Construct a 1024 element page directory with 4 MB pages for
    1593              * the identity mapped page table used in real and protected mode
    1594              * without paging with EPT.
     1592             * Construct a 1024 element page directory with 4 MB pages for the identity mapped
     1593             * page table used in real and protected mode without paging with EPT.
    15951594             */
    15961595            pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
     
    21042103#ifdef LOG_ENABLED
    21052104        char            szOutput[256];
    2106 
    21072105        rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
    21082106                                szOutput, sizeof(szOutput), NULL);
     
    24732471             * pop ECX                       [59]
    24742472             * jmp return_address            [E9 return_address]
    2475              *
    24762473             */
    24772474            bool fUsesEax = (pDis->Param2.fUse == DISUSE_REG_GEN32 && pDis->Param2.Base.idxGenReg == DISGREG_EAX);
     
    25242521             * pop ECX                       [59]
    25252522             * jmp return_address            [E9 return_address]
    2526              *
    25272523             */
    25282524            Assert(pDis->Param1.fUse == DISUSE_REG_GEN32);
     
    27302726                        /*
    27312727                         * The following two requirements are VT-x specific:
    2732                          *  - G bit must be set if any high limit bits are set.
    2733                          *  - G bit must be clear if any low limit bits are clear.
     2728                         *   - G bit must be set if any high limit bits are set.
     2729                         *   - G bit must be clear if any low limit bits are clear.
    27342730                         */
    27352731                        if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
     
    27732769    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
    27742770                    ==                 (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
    2775                     ("%#x\n", pSel->Attr.u),
    2776                     false);
     2771                    ("%#x\n", pSel->Attr.u), false);
    27772772
    27782773    /* DPL must equal RPL.
    27792774       Note! This is also a hard requirement like above. */
    27802775    AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
    2781                     ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel),
    2782                     false);
     2776                    ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);
    27832777
    27842778    /*
    27852779     * The following two requirements are VT-x specific:
    2786      *  - G bit must be set if any high limit bits are set.
    2787      *  - G bit must be clear if any low limit bits are clear.
     2780     *   - G bit must be set if any high limit bits are set.
     2781     *   - G bit must be clear if any low limit bits are clear.
    27882782     */
    27892783    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
     
    28432837
    28442838    /* If we're still executing the IO code, then return false. */
    2845     if (    RT_UNLIKELY(pVCpu->hm.s.EmulateIoBlock.fEnabled)
    2846         &&  pCtx->rip <  pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
    2847         &&  pCtx->rip >  pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
    2848         &&  pCtx->cr0 == pVCpu->hm.s.EmulateIoBlock.cr0)
     2839    if (   RT_UNLIKELY(pVCpu->hm.s.EmulateIoBlock.fEnabled)
     2840        && pCtx->rip <  pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
     2841        && pCtx->rip >  pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
     2842        && pCtx->cr0 == pVCpu->hm.s.EmulateIoBlock.cr0)
    28492843        return false;
    28502844
     
    28752869            if (CPUMIsGuestInRealModeEx(pCtx))
    28762870            {
    2877                 /* In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
     2871                /*
     2872                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
    28782873                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
    28792874                 * If this is not true, we cannot execute real mode as V86 and have to fall
     
    29042899            else
    29052900            {
    2906                 /* Verify the requirements for executing code in protected
    2907                    mode. VT-x can't handle the CPU state right after a switch
    2908                    from real to protected mode. (all sorts of RPL & DPL assumptions). */
     2901                /*
     2902                 * Verify the requirements for executing code in protected mode. VT-x can't
     2903                 * handle the CPU state right after a switch from real to protected mode
     2904                 * (all sorts of RPL & DPL assumptions).
     2905                 */
    29092906                if (pVCpu->hm.s.vmx.fWasInRealMode)
    29102907                {
     
    29452942        else
    29462943        {
    2947             if (    !CPUMIsGuestInLongModeEx(pCtx)
    2948                 &&  !pVM->hm.s.vmx.fUnrestrictedGuest)
     2944            if (   !CPUMIsGuestInLongModeEx(pCtx)
     2945                && !pVM->hm.s.vmx.fUnrestrictedGuest)
    29492946            {
    29502947                if (   !pVM->hm.s.fNestedPaging        /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
     
    29562953                    return false;
    29572954
    2958                 /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
    2959                 /* Windows XP; switch to protected mode; all selectors are marked not present in the
    2960                  * hidden registers (possible recompiler bug; see load_seg_vm) */
     2955                /*
     2956                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
     2957                 * Windows XP; switch to protected mode; all selectors are marked not present
     2958                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
     2959                 */
    29612960                /** @todo Is this supposed recompiler bug still relevant with IEM? */
    29622961                if (pCtx->cs.Attr.n.u1Present == 0)
     
    29652964                    return false;
    29662965
    2967                 /* Windows XP: possible same as above, but new recompiler requires new heuristics?
    2968                    VT-x doesn't seem to like something about the guest state and this stuff avoids it. */
     2966                /*
     2967                 * Windows XP: possible same as above, but new recompiler requires new
     2968                 * heuristics? VT-x doesn't seem to like something about the guest state and
     2969                 * this stuff avoids it.
     2970                 */
    29692971                /** @todo This check is actually wrong, it doesn't take the direction of the
    29702972                 *        stack segment into account. But, it does the job for now. */
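
Among this file's hunks, the re-indented comments restate two VT-x-specific granularity rules that the adjacent checks implement: the G bit must be set if any high limit bits are set, and must be clear if any low limit bits are clear. A standalone restatement of both rules, using names local to this sketch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* - G must be set   if any of limit bits 31:20 are set.
     * - G must be clear if any of limit bits 11:0  are clear. */
    static bool myIsGranularityValid(uint32_t u32Limit, bool fGranularity)
    {
        bool const fHighOk = (u32Limit & UINT32_C(0xfff00000)) == 0 ||  fGranularity;
        bool const fLowOk  = (u32Limit & UINT32_C(0x00000fff)) == UINT32_C(0x00000fff) || !fGranularity;
        return fHighOk && fLowOk;
    }

    int main(void)
    {
        /* Limit 0xfffff with G=1 describes a 4 GiB flat segment: valid (1). */
        printf("%d\n", myIsGranularityValid(UINT32_C(0x000fffff), true));
        /* High limit bits set with G=0 violates the first rule: invalid (0). */
        printf("%d\n", myIsGranularityValid(UINT32_C(0x00100000), false));
        return 0;
    }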
  • trunk/src/VBox/VMM/include/HMInternal.h

    r72744 r72805  
    921921    } EmulateIoBlock;
    922922
    923     /* */
     923    /* Pending IO operation. */
    924924    struct
    925925    {
     
    957957
    958958    STAMPROFILEADV          StatEntry;
    959     STAMPROFILEADV          StatExit1;
    960     STAMPROFILEADV          StatExit2;
     959    STAMPROFILEADV          StatPreExit;
     960    STAMPROFILEADV          StatExitHandling;
    961961    STAMPROFILEADV          StatExitIO;
    962962    STAMPROFILEADV          StatExitMovCRx;
     
    11141114#ifdef IN_RING0
    11151115VMMR0_INT_DECL(PHMGLOBALCPUINFO) hmR0GetCurrentCpu(void);
     1116VMMR0_INT_DECL(int)              hmR0EnterCpu(PVMCPU pVCpu);
    11161117
    11171118# ifdef VBOX_STRICT
    1118 VMMR0_INT_DECL(void) hmR0DumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
     1119VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu, PCPUMCTX pCtx);
    11191120VMMR0_INT_DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
    1120 # else
    1121 #  define hmR0DumpRegs(a, b ,c)          do { } while (0)
    1122 #  define hmR0DumpDescriptor(a, b, c)    do { } while (0)
    1123 # endif /* VBOX_STRICT */
     1121# endif
    11241122
    11251123# ifdef VBOX_WITH_KERNEL_USING_XMM
     
    11271125DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
    11281126# endif
    1129 
    11301127#endif /* IN_RING0 */
    11311128
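
The header hunk above drops the do-nothing #else stubs for hmR0DumpRegs()/hmR0DumpDescriptor(), so the functions exist only in strict builds and each call site now carries the guard itself (the HMVMXR0.cpp hunk earlier moved the hmR0DumpRegs() call inside its #ifdef block accordingly). A tiny runnable model of that call-site pattern; MY_STRICT is a hypothetical stand-in for VBOX_STRICT:

    #include <stdio.h>

    #define MY_STRICT /* comment this out to model a non-strict build */

    #ifdef MY_STRICT
    static void myDumpRegs(int iCpu) { printf("register dump for cpu %d\n", iCpu); }
    #endif

    int main(void)
    {
        /* With the fallback macros removed from the header, the guard lives
         * at the call site instead of behind a do-nothing macro. */
    #ifdef MY_STRICT
        myDumpRegs(0);
    #endif
        return 0;
    }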