VirtualBox

Timestamp: Jul 3, 2018 4:05:43 AM
Author: vboxsync
Message:

VMM/HM: bugref:9193 Fix honoring of pending VMCPU_FF_HM_UPDATE_CR3 and VMCPU_FF_HM_UPDATE_PAE_PDPES
before re-entering guest execution with VT-x R0 code.
Avoid a couple of VMWRITEs because we already have caches of the values (CR4 mask, CR0 mask).
Parameter cleanup.
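
For context on the force-flag part of the message: HM records deferred per-VCPU work in a force-action bitmask, and VMCPU_FF_HM_UPDATE_CR3 / VMCPU_FF_HM_UPDATE_PAE_PDPES mark that the guest's CR3 or PAE PDPTEs must be refreshed before the next VM-entry; the fix makes sure those flags are actually serviced before execution resumes. A minimal standalone sketch of that check-and-clear pattern (everything below except the two flag names is invented for illustration and is not VirtualBox code):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit values; VirtualBox defines these differently. */
    #define VMCPU_FF_HM_UPDATE_CR3        UINT32_C(0x00000001)
    #define VMCPU_FF_HM_UPDATE_PAE_PDPES  UINT32_C(0x00000002)

    typedef struct VCPUSKETCH
    {
        uint32_t fForcedActions;   /* pending force-flag bitmask */
    } VCPUSKETCH;

    /* Hypothetical stand-ins for the real CR3/PDPTE update helpers. */
    static void UpdateCr3(VCPUSKETCH *pVCpu)      { (void)pVCpu; puts("updating/flushing CR3"); }
    static void UpdatePaePdpes(VCPUSKETCH *pVCpu) { (void)pVCpu; puts("reloading PAE PDPTEs"); }

    /* Service pending paging updates before VMRUN resumes the guest. */
    static void PreGuestReentry(VCPUSKETCH *pVCpu)
    {
        if (pVCpu->fForcedActions & VMCPU_FF_HM_UPDATE_CR3)
        {
            UpdateCr3(pVCpu);
            pVCpu->fForcedActions &= ~VMCPU_FF_HM_UPDATE_CR3;
        }
        if (pVCpu->fForcedActions & VMCPU_FF_HM_UPDATE_PAE_PDPES)
        {
            UpdatePaePdpes(pVCpu);
            pVCpu->fForcedActions &= ~VMCPU_FF_HM_UPDATE_PAE_PDPES;
        }
    }

    int main(void)
    {
        VCPUSKETCH VCpu = { VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_HM_UPDATE_PAE_PDPES };
        PreGuestReentry(&VCpu);
        return 0;
    }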

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r72786 → r72805)

Legend: lines prefixed with "-" were removed in r72805, lines prefixed with "+" were added, and unprefixed lines are unchanged context; hunk headers ("@@ -old,count +new,count @@") give the starting line and line count in each revision.
@@ -73,5 +73,4 @@
 # endif
 #endif /* !VBOX_WITH_STATISTICS */
-
 
 /** If we decide to use a function table approach this can be useful to

@@ -528,5 +527,5 @@
  *
  * @returns VBox status code.
- * @param   pCpu            Pointer to the CPU info struct.
+ * @param   pHostCpu        Pointer to the CPU info struct.
  * @param   pVM             The cross context VM structure. Can be
  *                          NULL after a resume!

@@ -536,5 +535,5 @@
  * @param   pvArg           Unused on AMD-V.
  */
-VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
+VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
                               void *pvArg)
 {
     
@@ -560,7 +559,7 @@
         if (   pVM
             && pVM->hm.s.svm.fIgnoreInUseError)
-            pCpu->fIgnoreAMDVInUseError = true;
-
-        if (!pCpu->fIgnoreAMDVInUseError)
+            pHostCpu->fIgnoreAMDVInUseError = true;
+
+        if (!pHostCpu->fIgnoreAMDVInUseError)
         {
             ASMSetFlags(fEFlags);
     
@@ -584,10 +583,10 @@
      * entirely with before executing any guest code.
      */
-    pCpu->fFlushAsidBeforeUse = true;
+    pHostCpu->fFlushAsidBeforeUse = true;
 
     /*
      * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
      */
-    ++pCpu->cTlbFlushes;
+    ++pHostCpu->cTlbFlushes;
 
     return VINF_SUCCESS;
     
@@ -599,9 +598,9 @@
  *
  * @returns VBox status code.
- * @param   pCpu            Pointer to the CPU info struct.
+ * @param   pHostCpu        Pointer to the CPU info struct.
  * @param   pvCpuPage       Pointer to the global CPU page.
  * @param   HCPhysCpuPage   Physical address of the global CPU page.
  */
-VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

@@ -609,5 +608,5 @@
                  && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
-    NOREF(pCpu);
+    RT_NOREF(pHostCpu);
 
     /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
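
The NOREF → RT_NOREF change above is cosmetic: both macros mark a parameter as deliberately unused so strict compilers stay quiet. Conceptually this boils down to a cast to void, as in the sketch below (MY_NOREF is a made-up stand-in, not IPRT's actual definition):

    #include <stdio.h>

    /* Illustrative only; IPRT's RT_NOREF is variadic and more elaborate. */
    #define MY_NOREF(var) ((void)(var))

    static int DisableCpu(void *pvCpuPage, unsigned idHostCpu)
    {
        MY_NOREF(idHostCpu);   /* kept for interface symmetry, unused here */
        printf("disabling on page %p\n", pvCpuPage);
        return 0;
    }

    int main(void)
    {
        char abPage[4096];
        return DisableCpu(abPage, 0);
    }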
     
@@ -1169,14 +1168,12 @@
  *
  * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   GCVirt      Guest virtual address of the page to invalidate.
  */
-VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
-{
-    AssertReturn(pVM, VERR_INVALID_PARAMETER);
-    Assert(pVM->hm.s.svm.fSupported);
-
-    bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
+VMMR0DECL(int) SVMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
+{
+    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
+
+    bool const fFlushPending = pVCpu->CTX_SUFF(pVM)->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
 
     /* Skip it if a TLB flush is already pending. */
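
This hunk is the clearest instance of the "parameter cleanup" theme: pVM disappears from the signature and is recovered through the VCPU's back-pointer instead (pVCpu->CTX_SUFF(pVM) in VirtualBox, where CTX_SUFF selects the ring-0 or ring-3 member). Callers pass one argument less and can no longer hand in a VM that does not own the VCPU. A simplified sketch of the idea, with hypothetical types:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct MYVM
    {
        bool fAlwaysFlushTlb;
    } MYVM;

    typedef struct MYVCPU
    {
        MYVM *pVM;   /* back-pointer to the owning VM */
        int   idCpu;
    } MYVCPU;

    /* After the cleanup only the VCPU is passed; the VM is reachable from it. */
    static int InvalidatePage(MYVCPU *pVCpu, uintptr_t GCVirt)
    {
        assert(pVCpu->pVM);
        if (pVCpu->pVM->fAlwaysFlushTlb)
            printf("full TLB flush instead of invalidating %#lx\n", (unsigned long)GCVirt);
        else
            printf("invalidating page %#lx on VCPU %d\n", (unsigned long)GCVirt, pVCpu->idCpu);
        return 0;
    }

    int main(void)
    {
        MYVM   VM   = { false };
        MYVCPU VCpu = { &VM, 0 };
        return InvalidatePage(&VCpu, 0x1000);
    }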
     
@@ -1610,5 +1607,5 @@
     pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
 
-    Assert(RT_HI_U32(uShadowCr0) == 0);
+    Assert(!RT_HI_U32(uShadowCr0));
     if (pVmcb->guest.u64CR0 != uShadowCr0)
     {
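
Assert(!RT_HI_U32(x)) is equivalent to the old Assert(RT_HI_U32(x) == 0); the assertion checks that the shadow CR0 value fits in 32 bits before it is written to the VMCB. A standalone illustration, with MY_HI_U32 as my own macro carrying the obvious semantics:

    #include <assert.h>
    #include <stdint.h>

    /* High 32 bits of a 64-bit value; mirrors what RT_HI_U32 yields. */
    #define MY_HI_U32(u64) ((uint32_t)((uint64_t)(u64) >> 32))

    int main(void)
    {
        uint64_t const uShadowCr0 = UINT64_C(0x80050033);  /* typical CR0: PG|AM|WP|NE|ET|MP|PE */
        assert(!MY_HI_U32(uShadowCr0));                    /* value must fit in 32 bits */
        return 0;
    }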
     
@@ -1723,5 +1720,5 @@
 
     /* Update VMCB with the shadow CR4 the appropriate VMCB clean bits. */
-    Assert(RT_HI_U32(uShadowCr4) == 0);
+    Assert(!RT_HI_U32(uShadowCr4));
     pVmcb->guest.u64CR4 = uShadowCr4;
     pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS);
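
The code around these asserts also illustrates the write-avoidance idea the commit message mentions for the VT-x side: a control-block field is rewritten only when the cached value actually differs, and only then are the matching VMCB clean bits cleared so the CPU knows to re-read that state. A simplified, hypothetical sketch (the clean-bit value and types are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define MY_VMCB_CLEAN_CRX  UINT32_C(0x00000020)   /* illustrative clean-bit */

    typedef struct MYVMCB
    {
        uint64_t u64Cr0;
        uint32_t u32CleanBits;
    } MYVMCB;

    /* Write CR0 into the VMCB only when it changed, invalidating the clean bit. */
    static void UpdateShadowCr0(MYVMCB *pVmcb, uint64_t uShadowCr0)
    {
        if (pVmcb->u64Cr0 != uShadowCr0)
        {
            pVmcb->u64Cr0       = uShadowCr0;
            pVmcb->u32CleanBits &= ~MY_VMCB_CLEAN_CRX;   /* CPU must re-read CRx state */
            puts("CR0 updated, clean bit cleared");
        }
        else
            puts("CR0 unchanged, VMCB left marked clean");
    }

    int main(void)
    {
        MYVMCB Vmcb = { UINT64_C(0x80050033), UINT32_C(0xffffffff) };
        UpdateShadowCr0(&Vmcb, UINT64_C(0x80050033));   /* no-op */
        UpdateShadowCr0(&Vmcb, UINT64_C(0x80050037));   /* real change */
        return 0;
    }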
     
@@ -2337,17 +2334,15 @@
  *
  * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCpu        Pointer to the CPU info struct.
- */
-VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
-{
-    AssertPtr(pVM);
+ * @param   pHostCpu    Pointer to the CPU info struct.
+ */
+VMMR0DECL(int) SVMR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
+{
     AssertPtr(pVCpu);
-    Assert(pVM->hm.s.svm.fSupported);
+    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    NOREF(pVM); NOREF(pCpu);
-
-    LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
+    RT_NOREF(pHostCpu);
+
+    LogFlowFunc(("pVCpu=%p\n", pVCpu));
     Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
                                    == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));

@@ -2410,5 +2405,5 @@
              * initializing AMD-V if necessary (onlined CPUs, local init etc.)
              */
-            int rc = HMR0EnterCpu(pVCpu);
+            int rc = hmR0EnterCpu(pVCpu);
             AssertRC(rc); NOREF(rc);
             Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
     
@@ -3126,6 +3121,6 @@
     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
-    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
-    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
 
     
@@ -3255,5 +3250,4 @@
  *
  * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.

@@ -3261,7 +3255,6 @@
  *                      VINF_VMM_UNKNOWN_RING3_CALL.
  */
-static int hmR0SvmExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
-{
-    Assert(pVM);
+static int hmR0SvmExitToRing3(PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
+{
     Assert(pVCpu);
     Assert(pCtx);
     
@@ -3270,5 +3263,6 @@
     /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
     VMMRZCallRing3Disable(pVCpu);
-    Log4Func(("rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", rcExit, pVCpu->fLocalForcedActions, pVM->fGlobalForcedActions));
+    Log4Func(("rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", rcExit, pVCpu->fLocalForcedActions,
+              pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions));
 
     /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */

@@ -3290,5 +3284,5 @@
                                | CPUM_CHANGED_TR
                                | CPUM_CHANGED_HIDDEN_SEL_REGS);
-    if (   pVM->hm.s.fNestedPaging
+    if (   pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
         && CPUMIsGuestPagingEnabledEx(pCtx))
     {
     
@@ -3331,6 +3325,4 @@
  * intercepts.
  *
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU or nested-guest-CPU context.
  * @param   pVmcb       Pointer to the VM control block.

@@ -3338,5 +3330,5 @@
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb)
+static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb)
 {
     /*

@@ -3347,5 +3339,5 @@
     bool       fParavirtTsc;
     uint64_t   uTscOffset;
-    bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
+    bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu->CTX_SUFF(pVM), pVCpu, &uTscOffset, &fParavirtTsc);
 
     bool fIntercept;
     
@@ -4079,5 +4071,4 @@
  * Reports world-switch error and dumps some useful debug info.
  *
- * @param   pVM             The cross context VM structure.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   rcVMRun         The return code from VMRUN (or

@@ -4086,18 +4077,15 @@
  * @param   pCtx            Pointer to the guest-CPU context.
  */
-static void hmR0SvmReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
-{
-    NOREF(pCtx);
+static void hmR0SvmReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
+{
     HMSVM_ASSERT_PREEMPT_SAFE();
     HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
 
-    PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
     {
-        hmR0DumpRegs(pVM, pVCpu, pCtx); NOREF(pVM);
-        /** @todo We probably don't need to dump this anymore or we can expand
-         *        hmR0DumpRegs()? */
 #ifdef VBOX_STRICT
+        hmR0DumpRegs(pVCpu, pCtx);
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         Log4(("ctrl.u32VmcbCleanBits                 %#RX32\n",   pVmcb->ctrl.u32VmcbCleanBits));
         Log4(("ctrl.u16InterceptRdCRx                %#x\n",      pVmcb->ctrl.u16InterceptRdCRx));

@@ -4225,5 +4213,5 @@
         Log4Func(("rcVMRun=%d\n", rcVMRun));
 
-    NOREF(pVmcb);
+    NOREF(pCtx);
 }
 
     
@@ -4245,9 +4233,8 @@
  *         to the EM loop.
  *
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
  */
-static int hmR0SvmCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+static int hmR0SvmCheckForceFlags(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));

@@ -4262,4 +4249,5 @@
         APICUpdatePendingInterrupts(pVCpu);
 
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (   VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
                             ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
     
@@ -4324,5 +4312,4 @@
  * @retval VINF_* scheduling changes, we have to go back to ring-3.
  *
- * @param   pVM             The cross context VM structure.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pCtx            Pointer to the nested-guest-CPU context.

@@ -4332,5 +4319,5 @@
  * @sa      hmR0SvmPreRunGuest.
  */
-static int hmR0SvmPreRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static int hmR0SvmPreRunGuestNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     HMSVM_ASSERT_PREEMPT_SAFE();

@@ -4343,5 +4330,5 @@
 
     /* Check force flag actions that might require us to go back to ring-3. */
-    int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
+    int rc = hmR0SvmCheckForceFlags(pVCpu, pCtx);
     if (rc != VINF_SUCCESS)
         return rc;

@@ -4364,4 +4351,5 @@
      * NB: If we could continue a task switch exit we wouldn't need to do this.
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (RT_UNLIKELY(   !pVM->hm.s.svm.u32Features
                     &&  pVCpu->hm.s.Event.fPending
     
@@ -4446,10 +4434,9 @@
  * @retval VINF_* scheduling changes, we have to go back to ring-3.
  *
- * @param   pVM             The cross context VM structure.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pCtx            Pointer to the guest-CPU context.
  * @param   pSvmTransient   Pointer to the SVM transient structure.
  */
-static int hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static int hmR0SvmPreRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     HMSVM_ASSERT_PREEMPT_SAFE();

@@ -4457,5 +4444,5 @@
 
     /* Check force flag actions that might require us to go back to ring-3. */
-    int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
+    int rc = hmR0SvmCheckForceFlags(pVCpu, pCtx);
     if (rc != VINF_SUCCESS)
         return rc;

@@ -4471,4 +4458,5 @@
      * NB: If we could continue a task switch exit we wouldn't need to do this.
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending && (((pVCpu->hm.s.Event.u64IntInfo >> 8) & 7) == SVM_EVENT_NMI)))
         if (RT_UNLIKELY(!pVM->hm.s.svm.u32Features))

@@ -4604,5 +4592,5 @@
         || fMigratedHostCpu)
     {
-        hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pCtx, pVmcb);
+        hmR0SvmUpdateTscOffsetting(pVCpu, pCtx, pVmcb);
         pSvmTransient->fUpdateTscOffsetting = false;
     }
     
@@ -4687,14 +4675,14 @@
 
 /**
- * Wrapper for running the guest code in AMD-V.
+ * Wrapper for running the guest (or nested-guest) code in AMD-V.
  *
  * @returns VBox strict status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   HCPhysVmcb  The host physical address of the VMCB.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0SvmRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, RTHCPHYS HCPhysVmcb)
 {
     /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */

@@ -4708,41 +4696,9 @@
      * Refer MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
 #ifdef VBOX_WITH_KERNEL_USING_XMM
-    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
-                             pVCpu->hm.s.svm.pfnVMRun);
+    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu, pVCpu->hm.s.svm.pfnVMRun);
 #else
-    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
-#endif
-}
-
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-/**
- * Wrapper for running the nested-guest code in AMD-V.
- *
- * @returns VBox strict status code.
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-DECLINLINE(int) hmR0SvmRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
-    pCtx->fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
-
-    /*
-     * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses
-     * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are
-     * callee-saved and thus the need for this XMM wrapper.
-     *
-     * Refer MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
-     */
-#ifdef VBOX_WITH_KERNEL_USING_XMM
-    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
-                             pVCpu->hm.s.svm.pfnVMRun);
-#else
-    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
+    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu);
 #endif
 }
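
The guest and nested-guest run wrappers were identical except for which VMCB physical address they handed to pfnVMRun, so this hunk folds them into a single hmR0SvmRunGuest() taking HCPhysVmcb; the nested run loop further down now calls it with pCtx->hwvirt.svm.HCPhysVmcb. The shape of that refactor in a standalone sketch (all names and values below are invented):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t MYHCPHYS;   /* host-physical address, in the spirit of RTHCPHYS */

    /* Before: two near-identical wrappers, one per VMCB. After: one wrapper
       parameterized by the VMCB address; each caller picks the guest or the
       nested-guest VMCB. */
    static int RunGuest(MYHCPHYS HCPhysVmcbHost, MYHCPHYS HCPhysVmcb)
    {
        printf("VMRUN host-VMCB=%#llx guest-VMCB=%#llx\n",
               (unsigned long long)HCPhysVmcbHost, (unsigned long long)HCPhysVmcb);
        return 0;
    }

    int main(void)
    {
        MYHCPHYS const HCPhysVmcbHost   = 0x1000;
        MYHCPHYS const HCPhysVmcbGuest  = 0x2000;
        MYHCPHYS const HCPhysVmcbNstGst = 0x3000;

        RunGuest(HCPhysVmcbHost, HCPhysVmcbGuest);    /* normal run loop */
        RunGuest(HCPhysVmcbHost, HCPhysVmcbNstGst);   /* nested-guest run loop */
        return 0;
    }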
     
@@ -4768,5 +4724,5 @@
     return uTicks - pVmcbNstGstCache->u64TSCOffset;
 }
-#endif
+
 
 /**
     
@@ -4819,5 +4775,5 @@
     }
 
-    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
+    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
     TMNotifyEndOfExecution(pVCpu);                              /* Notify TM that the guest is no longer running. */
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     
@@ -4908,12 +4864,11 @@
  *
  * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
  * @param   pcLoops     Pointer to the number of executed loops.
  */
-static int hmR0SvmRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
-{
-    uint32_t const cMaxResumeLoops = pVM->hm.s.cMaxResumeLoops;
+static int hmR0SvmRunGuestCodeNormal(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
+{
+    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
     Assert(pcLoops);
     Assert(*pcLoops <= cMaxResumeLoops);
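
For context, cMaxResumeLoops caps how many VMRUN/#VMEXIT round trips the ring-0 loop makes before it forces a return to ring-3, so pending ring-3 work cannot be starved. A toy version of such a bounded loop (the status codes and helpers are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define MY_OK             0
    #define MY_BACK_TO_RING3  1

    /* Pretend guest execution: report a benign exit on every call. */
    static int RunOnce(uint32_t iLoop) { printf("VMRUN #%u -> #VMEXIT\n", iLoop); return MY_OK; }

    static int RunLoop(uint32_t cMaxResumeLoops, uint32_t *pcLoops)
    {
        for (;;)
        {
            int rc = RunOnce(*pcLoops);
            if (rc != MY_OK)
                return rc;
            if (++(*pcLoops) >= cMaxResumeLoops)
                return MY_BACK_TO_RING3;   /* yield to ring-3; caller may re-enter */
        }
    }

    int main(void)
    {
        uint32_t cLoops = 0;
        int rc = RunLoop(4, &cLoops);
        printf("rc=%d after %u loops\n", rc, cLoops);
        return 0;
    }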
     
@@ -4933,5 +4888,5 @@
            ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
+        rc = hmR0SvmPreRunGuest(pVCpu, pCtx, &SvmTransient);
         if (rc != VINF_SUCCESS)
             break;

@@ -4944,5 +4899,5 @@
          */
         hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
-        rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
+        rc = hmR0SvmRunGuest(pVCpu, pCtx, pVCpu->hm.s.svm.HCPhysVmcb);
 
         /* Restore any residual host-state and save any bits shared between host and guest
     
@@ -4955,6 +4910,6 @@
             if (rc == VINF_SUCCESS)
                 rc = VERR_SVM_INVALID_GUEST_STATE;
-            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
-            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
+            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
+            hmR0SvmReportWorldSwitchError(pVCpu, rc, pCtx);
             break;
         }

@@ -4962,8 +4917,8 @@
         /* Handle the #VMEXIT. */
         HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
-        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
         VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
         rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
         if (rc != VINF_SUCCESS)
             break;
     
@@ -4985,12 +4940,11 @@
  *
  * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
  * @param   pcLoops     Pointer to the number of executed loops.
  */
-static int hmR0SvmRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
-{
-    uint32_t const cMaxResumeLoops = pVM->hm.s.cMaxResumeLoops;
+static int hmR0SvmRunGuestCodeStep(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
+{
+    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
     Assert(pcLoops);
     Assert(*pcLoops <= cMaxResumeLoops);

@@ -5015,5 +4969,5 @@
            ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
+        rc = hmR0SvmPreRunGuest(pVCpu, pCtx, &SvmTransient);
         if (rc != VINF_SUCCESS)
             break;

@@ -5029,5 +4983,5 @@
         hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
 
-        rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
+        rc = hmR0SvmRunGuest(pVCpu, pCtx, pVCpu->hm.s.svm.HCPhysVmcb);
 
         /* Restore any residual host-state and save any bits shared between host and guest
     
@@ -5040,6 +4994,6 @@
             if (rc == VINF_SUCCESS)
                 rc = VERR_SVM_INVALID_GUEST_STATE;
-            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
-            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
+            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
+            hmR0SvmReportWorldSwitchError(pVCpu, rc, pCtx);
             return rc;
         }

@@ -5047,8 +5001,8 @@
         /* Handle the #VMEXIT. */
         HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
-        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
         VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
         rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
         if (rc != VINF_SUCCESS)
             break;
     
@@ -5091,5 +5045,4 @@
  *
  * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.

@@ -5098,9 +5051,9 @@
  *                      execution loop pass the remainder value, else pass 0.
  */
-static int hmR0SvmRunGuestCodeNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
+static int hmR0SvmRunGuestCodeNested(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
 {
     HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
     Assert(pcLoops);
-    Assert(*pcLoops <= pVM->hm.s.cMaxResumeLoops);
+    Assert(*pcLoops <= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops);
 
     SVMTRANSIENT SvmTransient;
     
@@ -5119,5 +5072,5 @@
            ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0SvmPreRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient);
+        rc = hmR0SvmPreRunGuestNested(pVCpu, pCtx, &SvmTransient);
         if (    rc != VINF_SUCCESS
             || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))

@@ -5134,5 +5087,5 @@
         hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
 
-        rc = hmR0SvmRunGuestNested(pVM, pVCpu, pCtx);
+        rc = hmR0SvmRunGuest(pVCpu, pCtx, pCtx->hwvirt.svm.HCPhysVmcb);
 
         /* Restore any residual host-state and save any bits shared between host and guest
     
@@ -5158,12 +5111,12 @@
         /* Handle the #VMEXIT. */
         HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
-        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
         VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pCtx->hwvirt.svm.CTX_SUFF(pVmcb));
         rc = hmR0SvmHandleExitNested(pVCpu, pCtx, &SvmTransient);
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
         if (    rc != VINF_SUCCESS
             || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
             break;
-        if (++(*pcLoops) >= pVM->hm.s.cMaxResumeLoops)
+        if (++(*pcLoops) >= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
         {
             STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
     
@@ -5185,9 +5138,8 @@
  *
  * @returns Strict VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
  */
-VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));

@@ -5202,7 +5154,7 @@
     {
         if (!pVCpu->hm.s.fSingleInstruction)
-            rc = hmR0SvmRunGuestCodeNormal(pVM, pVCpu, pCtx, &cLoops);
+            rc = hmR0SvmRunGuestCodeNormal(pVCpu, pCtx, &cLoops);
         else
-            rc = hmR0SvmRunGuestCodeStep(pVM, pVCpu, pCtx, &cLoops);
+            rc = hmR0SvmRunGuestCodeStep(pVCpu, pCtx, &cLoops);
     }
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM

@@ -5216,5 +5168,5 @@
     if (rc == VINF_SVM_VMRUN)
     {
-        rc = hmR0SvmRunGuestCodeNested(pVM, pVCpu, pCtx, &cLoops);
+        rc = hmR0SvmRunGuestCodeNested(pVCpu, pCtx, &cLoops);
         if (rc == VINF_SVM_VMEXIT)
             rc = VINF_SUCCESS;

@@ -5229,5 +5181,5 @@
 
     /* Prepare to return to ring-3. This will remove longjmp notifications. */
-    rc = hmR0SvmExitToRing3(pVM, pVCpu, pCtx, rc);
+    rc = hmR0SvmExitToRing3(pVCpu, pCtx, rc);
     Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
     return rc;
     
@@ -5954,5 +5906,4 @@
  * @retval  VERR_*                  Fatal errors.
  *
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        The guest CPU context.

@@ -5960,5 +5911,5 @@
  * @remarks Updates the RIP if the instruction was executed successfully.
  */
-static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+static int hmR0SvmInterpretInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     /* Only allow 32 & 64 bit code. */

@@ -5966,5 +5917,5 @@
     {
         PDISSTATE pDis = &pVCpu->hm.s.DisState;
-        int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
+        int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, NULL /* pcbInstr */);
         if (   RT_SUCCESS(rc)
             && pDis->pCurInstr->uOpcode == OP_INVLPG)
     
@@ -6447,6 +6398,5 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    Assert(!pVM->hm.s.fNestedPaging);
+    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
 
     bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu, pCtx);

@@ -6465,5 +6415,5 @@
 
     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
-    int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, pCtx);    /* Updates RIP if successful. */
+    int rc = hmR0SvmInterpretInvlpg(pVCpu, pCtx);    /* Updates RIP if successful. */
     Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
     HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
     
@@ -6997,5 +6947,5 @@
     static uint32_t const s_aIOSize[8]  = { 0, 1, 2, 0, 4, 0, 0, 0 };                   /* Size of the I/O accesses in bytes. */
     static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 };  /* AND masks for saving
-                                                                                            the result (in AL/AX/EAX). */
+                                                                                           the result (in AL/AX/EAX). */
     Log4Func(("CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
 
     
@@ -7243,8 +7193,8 @@
      */
     if (   pVM->hm.s.fTprPatchingAllowed
-        && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
         && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == XAPIC_OFF_TPR
         && (   !(u32ErrCode & X86_TRAP_PF_P)                                                             /* Not present */
             || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD))  /* MMIO page. */
+        && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
         && !CPUMIsGuestInLongModeEx(pCtx)
         && !CPUMGetGuestCPL(pVCpu)
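
Moving the CPUMIsGuestInSvmNestedHwVirtMode() test below the fault-offset and error-code tests looks like an evaluation-order tweak: with &&, the cheap integer comparisons now short-circuit the condition before any function call has to run. The general idea, illustrated standalone (0x80 is the TPR's offset within the xAPIC page; everything else is invented):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned g_cExpensiveCalls;

    /* Stand-in for a function-call predicate like CPUMIsGuestInSvmNestedHwVirtMode(). */
    static bool ExpensiveCheck(void)
    {
        ++g_cExpensiveCalls;
        return false;
    }

    int main(void)
    {
        unsigned const uFaultOffset = 0x123;   /* not the TPR offset, so the first test fails */

        /* Cheap comparison first: && short-circuits and ExpensiveCheck() never runs. */
        if (uFaultOffset == 0x80 && ExpensiveCheck())
            puts("patch TPR access");

        printf("expensive predicate evaluated %u time(s)\n", g_cExpensiveCalls);
        return 0;
    }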
     
@@ -8049,8 +7999,6 @@
     return VINF_SUCCESS;
 }
-
 #endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
 
-
 /** @} */
 