VirtualBox

Changeset 81177 in vbox


Timestamp:
Oct 9, 2019 10:41:01 AM
Author:
vboxsync
Message:

VMM/HMSVMR0: Nested SVM: bugref:7243 Coalesce hmR0SvmExportGuestStateNested into hmR0SvmExportGuestState.

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

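The change is a de-duplication: the separate nested-guest export routine is folded into the common one, which now obtains the active VMCB via hmR0SvmGetCurrentVmcb() and branches on pSvmTransient->fIsNestedGuest instead of maintaining a near-identical clone. A minimal sketch of the pattern, with purely illustrative names and types (not the real VirtualBox APIs):

    #include <stdbool.h>

    /* Sketch of the coalescing pattern; all identifiers here are hypothetical. */
    typedef struct VMCB VMCB;
    typedef struct CPU { VMCB *pGuestVmcb; VMCB *pNstGstVmcb; } CPU;

    static VMCB *getCurrentVmcb(CPU *pCpu, bool fIsNestedGuest)
    {
        /* One selector replaces per-caller VMCB plumbing. */
        return fIsNestedGuest ? pCpu->pNstGstVmcb : pCpu->pGuestVmcb;
    }

    static int exportGuestState(CPU *pCpu, bool fIsNestedGuest)
    {
        VMCB *pVmcb = getCurrentVmcb(pCpu, fIsNestedGuest);
        /* ...shared export work happens exactly once here... */
        if (!fIsNestedGuest)
        {
            /* Guest-only steps (APIC TPR, exception intercepts) are guarded
               by the flag rather than duplicated in a second function. */
        }
        return 0;
    }
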
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r81170)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r81177)
@@ -1930,15 +1930,14 @@
 }
 
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-/**
- * Exports the nested-guest hardware virtualization state into the nested-guest
+/**
+ * Exports the hardware virtualization state into the nested-guest
  * VMCB.
  *
- * @param   pVCpu         The cross context virtual CPU structure.
- * @param   pVmcbNstGst   Pointer to the nested-guest VM control block.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pVmcb   Pointer to the VM control block.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmExportGuestHwvirtStateNested(PVMCPUCC pVCpu, PSVMVMCB pVmcbNstGst)
+static void hmR0SvmExportGuestHwvirtState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
@@ -1946,4 +1945,17 @@
     if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_HWVIRT)
     {
+        if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
+        {
+            PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+            PCVM      pVM  = pVCpu->CTX_SUFF(pVM);
+
+            HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);                                /* Nested VGIF is not supported yet. */
+            Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);    /* Physical hardware supports VGIF. */
+            Assert(HMIsSvmVGifActive(pVM));                                        /* Outer VM has enabled VGIF. */
+            NOREF(pVM);
+
+            pVmcb->ctrl.IntCtrl.n.u1VGif = CPUMGetGuestGif(pCtx);
+        }
+
         /*
          * Ensure the nested-guest pause-filter counters don't exceed the outer guest values esp.
@@ -1954,6 +1966,6 @@
          * and may continue execution in SVM R0 without a nested-guest #VMEXIT in between.
          */
-        PVMCC            pVM = pVCpu->CTX_SUFF(pVM);
-        PSVMVMCBCTRL   pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
+        PVMCC          pVM = pVCpu->CTX_SUFF(pVM);
+        PSVMVMCBCTRL   pVmcbCtrl = &pVmcb->ctrl;
         uint16_t const uGuestPauseFilterCount     = pVM->hm.s.svm.cPauseFilter;
         uint16_t const uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
@@ -1961,18 +1973,19 @@
         {
             PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-            pVmcbNstGstCtrl->u16PauseFilterCount     = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount);
-            pVmcbNstGstCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold, uGuestPauseFilterThreshold);
-            pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+            pVmcbCtrl->u16PauseFilterCount     = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount);
+            pVmcbCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold, uGuestPauseFilterThreshold);
         }
         else
         {
-            pVmcbNstGstCtrl->u16PauseFilterCount     = uGuestPauseFilterCount;
-            pVmcbNstGstCtrl->u16PauseFilterThreshold = uGuestPauseFilterThreshold;
-        }
+            /** @todo r=ramshankar: We can turn these assignments into assertions. */
+            pVmcbCtrl->u16PauseFilterCount     = uGuestPauseFilterCount;
+            pVmcbCtrl->u16PauseFilterThreshold = uGuestPauseFilterThreshold;
+        }
+        pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
 
         pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_HWVIRT;
     }
 }
-#endif
+
 
 /**
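The retained pause-filter logic clamps the nested hypervisor's requested count and threshold to the outer VM's configured values via RT_MIN, so a nested hypervisor cannot set laxer PAUSE-filter limits than the outer VM allows. A reduced sketch of the clamping, with an illustrative struct and a stand-in for RT_MIN:

    #include <stdint.h>

    /* Stand-in for IPRT's RT_MIN. */
    #define MIN_U16(a, b) ((uint16_t)((a) < (b) ? (a) : (b)))

    /* Illustrative mirror of the VMCB pause-filter control fields. */
    typedef struct PauseFilterCfg { uint16_t cCount, cThreshold; } PauseFilterCfg;

    static void clampPauseFilter(PauseFilterCfg *pNstGst, PauseFilterCfg const *pOuter)
    {
        /* Never let the nested-guest values exceed the outer guest's limits. */
        pNstGst->cCount     = MIN_U16(pNstGst->cCount,     pOuter->cCount);
        pNstGst->cThreshold = MIN_U16(pNstGst->cThreshold, pOuter->cThreshold);
    }
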
@@ -1985,4 +1998,6 @@
 static int hmR0SvmExportGuestApicTpr(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
 {
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
+
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
     {
@@ -2042,6 +2057,5 @@
 
 /**
- * Sets up the exception interrupts required for guest (or nested-guest)
- * execution in the VMCB.
+ * Sets up the exception interrupts required for guest execution in the VMCB.
  *
  * @param   pVCpu       The cross context virtual CPU structure.
@@ -2052,8 +2066,8 @@
 static void hmR0SvmExportGuestXcptIntercepts(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
 {
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
 
     /* If we modify intercepts from here, please check & adjust hmR0SvmMergeVmcbCtrlsNested() if required. */
-    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_XCPT_INTERCEPTS)
+    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_SVM_XCPT_INTERCEPTS)
     {
         /* Trap #UD for GIM provider (e.g. for hypercalls). */
@@ -2070,5 +2084,5 @@
 
         /* The remaining intercepts are handled elsewhere, e.g. in hmR0SvmExportGuestCR0(). */
-        pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_SVM_XCPT_INTERCEPTS;
+        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_SVM_XCPT_INTERCEPTS);
     }
 }
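hmR0SvmExportGuestXcptIntercepts() now reads and clears fCtxChanged through the unordered-atomic ASMAtomicUoReadU64()/ASMAtomicUoAndU64() accessors, matching hmR0SvmExportGuestApicTpr(): the field is a shared 64-bit flag word, and each exporter must clear only its own bit. A simplified C11 sketch of that test-then-clear pattern (the bit value is illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    #define CHANGED_XCPT_INTERCEPTS  UINT64_C(0x0400)   /* illustrative bit */

    static void exportXcptIntercepts(_Atomic uint64_t *pfCtxChanged)
    {
        /* Relaxed load, analogous to ASMAtomicUoReadU64. */
        if (atomic_load_explicit(pfCtxChanged, memory_order_relaxed)
            & CHANGED_XCPT_INTERCEPTS)
        {
            /* ...program the VMCB exception intercept bitmap here... */

            /* Atomically clear only this exporter's bit, leaving bits
               owned by other exporters untouched. */
            atomic_fetch_and_explicit(pfCtxChanged, ~CHANGED_XCPT_INTERCEPTS,
                                      memory_order_relaxed);
        }
    }
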
@@ -2302,24 +2316,23 @@
 
 /**
- * Exports the guest state from the guest-CPU context into the VMCB.
- *
- * The CPU state will be loaded from these fields on every successful VM-entry.
- * Also sets up the appropriate VMRUN function to execute guest code based on
- * the guest CPU mode.
+ * Exports the guest or nested-guest state from the virtual-CPU context into the
+ * VMCB.
+ *
+ * Also sets up the appropriate VMRUN function to execute guest or nested-guest
+ * code based on the virtual-CPU mode.
  *
  * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pSvmTransient   Pointer to the SVM-transient structure.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0SvmExportGuestState(PVMCPUCC pVCpu)
+static int hmR0SvmExportGuestState(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient)
 {
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
 
-    PSVMVMCB  pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB  pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
     PCCPUMCTX pCtx  = &pVCpu->cpum.GstCtx;
-
     Assert(pVmcb);
-    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
 
     pVmcb->guest.u64RIP    = pCtx->rip;
@@ -2327,27 +2340,23 @@
     pVmcb->guest.u64RFlags = pCtx->eflags.u32;
     pVmcb->guest.u64RAX    = pCtx->rax;
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-    if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
-    {
-        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);    /* Hardware supports it. */
-        Assert(HMIsSvmVGifActive(pVCpu->CTX_SUFF(pVM)));                                        /* VM has configured it. */
-        pVmcb->ctrl.IntCtrl.n.u1VGif = CPUMGetGuestGif(pCtx);
-    }
-#endif
-
+
+    bool const fIsNestedGuest = pSvmTransient->fIsNestedGuest;
     RTCCUINTREG const fEFlags = ASMIntDisableFlags();
 
     int rc = hmR0SvmExportGuestControlRegs(pVCpu, pVmcb);
     AssertRCReturnStmt(rc, ASMSetFlags(fEFlags), rc);
-
     hmR0SvmExportGuestSegmentRegs(pVCpu, pVmcb);
     hmR0SvmExportGuestMsrs(pVCpu, pVmcb);
-    hmR0SvmExportGuestXcptIntercepts(pVCpu, pVmcb);
+    hmR0SvmExportGuestHwvirtState(pVCpu, pVmcb);
 
     ASMSetFlags(fEFlags);
 
-    /* hmR0SvmExportGuestApicTpr() must be called -after- hmR0SvmExportGuestMsrs() as we
-       otherwise we would overwrite the LSTAR MSR that we use for TPR patching. */
-    hmR0SvmExportGuestApicTpr(pVCpu, pVmcb);
+    if (!fIsNestedGuest)
+    {
+        /* hmR0SvmExportGuestApicTpr() must be called -after- hmR0SvmExportGuestMsrs() as we
+           otherwise we would overwrite the LSTAR MSR that we use for TPR patching. */
+        hmR0SvmExportGuestApicTpr(pVCpu, pVmcb);
+        hmR0SvmExportGuestXcptIntercepts(pVCpu, pVmcb);
+    }
 
     rc = hmR0SvmSelectVMRunHandler(pVCpu);
@@ -2355,15 +2364,19 @@
 
     /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
-    ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~(   HM_CHANGED_GUEST_RIP
-                                                  |  HM_CHANGED_GUEST_RFLAGS
-                                                  |  HM_CHANGED_GUEST_GPRS_MASK
-                                                  |  HM_CHANGED_GUEST_X87
-                                                  |  HM_CHANGED_GUEST_SSE_AVX
-                                                  |  HM_CHANGED_GUEST_OTHER_XSAVE
-                                                  |  HM_CHANGED_GUEST_XCRx
-                                                  |  HM_CHANGED_GUEST_TSC_AUX
-                                                  |  HM_CHANGED_GUEST_OTHER_MSRS
-                                                  |  HM_CHANGED_GUEST_HWVIRT
-                                                  | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_XCPT_INTERCEPTS)));
+    uint64_t fUnusedMask = HM_CHANGED_GUEST_RIP
+                         | HM_CHANGED_GUEST_RFLAGS
+                         | HM_CHANGED_GUEST_GPRS_MASK
+                         | HM_CHANGED_GUEST_X87
+                         | HM_CHANGED_GUEST_SSE_AVX
+                         | HM_CHANGED_GUEST_OTHER_XSAVE
+                         | HM_CHANGED_GUEST_XCRx
+                         | HM_CHANGED_GUEST_TSC_AUX
+                         | HM_CHANGED_GUEST_OTHER_MSRS;
+    if (fIsNestedGuest)
+        fUnusedMask |= HM_CHANGED_SVM_XCPT_INTERCEPTS
+                    |  HM_CHANGED_GUEST_APIC_TPR;
+
+    ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~(  fUnusedMask
+                                                  | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_MASK)));
 
 #ifdef VBOX_STRICT
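The final clear of fCtxChanged is merged the same way: a base mask covers the bits every path exports, and the bits the nested path does not export here (exception intercepts and APIC TPR, handled for nested guests during VMCB merging rather than in these exporters) are OR'd in conditionally before one atomic clear. In outline, with illustrative flag values rather than the real HM_CHANGED_* constants:

    #include <stdint.h>

    /* Illustrative dirty-state bits; not the real HM_CHANGED_* values. */
    #define F_RIP   UINT64_C(0x01)
    #define F_GPRS  UINT64_C(0x02)
    #define F_XCPT  UINT64_C(0x04)
    #define F_TPR   UINT64_C(0x08)

    static uint64_t buildClearMask(int fIsNestedGuest)
    {
        uint64_t fMask = F_RIP | F_GPRS;    /* exported on every path */
        if (fIsNestedGuest)
            fMask |= F_XCPT | F_TPR;        /* not exported here for nested guests */
        return fMask;                       /* caller: fCtxChanged &= ~fMask */
    }
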
@@ -2472,4 +2485,6 @@
     PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
 
+    HMSVM_ASSERT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
+
     /*
      * First cache the nested-guest VMCB fields we may potentially modify.
@@ -2534,85 +2549,4 @@
         Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
     }
-}
-
-
-/**
- * Exports the nested-guest state into the VMCB.
- *
- * We need to export the entire state as we could be continuing nested-guest
- * execution at any point (not just immediately after VMRUN) and thus the VMCB
- * can be out-of-sync with the nested-guest state if it was executed in IEM.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-static int hmR0SvmExportGuestStateNested(PVMCPUCC pVCpu)
-{
-    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
-
-    PCCPUMCTX   pCtx        = &pVCpu->cpum.GstCtx;
-    PSVMVMCB    pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    Assert(pVmcbNstGst);
-
-    hmR0SvmSetupVmcbNested(pVCpu);
-
-    pVmcbNstGst->guest.u64RIP    = pCtx->rip;
-    pVmcbNstGst->guest.u64RSP    = pCtx->rsp;
-    pVmcbNstGst->guest.u64RFlags = pCtx->eflags.u32;
-    pVmcbNstGst->guest.u64RAX    = pCtx->rax;
-
-    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
-
-    int rc = hmR0SvmExportGuestControlRegs(pVCpu, pVmcbNstGst);
-    AssertRCReturnStmt(rc, ASMSetFlags(fEFlags), rc);
-
-    hmR0SvmExportGuestSegmentRegs(pVCpu, pVmcbNstGst);
-    hmR0SvmExportGuestMsrs(pVCpu, pVmcbNstGst);
-    hmR0SvmExportGuestHwvirtStateNested(pVCpu, pVmcbNstGst);
-
-    ASMSetFlags(fEFlags);
-
-    /* Nested VGIF not supported yet. */
-    Assert(!pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable);
-
-    rc = hmR0SvmSelectVMRunHandler(pVCpu);
-    AssertRCReturn(rc, rc);
-
-    /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
-    ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~(   HM_CHANGED_GUEST_RIP
-                                                  |  HM_CHANGED_GUEST_RFLAGS
-                                                  |  HM_CHANGED_GUEST_GPRS_MASK
-                                                  |  HM_CHANGED_GUEST_APIC_TPR
-                                                  |  HM_CHANGED_GUEST_X87
-                                                  |  HM_CHANGED_GUEST_SSE_AVX
-                                                  |  HM_CHANGED_GUEST_OTHER_XSAVE
-                                                  |  HM_CHANGED_GUEST_XCRx
-                                                  |  HM_CHANGED_GUEST_TSC_AUX
-                                                  |  HM_CHANGED_GUEST_OTHER_MSRS
-                                                  |  HM_CHANGED_SVM_XCPT_INTERCEPTS
-                                                  | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_MASK)));
-
-#ifdef VBOX_STRICT
-    /*
-     * All of the guest-CPU state and SVM keeper bits should be exported here by now, except
-     * for the host-context and/or shared host-guest context bits.
-     */
-    uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
-    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
-    AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)),
-              ("fCtxChanged=%#RX64\n", fCtxChanged));
-
-    /*
-     * If we need to log state that isn't always imported, we'll need to import them here.
-     * See hmR0SvmPostRunGuest() for which part of the state is imported uncondtionally.
-     */
-    hmR0SvmLogState(pVCpu, pVmcbNstGst, "hmR0SvmExportGuestStateNested", 0 /* fFlags */, 0 /* uVerbose */);
-#endif
-
-    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
-    return rc;
+}
 }
 #endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
@@ -4108,11 +4042,14 @@
 
     /*
+     * Set up the nested-guest VMCB for execution using hardware-assisted SVM.
+     */
+    if (pSvmTransient->fIsNestedGuest)
+        hmR0SvmSetupVmcbNested(pVCpu);
+
+    /*
      * Export the guest state bits that are not shared with the host in any way as we can
      * longjmp or get preempted in the midst of exporting some of the state.
      */
-    if (!pSvmTransient->fIsNestedGuest)
-        rc = hmR0SvmExportGuestState(pVCpu);
-    else
-        rc = hmR0SvmExportGuestStateNested(pVCpu);
+    rc = hmR0SvmExportGuestState(pVCpu, pSvmTransient);
     AssertRCReturn(rc, rc);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
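
With the two exporters coalesced, the run loop no longer branches between them; only the nested-VMCB setup stays conditional, hoisted from hmR0SvmExportGuestStateNested() into the caller so hmR0SvmGetCurrentVmcb() hands the export code a fully set-up VMCB. The resulting caller shape, as per the hunk above:

    if (pSvmTransient->fIsNestedGuest)
        hmR0SvmSetupVmcbNested(pVCpu);                   /* nested VMCB first */
    rc = hmR0SvmExportGuestState(pVCpu, pSvmTransient);  /* one path for both */
    AssertRCReturn(rc, rc);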