VirtualBox

Changeset 69764 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp:
Nov 20, 2017 9:14:10 AM
Author:
vboxsync
Message:

VMM: Nested Hw.virt: SVM fixes.

File:
1 edited

Legend:

    Unmodified (context, prefixed with a space)
    Added (prefixed with +)
    Removed (prefixed with -)
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r69763)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r69764)
@@ -2002 +2002 @@
  * hardware-assisted SVM.
  *
+ * @returns true if the VMCB was previously already cached, false otherwise.
  * @param   pCtx            Pointer to the guest-CPU context.
  *
  * @sa      HMSvmNstGstVmExitNotify.
  */
-static void hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
+static bool hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     
@@ -2020 +2021 @@
      * cache and restore it, see AMD spec. 15.25.4 "Nested Paging and VMRUN/#VMEXIT".
      */
-    if (!pNstGstVmcbCache->fValid)
+    bool const fWasCached = pCtx->hwvirt.svm.fHMCachedVmcb;
+    if (!fWasCached)
     {
         pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
     
@@ -2037 +2039 @@
         pNstGstVmcbCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
         pNstGstVmcbCache->NestedPagingCtrl  = pVmcbNstGstCtrl->NestedPaging;
-        pNstGstVmcbCache->fValid            = true;
+        pCtx->hwvirt.svm.fHMCachedVmcb      = true;
         Log4(("hmR0SvmVmRunCacheVmcb: Cached VMCB fields\n"));
     }
+
+    return fWasCached;
 }
     
@@ -2058 +2062 @@
      * First cache the nested-guest VMCB fields we may potentially modify.
      */
-    hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
-
-    /*
-     * The IOPM of the nested-guest can be ignored because the guest always
-     * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
-     * than the nested-guest one and swap it back on the #VMEXIT.
-     */
-    pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
-
-    /*
-     * Load the host-physical address into the MSRPM rather than the nested-guest
-     * physical address (currently we trap all MSRs in the nested-guest).
-     */
-    pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
-
-    /*
-     * Use the same nested-paging as the "outer" guest. We can't dynamically
-     * switch off nested-paging suddenly while executing a VM (see assertion at the
-     * end of Trap0eHandler in PGMAllBth.h).
-     */
-    pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
+    bool const fVmcbCached = hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
+    if (!fVmcbCached)
+    {
+        /*
+         * The IOPM of the nested-guest can be ignored because the guest always
+         * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
+         * than the nested-guest one and swap it back on the #VMEXIT.
+         */
+        pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
+
+        /*
+         * Load the host-physical address into the MSRPM rather than the nested-guest
+         * physical address (currently we trap all MSRs in the nested-guest).
+         */
+        pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
+
+        /*
+         * Use the same nested-paging as the "outer" guest. We can't dynamically
+         * switch off nested-paging suddenly while executing a VM (see assertion at the
+         * end of Trap0eHandler in PGMAllBth.h).
+         */
+        pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
+    }
+    else
+    {
+        Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap);
+        Assert(pVmcbNstGstCtrl->u64MSRPMPhysAddr == g_HCPhysNstGstMsrBitmap);
+        Assert(pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging == pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
+    }
 }
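For orientation, here is a minimal, self-contained sketch of the cache-once pattern the hunk above converges on: the one-time VMCB setup is done only when hmR0SvmVmRunCacheVmcb reports the VMCB was not yet cached, and subsequent entries merely assert that the fields still hold the expected values. All types, globals and the helper name below are simplified stand-ins for illustration, not the real VirtualBox definitions.

    /* Hypothetical, simplified illustration of the cache-once pattern above. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct VMCBCTRL
    {
        uint64_t u64IOPMPhysAddr;   /* I/O permission bitmap address used by hardware. */
        uint64_t u64MSRPMPhysAddr;  /* MSR permission bitmap address used by hardware. */
        bool     fNestedPaging;     /* Nested-paging control bit. */
    } VMCBCTRL;

    static const uint64_t g_HCPhysIOBitmap        = 0x1000; /* stand-in host-physical addresses */
    static const uint64_t g_HCPhysNstGstMsrBitmap = 0x2000;

    static bool g_fHMCachedVmcb = false; /* plays the role of pCtx->hwvirt.svm.fHMCachedVmcb */

    static void setupVmcbForHwSvm(VMCBCTRL *pCtrl, bool fHostNestedPaging)
    {
        bool const fWasCached = g_fHMCachedVmcb;
        if (!fWasCached)
        {
            /* First VMRUN of this nested-guest session: take over the fields
               (the real code also saves the guest's original values in a cache). */
            pCtrl->u64IOPMPhysAddr  = g_HCPhysIOBitmap;
            pCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
            pCtrl->fNestedPaging    = fHostNestedPaging;
            g_fHMCachedVmcb = true;
        }
        else
        {
            /* Re-entry without an intervening #VMEXIT restore: fields must be untouched. */
            assert(pCtrl->u64IOPMPhysAddr  == g_HCPhysIOBitmap);
            assert(pCtrl->u64MSRPMPhysAddr == g_HCPhysNstGstMsrBitmap);
            assert(pCtrl->fNestedPaging    == fHostNestedPaging);
        }
    }

    int main(void)
    {
        VMCBCTRL Ctrl = { 0, 0, false };
        setupVmcbForHwSvm(&Ctrl, true);   /* performs the one-time setup */
        setupVmcbForHwSvm(&Ctrl, true);   /* setup is skipped, only asserts run */
        printf("IOPM=%#llx MSRPM=%#llx NP=%d\n",
               (unsigned long long)Ctrl.u64IOPMPhysAddr,
               (unsigned long long)Ctrl.u64MSRPMPhysAddr, Ctrl.fNestedPaging);
        return 0;
    }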
     
@@ -2097 +2109 @@
     PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     Assert(pVmcbNstGst);
-
-    hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);

     hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
     
@@ -2541 +2551 @@
     /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
     VMMRZCallRing3Disable(pVCpu);
-    Log4(("hmR0SvmExitToRing3: rcExit=%d\n", rcExit));
+    Log4(("hmR0SvmExitToRing3: VCPU[%u]: rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", pVCpu->idCpu, rcExit,
+          pVCpu->fLocalForcedActions, pVM->fGlobalForcedActions));

     /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
     
@@ -2570 +2581 @@
     if (rcExit != VINF_EM_RAW_INTERRUPT)
         HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    /*
+     * We may inspect the nested-guest VMCB state in ring-3 (e.g. for injecting interrupts)
+     * and thus we need to restore any modifications we may have made to it here if we're
+     * still executing the nested-guest.
+     */
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+        HMSvmNstGstVmExitNotify(pVCpu, pCtx);
+#endif

     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
     
@@ -3021 +3042 @@

 #ifdef VBOX_WITH_NESTED_HWVIRT
+/**
+ * Checks whether the SVM nested-guest is in a state to receive physical (APIC)
+ * interrupts.
+ *
+ * @returns true if it's ready, false otherwise.
+ * @param   pCtx        The guest-CPU context.
+ *
+ * @remarks This function looks at the VMCB cache rather than directly at the
+ *          nested-guest VMCB which may have been suitably modified for executing
+ *          using hardware-assisted SVM.
+ */
+static bool hmR0SvmCanNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx)
+{
+    Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    X86EFLAGS fEFlags;
+    if (pVmcbNstGstCache->fVIntrMasking)
+        fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
+    else
+        fEFlags.u = pCtx->eflags.u;
+
+    return fEFlags.Bits.u1IF;
+}
+
+
 /**
  * Evaluates the event to be delivered to the nested-guest and sets it as the
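A small self-contained sketch of the deliverability rule the new helper implements may help: when the nested-guest VMCB has V_INTR_MASKING set, physical (APIC) interrupt delivery is governed by the host RFLAGS.IF saved at VMRUN, otherwise by the nested-guest's own RFLAGS.IF. Everything below is a simplified stand-in for illustration, not VirtualBox code.

    /* Hypothetical, simplified illustration of the V_INTR_MASKING rule. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define X86_EFL_IF  (1u << 9)   /* RFLAGS interrupt-enable flag */

    typedef struct NSTGSTSTATE
    {
        bool     fVIntrMasking;     /* cached V_INTR_MASKING bit from the VMCB */
        uint32_t fHostSavedEFlags;  /* host RFLAGS saved in the host-state area at VMRUN */
        uint32_t fGuestEFlags;      /* nested-guest RFLAGS */
    } NSTGSTSTATE;

    static bool canNstGstTakePhysIntr(const NSTGSTSTATE *pState)
    {
        /* Pick whichever RFLAGS image gates physical interrupts for this configuration. */
        uint32_t const fEFlags = pState->fVIntrMasking ? pState->fHostSavedEFlags
                                                       : pState->fGuestEFlags;
        return (fEFlags & X86_EFL_IF) != 0;
    }

    int main(void)
    {
        NSTGSTSTATE Masked   = { true,  X86_EFL_IF, 0          }; /* host IF=1, guest IF=0 */
        NSTGSTSTATE Unmasked = { false, 0,          X86_EFL_IF }; /* host IF=0, guest IF=1 */
        printf("V_INTR_MASKING=1: deliverable=%d\n", canNstGstTakePhysIntr(&Masked));
        printf("V_INTR_MASKING=0: deliverable=%d\n", canNstGstTakePhysIntr(&Unmasked));
        return 0;
    }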
     
@@ -3088 +3134 @@
          * see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
          */
+        PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+        Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
         if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
             && !fIntShadow
             && !pVCpu->hm.s.fSingleInstruction
-            && CPUMCanSvmNstGstTakePhysIntr(pCtx))
-        {
-            if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_INTR))
+            && hmR0SvmCanNstGstTakePhysIntr(pVCpu, pCtx))
+        {
+            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_INTR)
             {
                 Log4(("Intercepting external interrupt -> #VMEXIT\n"));
     
@@ -3128 +3176 @@
         /*
          * Check if the nested-guest can receive virtual (injected by VMRUN) interrupts.
-         * We can call CPUMCanSvmNstGstTakeVirtIntr here as we don't cache/modify any
+         * We can safely call CPUMCanSvmNstGstTakeVirtIntr here as we don't cache/modify any
          * nested-guest VMCB interrupt control fields besides V_INTR_MASKING, see hmR0SvmVmRunCacheVmcb.
          */
-        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
-            && CPUMCanSvmNstGstTakeVirtIntr(pCtx)
-            && CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VINTR))
+        if (   (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
+            && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
+            && CPUMCanSvmNstGstTakeVirtIntr(pCtx))
         {
             Log4(("Intercepting virtual interrupt -> #VMEXIT\n"));
     
@@ -3569 +3617 @@
         return rc;

+    hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
+
     if (TRPMHasTrap(pVCpu))
         hmR0SvmTrpmTrapToPendingEvent(pVCpu);
     
@@ -3598 +3648 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);

-    Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fValid);
+    Assert(pCtx->hwvirt.svm.fHMCachedVmcb);

     /*
     
@@ -4133 +4183 @@
     hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcbNstGst);       /* Save the nested-guest state from the VMCB to the
                                                                    guest-CPU context. */
-
-    /*
-     * Currently, reload the entire nested-guest VMCB due to code that directly inspects
-     * the nested-guest VMCB instead of the cache, e.g. hmR0SvmEvaluatePendingEventNested.
-     */
-    HMSvmNstGstVmExitNotify(pVCpu, pVmcbNstGst);
-    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
 }
 #endif