VirtualBox

Changeset 71755 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Apr 9, 2018 8:10:23 AM
Author: vboxsync
Message:

VMM: Nested Hw.virt: Fix overriding the SVM nested-guest PAT MSR while executing the nested-guest with shadow paging.
Also fixes loading, validating and restoring the PAT MSR when nested-paging is used by the nested-hypervisor.

Location: trunk/src/VBox/VMM/VMMAll
Files: 3 edited

Legend:

In the diffs below, unchanged context lines have no prefix, removed lines are prefixed with "-", and added lines with "+".
  • trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp (r71108 → r71755)

@@ -551,19 +551,10 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
-
-    for (uint32_t cShift = 0; cShift < 63; cShift += 8)
+    if (CPUMIsPatMsrValid(uValue))
     {
-        /* Check all eight bits because the top 5 bits of each byte are reserved. */
-        uint8_t uType = (uint8_t)(uValue >> cShift);
-        if ((uType >= 8) || (uType == 2) || (uType == 3))
-        {
-            Log(("CPUM: Invalid PAT type at %u:%u in IA32_PAT: %#llx (%#llx)\n",
-                 cShift + 7, cShift, uValue, uType));
-            return VERR_CPUM_RAISE_GP_0;
-        }
+        pVCpu->cpum.s.Guest.msrPAT = uValue;
+        return VINF_SUCCESS;
     }
-
-    pVCpu->cpum.s.Guest.msrPAT = uValue;
-    return VINF_SUCCESS;
+    return VERR_CPUM_RAISE_GP_0;
 }
 

@@ -6209,4 +6200,26 @@
 
 /**
+ * Checks if a guest PAT MSR write is valid.
+ *
+ * @returns @c true if the PAT bit combination is valid, @c false otherwise.
+ * @param   uValue      The PAT MSR value.
+ */
+VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue)
+{
+    for (uint32_t cShift = 0; cShift < 63; cShift += 8)
+    {
+        /* Check all eight bits because the top 5 bits of each byte are reserved. */
+        uint8_t uType = (uint8_t)(uValue >> cShift);
+        if ((uType >= 8) || (uType == 2) || (uType == 3))
+        {
+            Log(("CPUM: Invalid PAT type at %u:%u in IA32_PAT: %#llx (%#llx)\n", cShift + 7, cShift, uValue, uType));
+            return false;
+        }
+    }
+    return true;
+}
+
+
+/**
  * Validates an EFER MSR write.
  *
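
As background for the change above: IA32_PAT packs eight one-byte page-attribute entries (PA0..PA7). Only the low three bits of each byte encode a memory type (0 = UC, 1 = WC, 4 = WT, 5 = WP, 6 = WB, 7 = UC-), types 2 and 3 are reserved, and the upper five bits of each byte must be zero. That per-byte rule is exactly what the new CPUMIsPatMsrValid() helper enforces. A minimal standalone sketch of the same rule (the IsPatValueValid name and the main() driver are illustrative only, not part of the changeset):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Standalone restatement of the CPUMIsPatMsrValid rule: each of the eight
       PAT bytes must be one of the defined memory types 0, 1, 4, 5, 6 or 7;
       types 2 and 3 and any byte with reserved high bits set are invalid. */
    static bool IsPatValueValid(uint64_t uValue)
    {
        for (unsigned iEntry = 0; iEntry < 8; iEntry++)
        {
            uint8_t uType = (uint8_t)(uValue >> (iEntry * 8));
            if (uType >= 8 || uType == 2 || uType == 3)
                return false;
        }
        return true;
    }

    int main(void)
    {
        /* The architectural power-on value of IA32_PAT is valid... */
        printf("%d\n", IsPatValueValid(UINT64_C(0x0007040600070406))); /* 1 */
        /* ...while a byte holding reserved type 3 (here PA0) is rejected. */
        printf("%d\n", IsPatValueValid(UINT64_C(0x0007040600070403))); /* 0 */
        return 0;
    }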
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp (r71640 → r71755)

@@ -132,6 +132,6 @@
  * in IEM).
  *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the guest-CPU context.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pCtx    Pointer to the guest-CPU context.
  *
  * @sa      hmR0SvmVmRunCacheVmcb.

@@ -169,4 +169,14 @@
         pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking      = pNstGstVmcbCache->fVIntrMasking;
         pVmcbNstGstCtrl->TLBCtrl                       = pNstGstVmcbCache->TLBCtrl;
+
+        /*
+         * If the nested-hypervisor isn't using nested-paging (and thus shadow paging
+         * is used by HM), we restore the original PAT MSR from the nested-guest VMCB.
+         * Otherwise, the nested-guest-CPU PAT MSR would've already been saved here by
+         * hardware-assisted SVM or by IEM.
+         */
+        if (!pNstGstVmcbCache->u1NestedPaging)
+            pVmcbNstGstState->u64PAT = pNstGstVmcbCache->u64PAT;
+
         pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pNstGstVmcbCache->u1NestedPaging;
         pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt           = pNstGstVmcbCache->u1LbrVirt;

@@ -177,9 +187,10 @@
      * Currently, VMRUN, #VMEXIT transitions involves trips to ring-3 that would flag a full
      * CPU state change. However, if we exit to ring-3 in response to receiving a physical
-     * interrupt, we skip signaling any CPU state change as normally no change
-     * is done to the execution state (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
-     * However, with nested-guests, the state can change for e.g., we might perform a
-     * SVM_EXIT_INTR #VMEXIT for the nested-guest in ring-3. Hence we signal a full CPU
-     * state change here.
+     * interrupt, we skip signaling any CPU state change as normally no change is done to the
+     * execution state (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
+     *
+     * With nested-guests, the state can change on trip to ring-3 for e.g., we might perform a
+     * SVM_EXIT_INTR #VMEXIT for the nested-guest in ring-3. Hence we signal a full CPU state
+     * change here.
      */
     HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);

@@ -439,5 +450,6 @@
 
 /**
- * Checks if the guest VMCB has the specified ctrl/instruction intercept active.
+ * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
+ * active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.

@@ -456,5 +468,5 @@
 
 /**
- * Checks if the guest VMCB has the specified CR read intercept active.
+ * Checks if the nested-guest VMCB has the specified CR read intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.

@@ -473,6 +485,5 @@
 
 /**
- * Checks if the guest VMCB has the specified CR write intercept
- * active.
+ * Checks if the nested-guest VMCB has the specified CR write intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.

@@ -491,6 +502,5 @@
 
 /**
- * Checks if the guest VMCB has the specified DR read intercept
- * active.
+ * Checks if the nested-guest VMCB has the specified DR read intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.

@@ -509,5 +519,5 @@
 
 /**
- * Checks if the guest VMCB has the specified DR write intercept active.
+ * Checks if the nested-guest VMCB has the specified DR write intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.

@@ -526,5 +536,5 @@
 
 /**
- * Checks if the guest VMCB has the specified exception intercept active.
+ * Checks if the nested-guest VMCB has the specified exception intercept active.
  *
  * @returns true if in intercept is active, false otherwise.

@@ -543,5 +553,5 @@
 
 /**
- * Checks if the guest VMCB has virtual-interrupts masking enabled.
+ * Checks if the nested-guest VMCB has virtual-interrupts masking enabled.
  *
  * @returns true if virtual-interrupts are masked, @c false otherwise.

@@ -554,4 +564,19 @@
     PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     return pVmcbNstGstCache->fVIntrMasking;
+}
+
+
+/**
+ * Checks if the nested-guest VMCB has nested-paging enabled.
+ *
+ * @returns true if nested-paging is enabled, @c false otherwise.
+ * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
+ * @param   pCtx    Pointer to the context.
+ */
+VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx)
+{
+    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    return RT_BOOL(pVmcbNstGstCache->u1NestedPaging);
 }
 
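
The reason for the cache-and-restore above: when the nested hypervisor does not enable nested paging, HM runs the nested-guest under shadow paging and overrides the PAT field in the nested-guest VMCB, so the value the nested hypervisor programmed must be stashed before VMRUN and written back on #VMEXIT; with nested paging, hardware-assisted SVM (or IEM) saves the real value itself. A condensed sketch of that pattern, using hypothetical stand-in types (VMCBSTATE, VMCBCACHE) rather than the real SVMNESTEDVMCBCACHE and VMCB state-save structures:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical, trimmed-down stand-ins for the VMCB state-save area and the
       per-VCPU cache of fields that HM overrides before running the nested-guest. */
    typedef struct { uint64_t u64PAT; } VMCBSTATE;
    typedef struct { uint64_t u64PAT; bool fNestedPaging; } VMCBCACHE;

    /* Before VMRUN: remember the PAT the nested hypervisor put in the VMCB, then
       override it only when shadow paging is in use (i.e. no nested paging). */
    static void CacheAndOverridePat(VMCBSTATE *pVmcb, VMCBCACHE *pCache, uint64_t uShadowPat)
    {
        pCache->u64PAT = pVmcb->u64PAT;
        if (!pCache->fNestedPaging)
            pVmcb->u64PAT = uShadowPat;
    }

    /* On #VMEXIT: undo the override so the nested hypervisor sees the PAT it
       programmed. With nested paging the CPU already saved the real value. */
    static void RestorePat(VMCBSTATE *pVmcb, const VMCBCACHE *pCache)
    {
        if (!pCache->fNestedPaging)
            pVmcb->u64PAT = pCache->u64PAT;
    }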
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h (r71530 → r71755)

@@ -152,4 +152,6 @@
         pVmcbNstGstState->u8CPL         = pCtx->ss.Attr.n.u2Dpl;   /* See comment in CPUMGetGuestCPL(). */
         Assert(CPUMGetGuestCPL(pVCpu) == pCtx->ss.Attr.n.u2Dpl);
+        if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx))
+            pVmcbNstGstState->u64PAT = pCtx->msrPAT;
 
         PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;

@@ -180,6 +182,4 @@
         else
             pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
-
-        /** @todo NRIP. */
 
         /* Save exit information. */

@@ -459,5 +459,16 @@
         }
 
-        /** @todo gPAT MSR validation? */
+        /*
+         * PAT (Page Attribute Table) MSR.
+         *
+         * The CPU only validates and loads it when nested-paging is enabled.
+         * See AMD spec. "15.25.4 Nested Paging and VMRUN/#VMEXIT".
+         */
+        if (   pVmcbCtrl->NestedPaging.n.u1NestedPaging
+            && !CPUMIsPatMsrValid(pVmcbNstGst->u64PAT))
+        {
+            Log(("iemSvmVmrun: PAT invalid. u64PAT=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64PAT));
+            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        }
 
         /*

@@ -614,4 +625,6 @@
         pCtx->rip        = pVmcbNstGst->u64RIP;
         CPUMSetGuestMsrEferNoCheck(pVCpu, pCtx->msrEFER, uValidEfer);
+        if (pVmcbCtrl->NestedPaging.n.u1NestedPaging)
+            pCtx->msrPAT = pVmcbNstGst->u64PAT;
 
         /* Mask DR6, DR7 bits mandatory set/clear bits. */
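
On the VMRUN side (third file above), gPAT is validated and loaded only when the nested hypervisor enables nested paging, and an invalid value fails VMRUN's consistency checks with exit code SVM_EXIT_INVALID (-1, per the AMD spec). A hedged condensation of that flow (VmrunLoadPat is a hypothetical helper; IsPatValueValid refers to the sketch under CPUMAllMsrs.cpp above):

    #include <stdbool.h>
    #include <stdint.h>

    #define SVM_EXIT_INVALID UINT64_C(0xffffffffffffffff) /* -1: VMRUN consistency-check failure */

    /* Defined in the CPUMAllMsrs.cpp sketch earlier in this page. */
    extern bool IsPatValueValid(uint64_t uValue);

    /* Hypothetical condensation of the VMRUN-side handling added in
       IEMAllCImplSvmInstr.cpp.h: gPAT is only checked and loaded when the
       nested hypervisor enables nested paging; otherwise the field is
       ignored and the nested-guest keeps running on the current PAT. */
    static int VmrunLoadPat(bool fNestedPaging, uint64_t u64GuestPat,
                            uint64_t *puCtxPat, uint64_t *puExitCode)
    {
        if (fNestedPaging)
        {
            /* Invalid gPAT: VMRUN fails with #VMEXIT(SVM_EXIT_INVALID). */
            if (!IsPatValueValid(u64GuestPat))
            {
                *puExitCode = SVM_EXIT_INVALID;
                return -1;
            }
            /* Valid gPAT becomes the nested-guest's PAT while in guest mode. */
            *puCtxPat = u64GuestPat;
        }
        return 0;
    }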