VirtualBox

Changeset 71755 in vbox


Timestamp: Apr 9, 2018 8:10:23 AM (7 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 121857
Message:

VMM: Nested Hw.virt: Fix overriding the SVM nested-guest PAT MSR while executing the nested-guest with shadow paging.
Also fixes loading, validating and restoring the PAT MSR when nested-paging is used by the nested-hypervisor.
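
In short, PAT writes are now validated byte by byte (CPUMIsPatMsrValid in the diff below). A minimal standalone sketch of that check, assuming only standard C and not the exact VBox source:

    #include <stdbool.h>
    #include <stdint.h>

    /* Each of the eight PAT bytes must encode a valid memory type; types 2, 3
     * and anything >= 8 are reserved, and writing them must raise #GP(0). */
    static bool IsPatMsrValueValid(uint64_t uValue)
    {
        for (unsigned cShift = 0; cShift < 64; cShift += 8)
        {
            uint8_t uType = (uint8_t)(uValue >> cShift);
            if (uType >= 8 || uType == 2 || uType == 3)
                return false;
        }
        return true;
    }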

Location: trunk
Files: 9 edited

  • trunk/include/VBox/vmm/cpum.h

    r71643 r71755

    @@ -1370 +1370 @@
     #ifndef IN_RC
     /**
    - * Checks if the guest VMCB has the specified ctrl/instruction intercept active.
    + * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
    + * active.
      *
      * @returns @c true if in intercept is set, @c false otherwise.
    @@ -1389 +1390 @@

     /**
    - * Checks if the guest VMCB has the specified CR read intercept active.
    + * Checks if the nested-guest VMCB has the specified CR read intercept active.
      *
      * @returns @c true if in intercept is set, @c false otherwise.
    @@ -1408 +1409 @@

     /**
    - * Checks if the guest VMCB has the specified CR write intercept active.
    + * Checks if the nested-guest VMCB has the specified CR write intercept active.
      *
      * @returns @c true if in intercept is set, @c false otherwise.
    @@ -1427 +1428 @@

     /**
    - * Checks if the guest VMCB has the specified DR read intercept active.
    + * Checks if the nested-guest VMCB has the specified DR read intercept active.
      *
      * @returns @c true if in intercept is set, @c false otherwise.
    @@ -1446 +1447 @@

     /**
    - * Checks if the guest VMCB has the specified DR write intercept active.
    + * Checks if the nested-guest VMCB has the specified DR write intercept active.
      *
      * @returns @c true if in intercept is set, @c false otherwise.
    @@ -1465 +1466 @@

     /**
    - * Checks if the guest VMCB has the specified exception intercept active.
    + * Checks if the nested-guest VMCB has the specified exception intercept active.
      *
      * @returns @c true if in intercept is active, @c false otherwise.
    @@ -1484 +1485 @@

     /**
    - * Checks if the guest VMCB has virtual-interrupt masking enabled.
    + * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
      *
      * @returns @c true if virtual-interrupts are masked, @c false otherwise.
    @@ -1499 +1500 @@
             return pVmcb->ctrl.IntCtrl.n.u1VIntrMasking;
         return HMIsGuestSvmVirtIntrMasking(pVCpu, pCtx);
    +}
    +
    +/**
    + * Checks if the nested-guest VMCB has nested-paging enabled.
    + *
    + * @returns @c true if nested-paging is enabled, @c false otherwise.
    + * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    + * @param   pCtx        Pointer to the context.
    + *
    + * @remarks Should only be called when SVM feature is exposed to the guest.
    + */
    +DECLINLINE(bool) CPUMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx)
    +{
    +    PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    +    Assert(pVmcb);
    +    if (!pCtx->hwvirt.svm.fHMCachedVmcb)
    +        return pVmcb->ctrl.NestedPaging.n.u1NestedPaging;
    +    return HMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx);
     }

    @@ -1702 +1721 @@
                                                         uint64_t *puValidEfer);
     VMMDECL(void)           CPUMSetGuestMsrEferNoCheck(PVMCPU pVCpu, uint64_t uOldEfer, uint64_t uValidEfer);
    -
    +VMMDECL(bool)           CPUMIsPatMsrValid(uint64_t uValue);

     /** @name Typical scalable bus frequency values.
  • trunk/include/VBox/vmm/hm_svm.h

    r71640 r71755

    @@ -903 +903 @@
         /** Offset 0x648-0x667 - Reserved. */
         uint8_t     u8Reserved9[0x668 - 0x648];
    -    /** Offset 0x668 - G_PAT. */
    -    uint64_t    u64GPAT;
    +    /** Offset 0x668 - PAT (Page Attribute Table) MSR. */
    +    uint64_t    u64PAT;
         /** Offset 0x670 - DBGCTL. */
         uint64_t    u64DBGCTL;
    @@ -958 +958 @@
     AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64CR2,          0x640 - 0x400);
     AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved9,     0x648 - 0x400);
    -AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64GPAT,         0x668 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64PAT,          0x668 - 0x400);
     AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64DBGCTL,       0x670 - 0x400);
     AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64BR_FROM,      0x678 - 0x400);
    @@ -1051 +1051 @@
         /** Cache of DBGCTL. */
         uint64_t            u64DBGCTL;
    +    /** Cache of the PAT MSR. */
    +    uint64_t            u64PAT;
         /** @} */

    @@ -1138 +1140 @@
     VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector);
     VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu, PCCPUMCTX pCtx);
    +VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx);
     VMM_INT_DECL(bool) HMCanSvmNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
     VMM_INT_DECL(bool) HMCanSvmNstGstTakeVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
  • trunk/include/iprt/x86.h

    r70913 r71755

    @@ -1236 +1236 @@
     /** Page Attribute Table. */
     #define MSR_IA32_CR_PAT                     0x277
    +/** Default PAT MSR value on processor powerup / reset (see Intel spec. 11.12.4
    + *  "Programming the PAT", AMD spec. 7.8.2 "PAT Indexing") */
    +#define MSR_IA32_CR_PAT_INIT_VAL            UINT64_C(0x0007040600070406)

     /** Performance counter MSRs. (Intel only) */
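
The new MSR_IA32_CR_PAT_INIT_VAL constant is the architectural power-up value: entries PA0..PA3 are WB, WT, UC-, UC, repeated for PA4..PA7. A small illustration of decoding it (PatEntry is a hypothetical helper written for this page, not a VBox API):

    #include <stdint.h>
    #include <stdio.h>

    /* Extract PAT entry iEntry (0..7); only the low 3 bits of each byte are used,
     * the upper 5 bits of each byte are reserved. */
    static unsigned PatEntry(uint64_t uPat, unsigned iEntry)
    {
        return (unsigned)(uPat >> (iEntry * 8)) & 0x7;
    }

    int main(void)
    {
        const uint64_t uPatInit = UINT64_C(0x0007040600070406); /* MSR_IA32_CR_PAT_INIT_VAL */
        /* Prints 6 (WB), 4 (WT), 7 (UC-), 0 (UC), then the same again for PA4..PA7. */
        for (unsigned i = 0; i < 8; i++)
            printf("PA%u = %u\n", i, PatEntry(uPatInit, i));
        return 0;
    }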
  • trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp

    r71108 r71755

    @@ -551 +551 @@
     {
         RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
    -
    -    for (uint32_t cShift = 0; cShift < 63; cShift += 8)
    +    if (CPUMIsPatMsrValid(uValue))
         {
    -        /* Check all eight bits because the top 5 bits of each byte are reserved. */
    -        uint8_t uType = (uint8_t)(uValue >> cShift);
    -        if ((uType >= 8) || (uType == 2) || (uType == 3))
    -        {
    -            Log(("CPUM: Invalid PAT type at %u:%u in IA32_PAT: %#llx (%#llx)\n",
    -                 cShift + 7, cShift, uValue, uType));
    -            return VERR_CPUM_RAISE_GP_0;
    -        }
    +        pVCpu->cpum.s.Guest.msrPAT = uValue;
    +        return VINF_SUCCESS;
         }
    -
    -    pVCpu->cpum.s.Guest.msrPAT = uValue;
    -    return VINF_SUCCESS;
    +    return VERR_CPUM_RAISE_GP_0;
     }

    @@ -6209 +6200 @@

     /**
    + * Checks if a guest PAT MSR write is valid.
    + *
    + * @returns @c true if the PAT bit combination is valid, @c false otherwise.
    + * @param   uValue      The PAT MSR value.
    + */
    +VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue)
    +{
    +    for (uint32_t cShift = 0; cShift < 63; cShift += 8)
    +    {
    +        /* Check all eight bits because the top 5 bits of each byte are reserved. */
    +        uint8_t uType = (uint8_t)(uValue >> cShift);
    +        if ((uType >= 8) || (uType == 2) || (uType == 3))
    +        {
    +            Log(("CPUM: Invalid PAT type at %u:%u in IA32_PAT: %#llx (%#llx)\n", cShift + 7, cShift, uValue, uType));
    +            return false;
    +        }
    +    }
    +    return true;
    +}
    +
    +
    +/**
      * Validates an EFER MSR write.
      *
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r71640 r71755

    @@ -132 +132 @@
      * in IEM).
      *
    - * @param   pVCpu           The cross context virtual CPU structure.
    - * @param   pCtx            Pointer to the guest-CPU context.
    + * @param   pVCpu   The cross context virtual CPU structure.
    + * @param   pCtx    Pointer to the guest-CPU context.
      *
      * @sa      hmR0SvmVmRunCacheVmcb.
    @@ -169 +169 @@
             pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking      = pNstGstVmcbCache->fVIntrMasking;
             pVmcbNstGstCtrl->TLBCtrl                       = pNstGstVmcbCache->TLBCtrl;
    +
    +        /*
    +         * If the nested-hypervisor isn't using nested-paging (and thus shadow paging
    +         * is used by HM), we restore the original PAT MSR from the nested-guest VMCB.
    +         * Otherwise, the nested-guest-CPU PAT MSR would've already been saved here by
    +         * hardware-assisted SVM or by IEM.
    +         */
    +        if (!pNstGstVmcbCache->u1NestedPaging)
    +            pVmcbNstGstState->u64PAT = pNstGstVmcbCache->u64PAT;
    +
             pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pNstGstVmcbCache->u1NestedPaging;
             pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt           = pNstGstVmcbCache->u1LbrVirt;
    @@ -177 +187 @@
          * Currently, VMRUN, #VMEXIT transitions involves trips to ring-3 that would flag a full
          * CPU state change. However, if we exit to ring-3 in response to receiving a physical
    -     * interrupt, we skip signaling any CPU state change as normally no change
    -     * is done to the execution state (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
    -     * However, with nested-guests, the state can change for e.g., we might perform a
    -     * SVM_EXIT_INTR #VMEXIT for the nested-guest in ring-3. Hence we signal a full CPU
    -     * state change here.
    +     * interrupt, we skip signaling any CPU state change as normally no change is done to the
    +     * execution state (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
    +     *
    +     * With nested-guests, the state can change on trip to ring-3 for e.g., we might perform a
    +     * SVM_EXIT_INTR #VMEXIT for the nested-guest in ring-3. Hence we signal a full CPU state
    +     * change here.
          */
         HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    @@ -439 +450 @@

     /**
    - * Checks if the guest VMCB has the specified ctrl/instruction intercept active.
    + * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
    + * active.
      *
      * @returns @c true if in intercept is set, @c false otherwise.
    @@ -456 +468 @@

     /**
    - * Checks if the guest VMCB has the specified CR read intercept active.
    + * Checks if the nested-guest VMCB has the specified CR read intercept active.
      *
      * @returns @c true if in intercept is set, @c false otherwise.
    @@ -473 +485 @@

     /**
    - * Checks if the guest VMCB has the specified CR write intercept
    - * active.
    + * Checks if the nested-guest VMCB has the specified CR write intercept active.
      *
      * @returns @c true if in intercept is set, @c false otherwise.
    @@ -491 +502 @@

     /**
    - * Checks if the guest VMCB has the specified DR read intercept
    - * active.
    + * Checks if the nested-guest VMCB has the specified DR read intercept active.
      *
      * @returns @c true if in intercept is set, @c false otherwise.
    @@ -509 +519 @@

     /**
    - * Checks if the guest VMCB has the specified DR write intercept active.
    + * Checks if the nested-guest VMCB has the specified DR write intercept active.
      *
      * @returns @c true if in intercept is set, @c false otherwise.
    @@ -526 +536 @@

     /**
    - * Checks if the guest VMCB has the specified exception intercept active.
    + * Checks if the nested-guest VMCB has the specified exception intercept active.
      *
      * @returns true if in intercept is active, false otherwise.
    @@ -543 +553 @@

     /**
    - * Checks if the guest VMCB has virtual-interrupts masking enabled.
    + * Checks if the nested-guest VMCB has virtual-interrupts masking enabled.
      *
      * @returns true if virtual-interrupts are masked, @c false otherwise.
    @@ -554 +564 @@
         PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
         return pVmcbNstGstCache->fVIntrMasking;
    +}
    +
    +
    +/**
    + * Checks if the nested-guest VMCB has nested-paging enabled.
    + *
    + * @returns true if nested-paging is enabled, @c false otherwise.
    + * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    + * @param   pCtx    Pointer to the context.
    + */
    +VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx)
    +{
    +    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    +    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    +    return RT_BOOL(pVmcbNstGstCache->u1NestedPaging);
     }

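Taken together with the caching added in HMSVMR0.cpp further down, the #VMEXIT side of the fix is: when the nested-hypervisor was not using nested paging (so HM ran the nested-guest with shadow paging and overrode the VMCB PAT), the original PAT value is put back from the VMCB cache. A simplified sketch using the structures from hm_svm.h shown above (the helper name is hypothetical, not a VBox function):

    /* On #VMEXIT, undo HM's PAT override for the shadow-paging case so the
     * nested-hypervisor sees the PAT value its nested-guest VMCB originally held. */
    static void svmNstGstRestorePatOnVmexit(SVMNESTEDVMCBCACHE const *pCache,
                                            SVMVMCBSTATESAVE *pVmcbNstGstState)
    {
        if (!pCache->u1NestedPaging)   /* nested-hypervisor relied on shadow paging */
            pVmcbNstGstState->u64PAT = pCache->u64PAT;
    }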
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h

    r71530 r71755

    @@ -152 +152 @@
             pVmcbNstGstState->u8CPL         = pCtx->ss.Attr.n.u2Dpl;   /* See comment in CPUMGetGuestCPL(). */
             Assert(CPUMGetGuestCPL(pVCpu) == pCtx->ss.Attr.n.u2Dpl);
    +        if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx))
    +            pVmcbNstGstState->u64PAT = pCtx->msrPAT;

             PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
    @@ -180 +182 @@
             else
                 pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
    -
    -        /** @todo NRIP. */

             /* Save exit information. */
    @@ -459 +459 @@
             }

    -        /** @todo gPAT MSR validation? */
    +        /*
    +         * PAT (Page Attribute Table) MSR.
    +         *
    +         * The CPU only validates and loads it when nested-paging is enabled.
    +         * See AMD spec. "15.25.4 Nested Paging and VMRUN/#VMEXIT".
    +         */
    +        if (   pVmcbCtrl->NestedPaging.n.u1NestedPaging
    +            && !CPUMIsPatMsrValid(pVmcbNstGst->u64PAT))
    +        {
    +            Log(("iemSvmVmrun: PAT invalid. u64PAT=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64PAT));
    +            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    +        }

             /*
    @@ -614 +625 @@
             pCtx->rip        = pVmcbNstGst->u64RIP;
             CPUMSetGuestMsrEferNoCheck(pVCpu, pCtx->msrEFER, uValidEfer);
    +        if (pVmcbCtrl->NestedPaging.n.u1NestedPaging)
    +            pCtx->msrPAT = pVmcbNstGst->u64PAT;

             /* Mask DR6, DR7 bits mandatory set/clear bits. */
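
On the VMRUN side, the CPU validates and loads the nested-guest PAT only when nested paging is enabled (AMD spec. 15.25.4), which is what the IEM changes above model. Condensed into a standalone sketch (hypothetical helper written for this page; IsPatMsrValueValid() is the sketch shown near the top):

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns 0 on success, -1 when VMRUN must fail with #VMEXIT(SVM_EXIT_INVALID). */
    static int svmVmrunHandlePat(bool fNestedPaging, uint64_t uNstGstPat, uint64_t *pmsrPAT)
    {
        if (fNestedPaging)
        {
            if (!IsPatMsrValueValid(uNstGstPat))
                return -1;                 /* invalid PAT -> #VMEXIT(SVM_EXIT_INVALID) */
            *pmsrPAT = uNstGstPat;         /* the CPU loads PAT only with nested paging */
        }
        return 0;                          /* without nested paging, the guest PAT is untouched */
    }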
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r71697 r71755

    @@ -1001 +1001 @@
             /*
              * Setup the PAT MSR (applicable for Nested Paging only).
    -         * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
    -         * so choose type 6 for all PAT slots.
    +         *
    +         * While guests can modify and see the modified values throug the shadow values,
    +         * we shall not honor any guest modifications of this MSR to ensure caching is always
    +         * enabled similar to how we always run with CR0.CD and NW bits cleared.
              */
    -        pVmcb->guest.u64GPAT = UINT64_C(0x0006060606060606);
    +        pVmcb->guest.u64PAT = MSR_IA32_CR_PAT_INIT_VAL;

             /* Setup Nested Paging. This doesn't change throughout the execution time of the VM. */
    @@ -1754 +1756 @@
         pVmcb->guest.u64SFMASK       = pCtx->msrSFMASK;
         pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
    +
    +    /* We don't honor guest modifications to its PAT MSR (similar to ignoring CR0.CD, NW bits). */
     }

    @@ -2417 +2421 @@
             pVmcbNstGstCache->u64CR4            = pVmcbNstGstState->u64CR4;
             pVmcbNstGstCache->u64EFER           = pVmcbNstGstState->u64EFER;
    +        pVmcbNstGstCache->u64PAT            = pVmcbNstGstState->u64PAT;
             pVmcbNstGstCache->u64DBGCTL         = pVmcbNstGstState->u64DBGCTL;
             pVmcbNstGstCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
    @@ -2470 +2475 @@
             pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pVmcb->ctrl.LbrVirt.n.u1LbrVirt;
             pVmcbNstGst->guest.u64DBGCTL = pVmcb->guest.u64DBGCTL;
    +
    +        /* Override nested-guest PAT MSR, see @bugref{7243#c109}. */
    +        pVmcbNstGst->guest.u64PAT = MSR_IA32_CR_PAT_INIT_VAL;
         }
         else
    @@ -3919 +3927 @@
             Log4(("guest.u64SFMASK                   %#RX64\n",   pVmcb->guest.u64SFMASK));
             Log4(("guest.u64KernelGSBase             %#RX64\n",   pVmcb->guest.u64KernelGSBase));
    -        Log4(("guest.u64GPAT                     %#RX64\n",   pVmcb->guest.u64GPAT));
    +        Log4(("guest.u64PAT                      %#RX64\n",   pVmcb->guest.u64PAT));
             Log4(("guest.u64DBGCTL                   %#RX64\n",   pVmcb->guest.u64DBGCTL));
             Log4(("guest.u64BR_FROM                  %#RX64\n",   pVmcb->guest.u64BR_FROM));
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r71443 r71755

    @@ -1231 +1231 @@
          */
         /* Init PAT MSR */
    -    pCtx->msrPAT                    = UINT64_C(0x0007040600070406); /** @todo correct? */
    +    pCtx->msrPAT                    = MSR_IA32_CR_PAT_INIT_VAL;

         /* EFER MBZ; see AMD64 Architecture Programmer's Manual Volume 2: Table 14-1. Initial Processor State.
    @@ -2359 +2359 @@
         pHlp->pfnPrintf(pHlp, "%su64SysEnterESP             = %#RX64\n", pszPrefix, pVmcbStateSave->u64SysEnterESP);
         pHlp->pfnPrintf(pHlp, "%su64CR2                     = %#RX64\n", pszPrefix, pVmcbStateSave->u64CR2);
    -    pHlp->pfnPrintf(pHlp, "%su64GPAT                    = %#RX64\n", pszPrefix, pVmcbStateSave->u64GPAT);
    +    pHlp->pfnPrintf(pHlp, "%su64PAT                     = %#RX64\n", pszPrefix, pVmcbStateSave->u64PAT);
         pHlp->pfnPrintf(pHlp, "%su64DBGCTL                  = %#RX64\n", pszPrefix, pVmcbStateSave->u64DBGCTL);
         pHlp->pfnPrintf(pHlp, "%su64BR_FROM                 = %#RX64\n", pszPrefix, pVmcbStateSave->u64BR_FROM);
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r71415 r71755

    @@ -3534 +3534 @@
             rc = SSMR3PutU32(pSSM, pPatch->cFaults);
             AssertRCReturn(rc, rc);
    +        /** @todo We need to save SVMNESTEDVMCBCACHE (if pCtx fHMCached is true as we
    +         *        are in nested-geust execution and the cache contains pristine
    +         *        fields that we only restore on #VMEXIT and not on
    +         *        every exit-to-ring 3. */
         }
     #endif
    @@ -3553 +3557 @@
         int rc;

    -    Log(("hmR3Load:\n"));
    +    LogFlowFunc(("uVersion=%u\n", uVersion));
         Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
