VirtualBox

Changeset 71504 in vbox for trunk


Ignore:
Timestamp:
Mar 26, 2018 6:07:57 AM (7 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
121476
Message:

VMM/HMSVMR0: Avoid intercepting CR4 reads when possible. Added todo about updating nested-guest TSC offsetting.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r71476 r71504  
    952952                                       | SVM_CTRL_INTERCEPT_VMMCALL;
    953953
    954         /*
    955          * CR0, CR4 reads/writes must be intercepted, as our shadow values may differ from the guest's.
    956          * These interceptions might be relaxed later during VM execution if the conditions allow.
    957          */
    958         pVmcb->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);
    959         pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);
     954        /* CR4 writes must always be intercepted for tracking PGM mode changes. */
     955        pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(4);
    960956
    961957        /* Intercept all DRx reads and writes by default. Changed later on. */
     
    10851081#endif
    10861082    return pVCpu->hm.s.svm.pVmcb;
     1083}
     1084
     1085
      1086/**
      1087 * Gets a pointer to the nested-guest VMCB cache.
      1088 *
      1089 * @returns Pointer to the nested-guest VMCB cache.
      1090 * @param   pVCpu           The cross context virtual CPU structure.
      1091 * @param   pCtx            Pointer to the guest-CPU context.
       * @remarks Returns NULL when VBOX_WITH_NESTED_HWVIRT is not defined, i.e.
       *          when nested hardware-virtualization support is not compiled in.
       *          Asserts that the VMCB has been cached by HM (fHMCachedVmcb)
       *          before handing out the cache pointer.
      1092 */
      1093DECLINLINE(PSVMNESTEDVMCBCACHE) hmR0SvmGetNestedVmcbCache(PVMCPU pVCpu, PCPUMCTX pCtx)
      1094{
      1095#ifdef VBOX_WITH_NESTED_HWVIRT
      1096    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); RT_NOREF(pCtx);
      1097    return &pVCpu->hm.s.svm.NstGstVmcbCache;
      1098#else
      1099    RT_NOREF2(pVCpu, pCtx);
      1100    return NULL;
      1101#endif
     10871102}
    10881103
     
    13931408        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    13941409        {
    1395             Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    1396             PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     1410            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
    13971411            fRemoveXcpt = !(pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(u32Xcpt));
    13981412        }
     
    14751489        {
    14761490            /* If the nested-hypervisor intercepts CR0 reads/writes, we need to continue intercepting them. */
    1477             PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    1478             Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
     1491            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
    14791492            pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx       & ~RT_BIT(0))
    14801493                                          | (pVmcbNstGstCache->u16InterceptRdCRx &  RT_BIT(0));
     
    15591572    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
    15601573    {
    1561         uint64_t u64GuestCR4 = pCtx->cr4;
    1562         Assert(RT_HI_U32(u64GuestCR4) == 0);
     1574        uint64_t uShadowCr4 = pCtx->cr4;
    15631575        if (!pVM->hm.s.fNestedPaging)
    15641576        {
     
    15711583
    15721584                case PGMMODE_32_BIT:        /* 32-bit paging. */
    1573                     u64GuestCR4 &= ~X86_CR4_PAE;
     1585                    uShadowCr4 &= ~X86_CR4_PAE;
    15741586                    break;
    15751587
     
    15771589                case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
    15781590                    /** Must use PAE paging as we could use physical memory > 4 GB */
    1579                     u64GuestCR4 |= X86_CR4_PAE;
     1591                    uShadowCr4 |= X86_CR4_PAE;
    15801592                    break;
    15811593
     
    15951607        }
    15961608
    1597         pVmcb->guest.u64CR4 = u64GuestCR4;
    1598         pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    1599 
    16001609        /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
    1601         pVCpu->hm.s.fLoadSaveGuestXcr0 = (u64GuestCR4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
     1610        pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
     1611
     1612        /* Avoid intercepting CR4 reads if the guest and shadow CR4 values are identical. */
     1613        if (uShadowCr4 == pCtx->cr4)
     1614        {
     1615            if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     1616                pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(4);
     1617            else
     1618            {
     1619                /* If the nested-hypervisor intercepts CR4 reads, we need to continue intercepting them. */
     1620                PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
     1621                pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx       & ~RT_BIT(4))
     1622                                              | (pVmcbNstGstCache->u16InterceptRdCRx &  RT_BIT(4));
     1623            }
     1624        }
     1625        else
     1626            pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(4);
     1627
     1628        /* CR4 writes are always intercepted (both guest, nested-guest) for tracking PGM mode changes. */
     1629        Assert(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(4));
     1630
     1631        /* Update the VMCB with the shadow CR4 and clear the appropriate VMCB clean bits. */
     1632        Assert(RT_HI_U32(uShadowCr4) == 0);
     1633        pVmcb->guest.u64CR4 = uShadowCr4;
     1634        pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS);
    16021635
    16031636        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
     
    20212054        pVmcbNstGst->ctrl.u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
    20222055
    2023         /* Always intercept CR0, CR4 reads and writes as we alter them. */
    2024         pVmcbNstGst->ctrl.u16InterceptRdCRx |= RT_BIT(0) | RT_BIT(4);
    2025         pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(0) | RT_BIT(4);
     2056        /* Always intercept CR4 writes for tracking PGM mode changes. */
     2057        pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(4);
    20262058
    20272059        /* Without nested paging, intercept CR3 reads and writes as we load shadow page tables. */
     
    29733005
    29743006    PSVMVMCBCTRL         pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
    2975     PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    2976     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); RT_NOREF(pCtx);
     3007    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
    29773008
    29783009    /*
     
    42784309    RTCPUID idCurrentCpu = hmR0GetCurrentCpu()->idCpu;
    42794310    if (   pSvmTransient->fUpdateTscOffsetting
    4280         || idCurrentCpu != pVCpu->hm.s.idLastCpu)
     4311        || idCurrentCpu != pVCpu->hm.s.idLastCpu)   /** @todo Is this correct for nested-guests, where
     4312                                                              a nested-VCPU<->physical-CPU mapping doesn't exist? */
    42814313    {
    42824314        hmR0SvmUpdateTscOffsettingNested(pVM, pVCpu, pCtx, pVmcbNstGst);
     
    45354567
    45364568    /* TSC read must be done early for maximum accuracy. */
    4537     PSVMVMCB             pVmcbNstGst     = pMixedCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    4538     PSVMVMCBCTRL         pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
    4539     PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     4569    PSVMVMCB             pVmcbNstGst      = pMixedCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     4570    PSVMVMCBCTRL         pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
     4571    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pMixedCtx);
    45404572    if (!(pVmcbNstGstCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
    45414573    {
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette