- Timestamp: Mar 26, 2018 6:07:57 AM (7 years ago)
- svn:sync-xref-src-repo-rev: 121476
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r71476)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r71504)

@@ -952,10 +952,6 @@
                                 | SVM_CTRL_INTERCEPT_VMMCALL;
 
-    /*
-     * CR0, CR4 reads/writes must be intercepted, as our shadow values may differ from the guest's.
-     * These interceptions might be relaxed later during VM execution if the conditions allow.
-     */
-    pVmcb->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);
-    pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);
+    /* CR4 writes must always be intercepted for tracking PGM mode changes. */
+    pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(4);
 
     /* Intercept all DRx reads and writes by default. Changed later on. */

@@ -1085,3 +1081,22 @@
 #endif
     return pVCpu->hm.s.svm.pVmcb;
+}
+
+
+/**
+ * Gets a pointer to the nested-guest VMCB cache.
+ *
+ * @returns Pointer to the nested-guest VMCB cache.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+DECLINLINE(PSVMNESTEDVMCBCACHE) hmR0SvmGetNestedVmcbCache(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); RT_NOREF(pCtx);
+    return &pVCpu->hm.s.svm.NstGstVmcbCache;
+#else
+    RT_NOREF2(pVCpu, pCtx);
+    return NULL;
+#endif
 }

@@ -1393,6 +1408,5 @@
     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     {
-        Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
-        PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+        PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
         fRemoveXcpt = !(pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(u32Xcpt));
     }

@@ -1475,6 +1489,5 @@
     {
         /* If the nested-hypervisor intercepts CR0 reads/writes, we need to continue intercepting them. */
-        PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-        Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
+        PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
         pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(0))
                                       | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(0));

@@ -1559,6 +1572,5 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
     {
-        uint64_t u64GuestCR4 = pCtx->cr4;
-        Assert(RT_HI_U32(u64GuestCR4) == 0);
+        uint64_t uShadowCr4 = pCtx->cr4;
         if (!pVM->hm.s.fNestedPaging)
         {

@@ -1571,11 +1583,11 @@
 
             case PGMMODE_32_BIT: /* 32-bit paging. */
-                u64GuestCR4 &= ~X86_CR4_PAE;
+                uShadowCr4 &= ~X86_CR4_PAE;
                 break;
 
             case PGMMODE_PAE:    /* PAE paging. */
             case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
                 /** Must use PAE paging as we could use physical memory > 4 GB */
-                u64GuestCR4 |= X86_CR4_PAE;
+                uShadowCr4 |= X86_CR4_PAE;
                 break;
 

@@ -1595,9 +1607,30 @@
         }
 
-        pVmcb->guest.u64CR4 = u64GuestCR4;
-        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
-
         /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
-        pVCpu->hm.s.fLoadSaveGuestXcr0 = (u64GuestCR4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
+        pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
+
+        /* Avoid intercepting CR4 reads if the guest and shadow CR4 values are identical. */
+        if (uShadowCr4 == pCtx->cr4)
+        {
+            if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+                pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(4);
+            else
+            {
+                /* If the nested-hypervisor intercepts CR4 reads, we need to continue intercepting them. */
+                PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
+                pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(4))
+                                              | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(4));
+            }
+        }
+        else
+            pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(4);
+
+        /* CR4 writes are always intercepted (both guest, nested-guest) from tracking PGM mode changes. */
+        Assert(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(4));
+
+        /* Update VMCB with the shadow CR4 & the appropriate VMCB clean bits. */
+        Assert(RT_HI_U32(uShadowCr4) == 0);
+        pVmcb->guest.u64CR4 = uShadowCr4;
+        pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS);
 
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);

@@ -2021,7 +2054,6 @@
     pVmcbNstGst->ctrl.u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
 
-    /* Always intercept CR0, CR4 reads and writes as we alter them. */
-    pVmcbNstGst->ctrl.u16InterceptRdCRx |= RT_BIT(0) | RT_BIT(4);
-    pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(0) | RT_BIT(4);
+    /* Always intercept CR4 writes for tracking PGM mode changes. */
+    pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(4);
 
     /* Without nested paging, intercept CR3 reads and writes as we load shadow page tables. */

@@ -2973,6 +3005,5 @@
 
     PSVMVMCBCTRL         pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); RT_NOREF(pCtx);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
 
     /*

@@ -4278,5 +4309,6 @@
     RTCPUID idCurrentCpu = hmR0GetCurrentCpu()->idCpu;
     if (   pSvmTransient->fUpdateTscOffsetting
-        || idCurrentCpu != pVCpu->hm.s.idLastCpu)
+        || idCurrentCpu != pVCpu->hm.s.idLastCpu) /** @todo is this correct for nested-guests where
+                                                      nested-VCPU<->physical-CPU mapping doesn't exist. */
     {
         hmR0SvmUpdateTscOffsettingNested(pVM, pVCpu, pCtx, pVmcbNstGst);

@@ -4535,7 +4567,7 @@
 
     /* TSC read must be done early for maximum accuracy. */
-    PSVMVMCB pVmcbNstGst = pMixedCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    PSVMVMCB             pVmcbNstGst      = pMixedCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    PSVMVMCBCTRL         pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pMixedCtx);
     if (!(pVmcbNstGstCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
     {
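The core of the change is the conditional relaxation of the CR4 read intercept: when the shadow CR4 that HM places in the VMCB is identical to the guest's CR4, intercepting guest CR4 reads buys nothing and the intercept bit can be dropped, unless a nested hypervisor asked for it, in which case its cached intercept bit is merged back in. The C sketch below distills just that bit manipulation outside of VirtualBox. It is a minimal illustration, not VirtualBox code: InterceptState, u16NstGstRdCRxCache, fShadowEqualsGuest and fNestedGuestMode are hypothetical stand-ins for the VMCB control area, the nested-guest VMCB cache, the uShadowCr4 == pCtx->cr4 test and CPUMIsGuestInSvmNestedHwVirtMode() respectively.

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define RT_BIT(bit) (1U << (bit))   /* mirrors IPRT's RT_BIT for this sketch */

    typedef struct InterceptState
    {
        uint16_t u16InterceptRdCRx;    /* CRx read-intercept mask in the VMCB       */
        uint16_t u16NstGstRdCRxCache;  /* cached nested-hypervisor read intercepts  */
    } InterceptState;

    /* Recompute the CR4 (bit 4) read intercept the way the changeset does. */
    static void updateCr4ReadIntercept(InterceptState *pState,
                                       bool fShadowEqualsGuest,
                                       bool fNestedGuestMode)
    {
        if (fShadowEqualsGuest)
        {
            if (!fNestedGuestMode)
                pState->u16InterceptRdCRx &= ~RT_BIT(4);   /* reads can go unintercepted */
            else
            {
                /* Keep the bit only if the nested-hypervisor wanted CR4 reads intercepted. */
                pState->u16InterceptRdCRx = (pState->u16InterceptRdCRx & ~RT_BIT(4))
                                          | (pState->u16NstGstRdCRxCache & RT_BIT(4));
            }
        }
        else
            pState->u16InterceptRdCRx |= RT_BIT(4);        /* shadow differs: must intercept */
    }

    int main(void)
    {
        InterceptState St = { RT_BIT(0) | RT_BIT(4), 0 };
        updateCr4ReadIntercept(&St, true /*shadow == guest*/, false /*not nested*/);
        printf("CR4 read intercept is now %s\n",
               (St.u16InterceptRdCRx & RT_BIT(4)) ? "on" : "off");
        return 0;
    }

The mask-and-or merge is the same pattern the pre-existing CR0 code at new lines 1492-1493 uses, which is presumably why the CR4 path adopts it here. Note that CR4 writes stay intercepted unconditionally; only reads are relaxed.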
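A related detail: because this path can now change intercept settings as well as CR4, the changeset widens the VMCB clean-bits invalidation from HMSVM_VMCB_CLEAN_CRX_EFER alone to also cover HMSVM_VMCB_CLEAN_INTERCEPTS; otherwise the CPU would be permitted to keep using a cached copy of the intercept vectors on the next VMRUN. A minimal sketch of that idea, with bit positions chosen for illustration only (the real definitions are VirtualBox's, not reproduced here):

    #include <stdint.h>

    /* Illustrative clean-bit positions, not the authoritative values. */
    #define CLEAN_INTERCEPTS  (1U << 0)   /* intercept vectors unchanged  */
    #define CLEAN_CRX_EFER    (1U << 5)   /* CR0/CR3/CR4/EFER unchanged   */

    /* After modifying both guest.u64CR4 and ctrl.u16InterceptRdCRx, both
       caches must be declared dirty so the CPU re-reads those VMCB fields: */
    static uint32_t markCr4AndInterceptsDirty(uint32_t u32CleanBits)
    {
        return u32CleanBits & ~(CLEAN_CRX_EFER | CLEAN_INTERCEPTS);
    }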