Changeset 61359 in vbox
- Timestamp: Jun 1, 2016 7:51:27 AM
- svn:sync-xref-src-repo-rev: 107655
- File: 1 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r61317)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r61359)
@@ -1458 +1458 @@
 
 
-#if HC_ARCH_BITS == 64
 /**
  * Saves a set of host MSRs to allow read/write passthru access to the guest and
@@ -1474 +1473 @@
  * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
  */
-    if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
-    {
-        pVCpu->hm.s.vmx.u64HostLStarMsr        = ASMRdMsr(MSR_K8_LSTAR);
-        pVCpu->hm.s.vmx.u64HostStarMsr         = ASMRdMsr(MSR_K6_STAR);
-        pVCpu->hm.s.vmx.u64HostSFMaskMsr       = ASMRdMsr(MSR_K8_SF_MASK);
-        pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+    Assert(!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST));
+    if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
+    {
+#if HC_ARCH_BITS == 64
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        {
+            pVCpu->hm.s.vmx.u64HostLStarMsr        = ASMRdMsr(MSR_K8_LSTAR);
+            pVCpu->hm.s.vmx.u64HostStarMsr         = ASMRdMsr(MSR_K6_STAR);
+            pVCpu->hm.s.vmx.u64HostSFMaskMsr       = ASMRdMsr(MSR_K8_SF_MASK);
+            pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+        }
+#endif
         pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
     }
@@ -1496 +1501 @@
 {
     NOREF(pVCpu);
-    switch (uMsr)
-    {
-        case MSR_K8_LSTAR:
-        case MSR_K6_STAR:
-        case MSR_K8_SF_MASK:
-        case MSR_K8_KERNEL_GS_BASE:
-            return true;
-    }
+#if HC_ARCH_BITS == 64
+    if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+    {
+        switch (uMsr)
+        {
+            case MSR_K8_LSTAR:
+            case MSR_K6_STAR:
+            case MSR_K8_SF_MASK:
+            case MSR_K8_KERNEL_GS_BASE:
+                return true;
+        }
+    }
+#endif
     return false;
 }
@@ -1526 +1536 @@
     {
         Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
-        pMixedCtx->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
-        pMixedCtx->msrSTAR         = ASMRdMsr(MSR_K6_STAR);
-        pMixedCtx->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
-        pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+#if HC_ARCH_BITS == 64
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        {
+            pMixedCtx->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
+            pMixedCtx->msrSTAR         = ASMRdMsr(MSR_K6_STAR);
+            pMixedCtx->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
+            pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+        }
+#endif
     }
 }
@@ -1565 +1580 @@
     if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
     {
-        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
-        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
-        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
-        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
+#if HC_ARCH_BITS == 64
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        {
+            VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
+            VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
+            VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
+            VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
+        }
+#endif
         pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
-    }
-    else
-    {
-        ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR);
-        ASMWrMsr(MSR_K6_STAR, pMixedCtx->msrSTAR);
-        ASMWrMsr(MSR_K8_SF_MASK, pMixedCtx->msrSFMASK);
-        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
     }
 
@@ -1601 +1614 @@
     {
         Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
-        ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
-        ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
-        ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
-        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
+#if HC_ARCH_BITS == 64
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        {
+            ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
+            ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
+            ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
+            ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
+        }
+#endif
     }
     pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
 }
-#endif /* HC_ARCH_BITS == 64 */
 
 
@@ -2431 +2448 @@
     }
 #endif
+    /* Though MSR_IA32_PERF_GLOBAL_CTRL is saved/restored lazily, we want intercept reads/write to it for now. */
 }
@@ -3095 +3113 @@
     AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
 
-    int rc = VINF_SUCCESS;
-#if HC_ARCH_BITS == 64
-    if (pVM->hm.s.fAllow64BitGuests)
-        hmR0VmxLazySaveHostMsrs(pVCpu);
-#endif
+    /*
+     * Save MSRs that we restore lazily (due to preemption or transition to ring-3)
+     * rather than swapping them on every VM-entry.
+     */
+    hmR0VmxLazySaveHostMsrs(pVCpu);
 
     /*
      * Host Sysenter MSRs.
      */
-    rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
+    int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
 #if HC_ARCH_BITS == 32
     rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
@@ -6169 +6187 @@
 static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
-#if HC_ARCH_BITS == 64
-    if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
-    {
-        /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
-        VMMRZCallRing3Disable(pVCpu);
-        HM_DISABLE_PREEMPT();
-
-        /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
-        if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
-        {
-            hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
-            HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
-        }
-
-        HM_RESTORE_PREEMPT();
-        VMMRZCallRing3Enable(pVCpu);
-    }
-    else
+    /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
+    VMMRZCallRing3Disable(pVCpu);
+    HM_DISABLE_PREEMPT();
+
+    /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
+    {
+        hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
-#else
-    NOREF(pMixedCtx);
-    HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
-#endif
+    }
+
+    HM_RESTORE_PREEMPT();
+    VMMRZCallRing3Enable(pVCpu);
 
     return VINF_SUCCESS;
@@ -7047 +7055 @@
 #endif
 
-#if HC_ARCH_BITS == 64
     /* Restore the lazy host MSRs as we're leaving VT-x context. */
-    if (   pVM->hm.s.fAllow64BitGuests
-        && pVCpu->hm.s.vmx.fLazyMsrs)
+    if (pVCpu->hm.s.vmx.fLazyMsrs)
     {
         /* We shouldn't reload the guest MSRs without saving it first. */
@@ -7062 +7068 @@
         Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
     }
-#endif
 
     /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
@@ -7288 +7293 @@
         VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
         pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
 #endif
         /* Restore the lazy host MSRs as we're leaving VT-x context. */
-        if (   pVCpu->hm.s.vmx.fLazyMsrs
-            && pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        if (pVCpu->hm.s.vmx.fLazyMsrs)
             hmR0VmxLazyRestoreHostMsrs(pVCpu);
-#endif
+
         /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
         pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
@@ -8182 +8186 @@
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
-    if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
-        return VINF_SUCCESS;
-
-    int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
+    int rc = VINF_SUCCESS;
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
+    {
+        rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
+        AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+        rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
+        AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+        rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
+        AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
+    }
     return rc;
 }
@@ -8367 +8372 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
     {
-#if HC_ARCH_BITS == 64
-        if (pVM->hm.s.fAllow64BitGuests)
-            hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
-#endif
+        hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
     }
@@ -11884 +11886 @@
             HMVMX_RETURN_UNEXPECTED_EXIT();
         }
-# if HC_ARCH_BITS == 64
-        if (   pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
-            && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
-        {
-            AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
-            HMVMX_RETURN_UNEXPECTED_EXIT();
-        }
-# endif
+        if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
+        {
+            VMXMSREXITREAD  enmRead;
+            VMXMSREXITWRITE enmWrite;
+            int rc2 = hmR0VmxGetMsrPermission(pVCpu, pMixedCtx->ecx, &enmRead, &enmWrite);
+            AssertRCReturn(rc2, rc2);
+            if (enmRead == VMXMSREXIT_PASSTHRU_READ)
+            {
+                AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
+                HMVMX_RETURN_UNEXPECTED_EXIT();
+            }
+        }
     }
 #endif
@@ -11974 +11980 @@
             if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
                 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
-#if HC_ARCH_BITS == 64
             else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
                 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
-#endif
             break;
         }
@@ -12012 +12016 @@
             }
 
-#if HC_ARCH_BITS == 64
             if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
             {
-                AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
-                HMVMX_RETURN_UNEXPECTED_EXIT();
+                VMXMSREXITREAD  enmRead;
+                VMXMSREXITWRITE enmWrite;
+                int rc2 = hmR0VmxGetMsrPermission(pVCpu, pMixedCtx->ecx, &enmRead, &enmWrite);
+                AssertRCReturn(rc2, rc2);
+                if (enmWrite == VMXMSREXIT_PASSTHRU_WRITE)
+                {
+                    AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
+                    HMVMX_RETURN_UNEXPECTED_EXIT();
+                }
             }
-#endif
             break;
         }
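The recurring shape of this change is that the HC_ARCH_BITS == 64 / fAllow64BitGuests check moves inside the lazy-MSR helpers themselves, so callers such as hmR0VmxSaveHostMsrs() and the guest-state load path can invoke them unconditionally. Below is a minimal C sketch of that shape. The flag names and field names are taken from the diff above; the VCPUSKETCH struct, readLStarMsr() and exportHostState() are simplified stand-ins for illustration only, not VirtualBox APIs.

#include <stdbool.h>
#include <stdint.h>

/* HC_ARCH_BITS normally comes from iprt/cdefs.h; define a fallback so the
   sketch is self-contained. */
#ifndef HC_ARCH_BITS
# if defined(__x86_64__) || defined(_M_X64)
#  define HC_ARCH_BITS 64
# else
#  define HC_ARCH_BITS 32
# endif
#endif

/* Flag names mirror the diff; the struct is a simplified stand-in for the
   per-VCPU HM state, not the real VMCPU layout. */
#define VMX_LAZY_MSRS_SAVED_HOST    UINT32_C(0x1)
#define VMX_LAZY_MSRS_LOADED_GUEST  UINT32_C(0x2)

typedef struct VCPUSKETCH
{
    uint32_t fLazyMsrs;          /* VMX_LAZY_MSRS_* state flags. */
    bool     fAllow64BitGuests;  /* Stands in for pVM->hm.s.fAllow64BitGuests. */
    uint64_t u64HostLStarMsr;    /* One saved host MSR, for brevity. */
} VCPUSKETCH;

/* Hypothetical accessor standing in for ASMRdMsr(MSR_K8_LSTAR). */
static uint64_t readLStarMsr(void)
{
    return 0; /* The real code reads the MSR from the host CPU. */
}

/* The 64-bit-guest check now lives inside the helper, so callers need no
   #if HC_ARCH_BITS / fAllow64BitGuests guard of their own. */
static void lazySaveHostMsrs(VCPUSKETCH *pVCpu)
{
    if (!(pVCpu->fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
    {
#if HC_ARCH_BITS == 64
        if (pVCpu->fAllow64BitGuests)
            pVCpu->u64HostLStarMsr = readLStarMsr();
#endif
        /* Set even when nothing was read, so the restore path's assertions
           and flag clearing stay uniform across host architectures. */
        pVCpu->fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
    }
}

/* Caller side, as in hmR0VmxSaveHostMsrs() after the change: one plain call. */
static void exportHostState(VCPUSKETCH *pVCpu)
{
    lazySaveHostMsrs(pVCpu);
    /* ... Sysenter MSRs and the rest of the host state follow ... */
}

Note that the real change also sets VMX_LAZY_MSRS_SAVED_HOST outside the #if block, which keeps the assertions and the flag clearing in hmR0VmxLazyRestoreHostMsrs() consistent on 32-bit hosts even though no MSRs were actually read there.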