Changeset 51222 in vbox for trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
Timestamp: May 9, 2014 8:11:15 AM (11 years ago)
File: 1 edited
Legend: lines prefixed with '+' were added, lines prefixed with '-' were removed; unprefixed lines are unmodified context.
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp    (r51220)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp    (r51222)

@@ -45,4 +45,5 @@
 # define HMVMX_ALWAYS_SWAP_FPU_STATE
 # define HMVMX_ALWAYS_FLUSH_TLB
+# define HMVMX_ALWAYS_SWAP_EFER
 #endif

@@ -1303,7 +1304,4 @@
  * auto-load/store MSR area in the VMCS.
  *
- * Does not fail if the MSR in @a uMsr is not found in the auto-load/store MSR
- * area.
- *
  * @returns VBox status code.
  * @param   pVCpu       Pointer to the VMCPU.
@@ -1328,5 +1326,5 @@
         /* Remove it by swapping the last MSR in place of it, and reducing the count. */
         PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
-        pLastGuestMsr += cMsrs;
+        pLastGuestMsr += cMsrs - 1;
         pGuestMsr->u32Msr   = pLastGuestMsr->u32Msr;
         pGuestMsr->u64Value = pLastGuestMsr->u64Value;
@@ -1334,5 +1332,5 @@
         PVMXAUTOMSR pHostMsr     = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
         PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
-        pLastHostMsr += cMsrs;
+        pLastHostMsr += cMsrs - 1;
         pHostMsr->u32Msr   = pLastHostMsr->u32Msr;
         pHostMsr->u64Value = pLastHostMsr->u64Value;
@@ -1352,6 +1350,9 @@
         if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
             hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
-    }
-
-    return VINF_SUCCESS;
+
+        Log4(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
+        return VINF_SUCCESS;
+    }
+
+    return VERR_NOT_FOUND;
 }
@@ -1397,4 +1398,12 @@
     {
         AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
-        pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
+
+        /*
+         * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
+         * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
+         */
+        if (pHostMsr->u32Msr == MSR_K6_EFER)
+            pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
+        else
+            pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
     }
@@ -1646,10 +1655,10 @@
     {
         /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
-        AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32\n", pHostMsr->u32Msr,
-                            pGuestMsr->u32Msr));
+        AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
+                            pGuestMsr->u32Msr, cMsrs));

         uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
-        AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64\n", pHostMsr->u32Msr,
-                            pHostMsr->u64Value, u64Msr));
+        AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
+                            pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));

         /* Verify that the permissions are as expected in the MSR bitmap. */
@@ -1660,8 +1669,16 @@
             rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
             AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission! failed. rc=%Rrc\n", rc));
-            AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 No passthru read permission!\n",
-                                pGuestMsr->u32Msr));
-            AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 No passthru write permission!\n",
-                                pGuestMsr->u32Msr));
+            if (pGuestMsr->u32Msr == MSR_K6_EFER)
+            {
+                AssertMsgReturnVoid(enmRead  == VMXMSREXIT_INTERCEPT_READ,  ("Passthru read for EFER!?\n"));
+                AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
+            }
+            else
+            {
+                AssertMsgReturnVoid(enmRead  == VMXMSREXIT_PASSTHRU_READ,  ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
+                                    pGuestMsr->u32Msr, cMsrs));
+                AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
+                                    pGuestMsr->u32Msr, cMsrs));
+            }
         }
     }
@@ -3155,5 +3172,7 @@

     /*
+     * Host EFER MSR.
      * If the CPU supports the newer VMCS controls for managing EFER, use it.
+     * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs().
      */
 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
@@ -3193,4 +3212,7 @@
 static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
+#ifdef HMVMX_ALWAYS_SWAP_EFER
+    return true;
+#endif
     PVM pVM              = pVCpu->CTX_SUFF(pVM);
     uint64_t u64HostEfer = pVM->hm.s.vmx.u64HostEfer;
@@ -3201,5 +3223,5 @@
      * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.
      */
-    if (   pVM->hm.s.fAllow64BitGuests
+    if (   CPUMIsGuestInLongMode(pVCpu)
         && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
     {
@@ -4655,4 +4677,5 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
     {
+        /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). */
 #if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
         if (pVM->hm.s.fAllow64BitGuests)
@@ -4698,11 +4721,34 @@
     {
 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-        if (   HMVMX_IS_64BIT_HOST_MODE()
-            && pVM->hm.s.vmx.fSupportsVmcsEfer
-            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))   /* Not really needed here, but avoids a VM-write as a nested guest. */
-        {
-            int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
-            AssertRCReturn(rc,rc);
-            Log4(("Load: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pMixedCtx->msrEFER));
+        if (HMVMX_IS_64BIT_HOST_MODE())
+        {
+            /*
+             * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
+             * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
+             */
+            if (pVM->hm.s.vmx.fSupportsVmcsEfer)
+            {
+                /* Not strictly necessary to check hmR0VmxShouldSwapEferMsr() here, but it avoids
+                   one VM-write when we're a nested guest. */
+                if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+                {
+                    int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
+                    AssertRCReturn(rc,rc);
+                    Log4(("Load: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pMixedCtx->msrEFER));
+                }
+            }
+            else
+            {
+                if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+                {
+                    hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */);
+                    /* We need to intercept reads too, see @bugref{7386} comment #16. */
+                    hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
+                    Log4(("Load: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER,
+                          pVCpu->hm.s.vmx.cMsrs));
+                }
+                else
+                    hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
+            }
         }
 #endif
@@ -6137,7 +6183,14 @@
             case MSR_K8_SF_MASK:        pMixedCtx->msrSFMASK       = pMsr->u64Value; break;
             case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+            case MSR_K6_EFER:
+            {
+                if (HMVMX_IS_64BIT_HOST_MODE())   /* Nothing to do here since we intercept writes, see hmR0VmxLoadGuestMsrs(). */
+                    break;
+            }
+#endif
             default:
             {
-                AssertFailed();
+                AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
                 return VERR_HM_UNEXPECTED_LD_ST_MSR;
             }
@@ -7115,4 +7168,5 @@
     }
 #endif
+    /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
     pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
@@ -10291,5 +10345,6 @@
     if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     {
-        if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
+        if (   hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
+            && pMixedCtx->ecx != MSR_K6_EFER)
         {
             AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
@@ -10382,4 +10437,5 @@
             case MSR_K8_FS_BASE: /* no break */
             case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break;
+            case MSR_K6_EFER:    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR);     break;
             default:
             {
@@ -10415,4 +10471,12 @@
         if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
         {
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+            /* EFER writes are always intercepted, see hmR0VmxLoadGuestMsrs(). */
+            if (   HMVMX_IS_64BIT_HOST_MODE()
+                && pMixedCtx->ecx == MSR_K6_EFER)
+            {
+                break;
+            }
+#endif
             AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
                              pMixedCtx->ecx));
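One of the fixes visible in the early hunks is an off-by-one in hmR0VmxRemoveAutoLoadStoreMsr(): the last entry of the auto-load/store area lives at index cMsrs - 1, not cMsrs. The following is a minimal, self-contained sketch of that remove-by-swapping-with-the-last-entry pattern; AUTOMSR, removeAutoLoadStoreMsr and their parameters are simplified stand-ins for illustration, not the real VMXAUTOMSR layout or the VBox function.

#include <stdint.h>
#include <stdbool.h>

/* Simplified stand-in for an auto-load/store MSR entry (hypothetical, not the real VMXAUTOMSR). */
typedef struct AUTOMSR
{
    uint32_t u32Msr;    /* MSR index. */
    uint64_t u64Value;  /* Value loaded/stored on VM-entry/VM-exit. */
} AUTOMSR;

/*
 * Remove uMsr by overwriting its slot with the last entry and shrinking the count.
 * Returns true when the MSR was found; the r51222 version of the real function
 * returns VERR_NOT_FOUND in the "not found" case instead of always succeeding.
 */
static bool removeAutoLoadStoreMsr(AUTOMSR *paMsrs, uint32_t *pcMsrs, uint32_t uMsr)
{
    uint32_t const cMsrs = *pcMsrs;
    for (uint32_t i = 0; i < cMsrs; i++)
    {
        if (paMsrs[i].u32Msr == uMsr)
        {
            /* The last valid entry is at index cMsrs - 1; using cMsrs reads one
               past the area, which is the off-by-one fixed in this changeset. */
            paMsrs[i] = paMsrs[cMsrs - 1];
            *pcMsrs   = cMsrs - 1;
            return true;
        }
    }
    return false;
}

The swap-with-last approach keeps removal O(1); the changeset performs the same swap on both the guest and host areas so the two stay paired by index, which is exactly what hmR0VmxCheckAutoLoadStoreMsrs() asserts.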
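The other thread running through the change is deciding when guest and host EFER must be swapped. Below is a hedged sketch of only the SCE check visible in the hunk around line 3223; shouldSwapEferMsr, EFER_SCE, ALWAYS_SWAP_EFER and the fGuestInLongMode parameter are illustrative names (the latter stands in for CPUMIsGuestInLongMode(pVCpu)), and the real hmR0VmxShouldSwapEferMsr() handles further cases not shown in this diff.

#include <stdint.h>
#include <stdbool.h>

#define EFER_SCE  UINT64_C(0x0000000000000001)   /* SYSCALL enable, bit 0 of EFER. */

/*
 * Decide whether the guest's EFER needs to be loaded for guest execution.
 * Mirrors the shape of the check in this changeset: swap when SYSCALL enablement
 * differs between guest and host while the guest runs in long mode, so the
 * guest's SYSCALL behaviour is not affected by the host's EFER.SCE setting.
 */
static bool shouldSwapEferMsr(uint64_t u64HostEfer, uint64_t u64GuestEfer, bool fGuestInLongMode)
{
#ifdef ALWAYS_SWAP_EFER   /* analogous to the new HMVMX_ALWAYS_SWAP_EFER debug option */
    return true;
#endif
    if (   fGuestInLongMode
        && (u64GuestEfer & EFER_SCE) != (u64HostEfer & EFER_SCE))
        return true;
    return false;
}

When a swap is required, the diff shows two mechanisms: the dedicated VMCS EFER controls when the CPU supports them (fSupportsVmcsEfer), and otherwise the auto-load/store MSR area with EFER reads and writes kept intercepted.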
Note: See TracChangeset for help on using the changeset viewer.