Changeset 49664 in vbox for trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
Timestamp: Nov 26, 2013 3:53:35 PM (11 years ago)
File: trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (1 edited)
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r49580 → r49664)
@@ -89 +89 @@
 #define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR      RT_BIT(14)
 #define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS  RT_BIT(15)
-#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE        RT_BIT(16)
-#define HMVMX_UPDATED_GUEST_APIC_STATE            RT_BIT(17)
+#define HMVMX_UPDATED_GUEST_LAZY_MSRS             RT_BIT(16)
+#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE        RT_BIT(17)
+#define HMVMX_UPDATED_GUEST_APIC_STATE            RT_BIT(18)
 #define HMVMX_UPDATED_GUEST_ALL                   (  HMVMX_UPDATED_GUEST_RIP \
                                                    | HMVMX_UPDATED_GUEST_RSP \

@@ -107 +108 @@
                                                    | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
                                                    | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
+                                                   | HMVMX_UPDATED_GUEST_LAZY_MSRS \
                                                    | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
                                                    | HMVMX_UPDATED_GUEST_APIC_STATE)

@@ -1376 +1378 @@
  * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
  *
- * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pVCpu       Pointer to the VMCPU.
+ *
+ * @remarks No-long-jump zone!!!
  */
 static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)

@@ -1395 +1399 @@


+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+/**
+ * Saves a set of host MSRs to allow read/write passthru access to the guest and
+ * perform lazy restoration of the host MSRs while leaving VT-x.
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+#define VMXLOCAL_SAVE_HOST_MSR(uMsr, a_HostMsrField, RestoreFlag) \
+    do { \
+        if (!(pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag))) \
+        { \
+            pVCpu->hm.s.vmx.u64Host##a_HostMsrField = ASMRdMsr(uMsr); \
+            Log4(("hmR0VmxLazySaveHostMsrs: uMsr=%#RX32 HostValue=%#RX64\n", (uMsr), pVCpu->hm.s.vmx.u64Host##a_HostMsrField)); \
+        } \
+    } while (0)
+
+    /*
+     * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
+     */
+    VMXLOCAL_SAVE_HOST_MSR(MSR_K8_LSTAR,          LStarMsr,        VMX_RESTORE_HOST_MSR_LSTAR);
+    VMXLOCAL_SAVE_HOST_MSR(MSR_K6_STAR,           StarMsr,         VMX_RESTORE_HOST_MSR_STAR);
+    VMXLOCAL_SAVE_HOST_MSR(MSR_K8_SF_MASK,        SFMaskMsr,       VMX_RESTORE_HOST_MSR_SFMASK);
+    VMXLOCAL_SAVE_HOST_MSR(MSR_K8_KERNEL_GS_BASE, KernelGSBaseMsr, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
+#undef VMXLOCAL_SAVE_HOST_MSR
+}
+
+
+/**
+ * Checks whether the MSR belongs to the set of guest MSRs that we restore
+ * lazily while leaving VT-x.
+ *
+ * @returns true if it does, false otherwise.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   uMsr        The MSR to check.
+ */
+static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
+{
+    NOREF(pVCpu);
+    switch (uMsr)
+    {
+        case MSR_K8_LSTAR:
+        case MSR_K6_STAR:
+        case MSR_K8_SF_MASK:
+        case MSR_K8_KERNEL_GS_BASE:
+            return true;
+    }
+    return false;
+}
+
+
+/**
+ * Saves a set of guests MSRs back into the guest-CPU context.
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+#define VMXLOCAL_SAVE_GUEST_MSR(uMsr, a_GuestMsrField, RestoreFlag) \
+    do { \
+        if (pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag)) \
+        { \
+            pMixedCtx->msr##a_GuestMsrField = ASMRdMsr(uMsr); \
+            Log4(("hmR0VmxLazySaveGuestMsrs: uMsr=%#RX32 GuestValue=%#RX64\n", (uMsr), pMixedCtx->msr##a_GuestMsrField)); \
+        } \
+    } while (0)
+
+    VMXLOCAL_SAVE_GUEST_MSR(MSR_K8_LSTAR,          LSTAR,        VMX_RESTORE_HOST_MSR_LSTAR);
+    VMXLOCAL_SAVE_GUEST_MSR(MSR_K6_STAR,           STAR,         VMX_RESTORE_HOST_MSR_STAR);
+    VMXLOCAL_SAVE_GUEST_MSR(MSR_K8_SF_MASK,        SFMASK,       VMX_RESTORE_HOST_MSR_SFMASK);
+    VMXLOCAL_SAVE_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
+
+#undef VMXLOCAL_SAVE_GUEST_MSR
+}
+
+
+/**
+ * Loads a set of guests MSRs to allow read/passthru to the guest.
+ *
+ * The name of this function is slightly confusing. This function does NOT
+ * postpone loading, but loads the MSR right now. "hmR0VmxLazy" is simply a
+ * common prefix for functions dealing with "lazy restoration" of the shared
+ * MSRs.
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+#define VMXLOCAL_LOAD_GUEST_MSR(uMsr, a_GuestMsrField, a_HostMsrField, RestoreFlag) \
+    do { \
+        if (   (pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag)) \
+            || pMixedCtx->msr##a_GuestMsrField != pVCpu->hm.s.vmx.u64Host##a_HostMsrField) \
+        { \
+            ASMWrMsr((uMsr), pMixedCtx->msr##a_GuestMsrField); \
+        } \
+        pVCpu->hm.s.vmx.fRestoreHostMsrs |= (RestoreFlag); \
+        Log4(("Load: MSRSWAP uMsr=%#RX32 GuestValue=%#RX64\n", (uMsr), pMixedCtx->msr##a_GuestMsrField)); \
+    } while (0)
+
+    VMXLOCAL_LOAD_GUEST_MSR(MSR_K8_LSTAR,          LSTAR,        LStarMsr,        VMX_RESTORE_HOST_MSR_LSTAR);
+    VMXLOCAL_LOAD_GUEST_MSR(MSR_K6_STAR,           STAR,         StarMsr,         VMX_RESTORE_HOST_MSR_STAR);
+    VMXLOCAL_LOAD_GUEST_MSR(MSR_K8_SF_MASK,        SFMASK,       SFMaskMsr,       VMX_RESTORE_HOST_MSR_SFMASK);
+    VMXLOCAL_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBaseMsr, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
+
+#undef VMXLOCAL_LOAD_GUEST_MSR
+}
+
+
+/**
+ * Performs lazy restoration of the set of host MSRs if they were previously
+ * loaded with guest MSR values.
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ *
+ * @remarks No-long-jump zone!!!
+ * @remarks The guest MSRs should have been saved back into the guest-CPU
+ *          context by hmR0VmxSaveGuestLazyMsrs()!!!
+ */
+static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+#define VMXLOCAL_RESTORE_HOST_MSR(uMsr, a_HostMsrField, RestoreFlag) \
+    do { \
+        if (pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag)) \
+        { \
+            ASMWrMsr((uMsr), pVCpu->hm.s.vmx.u64Host##a_HostMsrField); \
+            pVCpu->hm.s.vmx.fRestoreHostMsrs &= ~(RestoreFlag); \
+            Log4(("hmR0VmxLazyRestoreHostMsrs: uMsr=%#RX32 HostValue=%#RX64\n", (uMsr), \
+                  pVCpu->hm.s.vmx.u64Host##a_HostMsrField)); \
+        } \
+    } while (0)
+
+    VMXLOCAL_RESTORE_HOST_MSR(MSR_K8_LSTAR,          LStarMsr,        VMX_RESTORE_HOST_MSR_LSTAR);
+    VMXLOCAL_RESTORE_HOST_MSR(MSR_K6_STAR,           StarMsr,         VMX_RESTORE_HOST_MSR_STAR);
+    VMXLOCAL_RESTORE_HOST_MSR(MSR_K8_SF_MASK,        SFMaskMsr,       VMX_RESTORE_HOST_MSR_SFMASK);
+    VMXLOCAL_RESTORE_HOST_MSR(MSR_K8_KERNEL_GS_BASE, KernelGSBaseMsr, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
+
+#undef VMXLOCAL_RESTORE_HOST_MSR
+}
+#endif /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
+
+
 #ifdef VBOX_STRICT
 /**

@@ -1400 +1565 @@
  * VMCS are correct.
  *
- * @param   pVCpu
+ * @param   pVCpu       Pointer to the VMCPU.
  */
 static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)

@@ -2171 +2336 @@
     hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
     hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
+
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    /*
+     * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
+     */
+    if (   HMVMX_IS_64BIT_HOST_MODE()
+        && pVM->hm.s.fAllow64BitGuests)
+    {
+        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR,          VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
+        hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR,           VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
+        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
+        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
+    }
+#endif
 }

@@ -2823 +3002 @@
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
+ *
+ * @remarks No-long-jump zone!!!
  */
 DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)

@@ -2832 +3013 @@

     int rc = VINF_SUCCESS;
-#if 0
-    PVMXAUTOMSR pHostMsr           = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
-    uint32_t    cHostMsrs          = 0;
-    uint32_t    u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
-
-    if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
-    {
-        uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
-
-# if HC_ARCH_BITS == 64
-        /* Paranoia. 64-bit code requires these bits to be set always. */
-        Assert((u64HostEfer & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
-
-        /*
-         * We currently do not save/restore host EFER, we just make sure it doesn't get modified by VT-x operation.
-         * All guest accesses (read, write) on EFER cause VM-exits. If we are to conditionally load the guest EFER for
-         * some reason (e.g. allow transparent reads) we would activate the code below.
-         */
-# if 0
-        /* All our supported 64-bit host platforms must have NXE bit set. Otherwise we can change the below code to save EFER. */
-        Assert(u64HostEfer & (MSR_K6_EFER_NXE));
-        /* The SCE bit is only applicable in 64-bit mode. Save EFER if it doesn't match what the guest has.
-           See Intel spec. 30.10.4.3 "Handling the SYSCALL and SYSRET Instructions". */
-        if (CPUMIsGuestInLongMode(pVCpu))
-        {
-            uint64_t u64GuestEfer;
-            rc = CPUMQueryGuestMsr(pVCpu, MSR_K6_EFER, &u64GuestEfer);
-            AssertRC(rc);
-
-            if ((u64HostEfer & MSR_K6_EFER_SCE) != (u64GuestEfer & MSR_K6_EFER_SCE))
-            {
-                pHostMsr->u32Msr      = MSR_K6_EFER;
-                pHostMsr->u32Reserved = 0;
-                pHostMsr->u64Value    = u64HostEfer;
-                pHostMsr++; cHostMsrs++;
-            }
-        }
-# endif
-# else  /* HC_ARCH_BITS != 64 */
-        pHostMsr->u32Msr      = MSR_K6_EFER;
-        pHostMsr->u32Reserved = 0;
-# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-        if (CPUMIsGuestInLongMode(pVCpu))
-        {
-            /* Must match the EFER value in our 64-bit switcher. */
-            pHostMsr->u64Value = u64HostEfer | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
-        }
-        else
-# endif
-            pHostMsr->u64Value = u64HostEfer;
-        pHostMsr++; cHostMsrs++;
-# endif /* HC_ARCH_BITS == 64 */
-    }
-
-# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-    if (HMVMX_IS_64BIT_HOST_MODE())
-    {
-        pHostMsr->u32Msr      = MSR_K6_STAR;
-        pHostMsr->u32Reserved = 0;
-        pHostMsr->u64Value    = ASMRdMsr(MSR_K6_STAR);              /* legacy syscall eip, cs & ss */
-        pHostMsr++; cHostMsrs++;
-        pHostMsr->u32Msr      = MSR_K8_LSTAR;
-        pHostMsr->u32Reserved = 0;
-        pHostMsr->u64Value    = ASMRdMsr(MSR_K8_LSTAR);             /* 64-bit mode syscall rip */
-        pHostMsr++; cHostMsrs++;
-        pHostMsr->u32Msr      = MSR_K8_SF_MASK;
-        pHostMsr->u32Reserved = 0;
-        pHostMsr->u64Value    = ASMRdMsr(MSR_K8_SF_MASK);           /* syscall flag mask */
-        pHostMsr++; cHostMsrs++;
-        pHostMsr->u32Msr      = MSR_K8_KERNEL_GS_BASE;
-        pHostMsr->u32Reserved = 0;
-        pHostMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);    /* swapgs exchange value */
-        pHostMsr++; cHostMsrs++;
-    }
-# endif
-
-    /* Host TSC AUX MSR must be restored since we always load/store guest TSC AUX MSR. */
-    if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
-    {
-        pHostMsr->u32Msr      = MSR_K8_TSC_AUX;
-        pHostMsr->u32Reserved = 0;
-        pHostMsr->u64Value    = ASMRdMsr(MSR_K8_TSC_AUX);
-        pHostMsr++; cHostMsrs++;
-    }
-
-    /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
-    if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.Msrs.u64Misc)))
-    {
-        LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.Msrs.u64Misc)));
-        pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_HOST_MSR_STORAGE;
-        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-    }
-
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (   HMVMX_IS_64BIT_HOST_MODE()
+        && pVM->hm.s.fAllow64BitGuests)
+    {
+        hmR0VmxLazySaveHostMsrs(pVCpu);
+    }
 #endif

@@ -4359 +4452 @@

     /*
-     * Shared MSRs that we use the auto-load/store MSR area in the VMCS.
+     * MSRs that we use the auto-load/store MSR area in the VMCS.
      */
-    int rc = VINF_SUCCESS;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
     {
-        /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
-        PVM pVM = pVCpu->CTX_SUFF(pVM);
-        bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
-        if (fSupportsLongMode)
-        {
+        if (pVM->hm.s.fAllow64BitGuests)
+        {
+#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
             hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false /* fUpdateHostMsr */);
             hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false /* fUpdateHostMsr */);
             hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false /* fUpdateHostMsr */);
             hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
-        }
-
 # ifdef DEBUG
-        PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
-        for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
-            Log4(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
+            PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
+            for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
+                Log4(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
 # endif
+#endif
+        }
         VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
     }

@@ -4390 +4481 @@
     if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
     {
-        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);    AssertRCReturn(rc, rc);
+        int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);    AssertRCReturn(rc, rc);
         VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
     }

@@ -4396 +4487 @@
     if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
     {
-        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);  AssertRCReturn(rc, rc);
+        int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);  AssertRCReturn(rc, rc);
         VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
     }

     if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
     {
-        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);  AssertRCReturn(rc, rc);
+        int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);  AssertRCReturn(rc, rc);
         VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
     }

-    return rc;
+    return VINF_SUCCESS;
 }

@@ -4705 +4796 @@
     {
         Log4(("MSR_K6_EFER            = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
+        Log4(("MSR_K8_CSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
+        Log4(("MSR_K8_LSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
         Log4(("MSR_K6_STAR            = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
-        Log4(("MSR_K8_LSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
-        Log4(("MSR_K8_CSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
         Log4(("MSR_K8_SF_MASK         = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
         Log4(("MSR_K8_KERNEL_GS_BASE  = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));

@@ -5739 +5830 @@

 /**
- * Saves the auto load/store'd guest MSRs from the current VMCS into the
- * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL-GS BASE
- * and TSC_AUX.
+ * Saves the set of guest MSRs (that we restore lazily while leaving VT-x) from
+ * the CPU back into the guest-CPU context.
  *
  * @returns VBox status code.

@@ -5751 +5841 @@
  * @remarks No-long-jump zone!!!
  */
+static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    if (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LAZY_MSRS)
+        return VINF_SUCCESS;
+
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (   HMVMX_IS_64BIT_HOST_MODE()
+        && pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+    {
+        /* We should not get preempted to a different CPU at this point while reading the MSRs. */
+        VMMRZCallRing3Disable(pVCpu);
+        HM_DISABLE_PREEMPT_IF_NEEDED();
+
+        hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
+
+        HM_RESTORE_PREEMPT_IF_NEEDED();
+        VMMRZCallRing3Enable(pVCpu);
+    }
+#endif
+
+    pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LAZY_MSRS;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Saves the auto load/store'd guest MSRs from the current VMCS into
+ * the guest-CPU context.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
 static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {

@@ -5756 +5883 @@
         return VINF_SUCCESS;

-    PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
-    Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", pVCpu->hm.s.vmx.cMsrs));
-    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
+    PVMXAUTOMSR pMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
+    uint32_t    cMsrs = pVCpu->hm.s.vmx.cMsrs;
+    Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", cMsrs));
+    for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
     {
         switch (pMsr->u32Msr)
         {
+            case MSR_K8_TSC_AUX:        CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);  break;
             case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR        = pMsr->u64Value;             break;
             case MSR_K6_STAR:           pMixedCtx->msrSTAR         = pMsr->u64Value;             break;
             case MSR_K8_SF_MASK:        pMixedCtx->msrSFMASK       = pMsr->u64Value;             break;
             case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value;             break;
-            case MSR_K8_TSC_AUX:        CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);  break;
             default:
             {

@@ -6176 +6304 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);

+    rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
+    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
+
     rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);

@@ -6444 +6575 @@
  *
  * @remarks If you modify code here, make sure to check whether
- *          hmR0VmxCallRing3Callback() needs to be updated too.
+ *          hmR0VmxCallRing3Callback() needs to be updated too!!!
  * @remarks No-long-jmp zone!!!
  */

@@ -6496 +6627 @@
         VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
         pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
+    }
+#endif
+
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    /* Restore the host MSRs as we're leaving VT-x context. */
+    if (   HMVMX_IS_64BIT_HOST_MODE()
+        && pVM->hm.s.fAllow64BitGuests
+        && pVCpu->hm.s.vmx.fRestoreHostMsrs)
+    {
+        /* We shouldn't reload the guest MSRs without saving it first. */
+        if (!fSaveGuestState)
+        {
+            int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
+            AssertRCReturn(rc, rc);
+        }
+        Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LAZY_MSRS);
+        hmR0VmxLazyRestoreHostMsrs(pVCpu);
+        Assert(!pVCpu->hm.s.vmx.fRestoreHostMsrs);
     }
 #endif

@@ -6510 +6659 @@
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);

-    /** @todo This kinda defeats the purpose of having preemption hooks.
+    /** @todo This partially defeats the purpose of having preemption hooks.
      *  The problem is, deregistering the hooks should be moved to a place that
      *  lasts until the EMT is about to be destroyed not everytime while leaving HM

@@ -6680 +6829 @@
  *                          may be out-of-sync. Make sure to update the required
  *                          fields before using them.
+ *
  * @remarks If you modify code here, make sure to check whether
- *          hmR0VmxLeave() needs to be updated too.
+ *          hmR0VmxLeave() needs to be updated too!!!
  */
 DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)

@@ -6687 +6837 @@
     if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
     {
+        /* If anything here asserts or fails, good luck. */
         VMMRZCallRing3RemoveNotification(pVCpu);
+        VMMRZCallRing3Disable(pVCpu);
         HM_DISABLE_PREEMPT_IF_NEEDED();

-        /* If anything here asserts or fails, good luck. */
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
         if (CPUMIsGuestFPUStateActive(pVCpu))
-            CPUMR0SaveGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
+            CPUMR0SaveGuestFPU(pVM, pVCpu, (PCPUMCTX)pvUser);
         CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);

@@ -6703 +6855 @@
             VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
             pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
+        }
+#endif
+
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        /* Restore the host MSRs as we're leaving VT-x context. */
+        if (   HMVMX_IS_64BIT_HOST_MODE()
+            && pVM->hm.s.fAllow64BitGuests
+            && pVCpu->hm.s.vmx.fRestoreHostMsrs)
+        {
+            hmR0VmxLazyRestoreHostMsrs(pVCpu);
         }
 #endif

@@ -7653 +7815 @@
     }

+    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
+    {
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        if (   HMVMX_IS_64BIT_HOST_MODE()
+            && pVM->hm.s.fAllow64BitGuests)
+        {
+            hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
+        }
+#endif
+        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
+    }
+
     AssertMsg(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
               ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));

@@ -7907 +8081 @@

     /*
-     * Load the state shared between host and guest (FPU, debug).
+     * Load the state shared between host and guest (FPU, debug, lazy MSRs).
      */
     if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))

@@ -8005 +8179 @@
     if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
     {
-        if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
-        {
-            /* VT-x restored the host TSC_AUX MSR for us, update the guest value from the VMCS area
-               if it could have changed without causing a VM-exit. */
-            if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
-            {
-                int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
-                AssertRC(rc2);
-            }
-        }
-
         /** @todo Find a way to fix hardcoding a guestimate. */
         TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()

@@ -9752 +9915 @@
         rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
         if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
+        {
+            rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
             rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
+        }
         AssertRCReturn(rc, rc);
         Log4(("CS:RIP=%04x:%#RX64 ECX=%X\n", pMixedCtx->cs.Sel, pMixedCtx->rip, pMixedCtx->ecx));

@@ -9794 +9960 @@
         rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
         if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
+        {
+            rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
             rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
+        }
         AssertRCReturn(rc, rc);
         Log4(("ecx=%#RX32\n", pMixedCtx->ecx));

@@ -9832 +10001 @@
             if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
                 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+            else if (   HMVMX_IS_64BIT_HOST_MODE()
+                     && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
+            {
+                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
+            }
+#endif
             break;
         }

@@ -9852 +10028 @@
         }

-        /* Writes to MSRs that are part of the auto-load/store are shouldn't cause VM-exits
-           when MSR-bitmaps are supported. */
+        /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
         default:
         {

@@ -9862 +10037 @@
                 HMVMX_RETURN_UNEXPECTED_EXIT();
             }
+
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+            if (   HMVMX_IS_64BIT_HOST_MODE()
+                && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
+            {
+                AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
+                HMVMX_RETURN_UNEXPECTED_EXIT();
+            }
+#endif
             break;
         }

@@ -10083 +10267 @@
     if (fIOString)
     {
-#if 0 /* Not yet ready. IEM gurus with debian 32-bit guest without NP (on ATA reads). See @bugref{5752#c158} */
+#if 0 /* Not yet ready. IEM gurus with debian 32-bit guest without NP (on ATA reads). See @bugref{5752#c158} */
         /*
          * INS/OUTS - I/O String instruction.

@@ -10471 +10655 @@

     /*
-     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date, see
-     * hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update the segment registers and DR7 from the CPU.
+     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date.
+     * Update the segment registers and DR7 from the CPU.
      */
     rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);

@@ -10626 +10810 @@
     }

-    Log4(("EPT return to ring-3 rc=%d\n"));
+    Log4(("EPT return to ring-3 rc=%Rrc\n", rc));
     return rc;
 }
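Taken together, the new hmR0VmxLazy* helpers in this changeset follow a save / load / restore cycle for the syscall-related MSRs (LSTAR, STAR, SF_MASK, KERNEL_GS_BASE): the host values are read once before the guest values may be loaded, a guest value is only written to the CPU when it differs from the saved host value, and the host values are written back when leaving VT-x. The following is a minimal, self-contained sketch of that pattern for a single register; the names (LAZYMSR, lazyMsrSaveHost, lazyMsrLoadGuest, lazyMsrRestoreHost, g_uCpuMsr) are hypothetical stand-ins used only for illustration and are not VirtualBox APIs, and the "MSR" is simulated by a plain variable rather than real RDMSR/WRMSR instructions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-MSR state, analogous to one swapped MSR (e.g. LSTAR). */
typedef struct
{
    uint64_t uHostValue;    /* Host value saved before the guest value is loaded. */
    bool     fGuestLoaded;  /* True while the simulated CPU register holds the guest value. */
} LAZYMSR;

static uint64_t g_uCpuMsr = UINT64_C(0xfffff80000001000); /* Simulated CPU register. */

/* Save the host value once, but only if the guest value is not already loaded. */
static void lazyMsrSaveHost(LAZYMSR *pMsr)
{
    if (!pMsr->fGuestLoaded)
        pMsr->uHostValue = g_uCpuMsr;
}

/* Load the guest value, skipping the write when it already equals the saved host value. */
static void lazyMsrLoadGuest(LAZYMSR *pMsr, uint64_t uGuestValue)
{
    if (pMsr->fGuestLoaded || uGuestValue != pMsr->uHostValue)
        g_uCpuMsr = uGuestValue;
    pMsr->fGuestLoaded = true;
}

/* Read the guest value back into the context and restore the host value. */
static void lazyMsrRestoreHost(LAZYMSR *pMsr, uint64_t *puGuestValue)
{
    if (pMsr->fGuestLoaded)
    {
        *puGuestValue = g_uCpuMsr;
        g_uCpuMsr = pMsr->uHostValue;
        pMsr->fGuestLoaded = false;
    }
}

int main(void)
{
    LAZYMSR  Msr    = { 0, false };
    uint64_t uGuest = UINT64_C(0xffffffff81800000);

    lazyMsrSaveHost(&Msr);              /* cf. hmR0VmxLazySaveHostMsrs   */
    lazyMsrLoadGuest(&Msr, uGuest);     /* cf. hmR0VmxLazyLoadGuestMsrs  */
    /* ... the guest runs and may rewrite the register ... */
    lazyMsrRestoreHost(&Msr, &uGuest);  /* cf. hmR0VmxLazySaveGuestMsrs +
                                               hmR0VmxLazyRestoreHostMsrs */

    printf("guest=%#llx host=%#llx\n",
           (unsigned long long)uGuest, (unsigned long long)g_uCpuMsr);
    return 0;
}

Because the write in lazyMsrLoadGuest is skipped when guest and host values already match, the common case where a 64-bit guest uses the same SYSCALL configuration as the host avoids most of the MSR writes on every VM entry/exit; that is the motivation for the "lazy" handling the changeset introduces.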