Changeset 42156 in vbox
- Timestamp: Jul 16, 2012 6:59:45 AM
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/VMMR0/HWACCMR0Mixed.mac (r30414 → r42156)

@@ -404 +404 @@
 %endif

-    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
-    ;; @todo use the automatic load feature for MSRs
+%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+    ; Save the host MSRs and load the guest MSRs
     LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 %if 0 ; not supported on Intel CPUs
@@ -412 +412 @@
     LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
     LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
+%endif
+    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}
     LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

@@ -538 +540 @@
     pop xSI             ; pCtx (needed in rsi by the macros below)

-    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
-    ;; @todo use the automatic load feature for MSRs
+    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}.
     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+    ; Save the guest MSRs and load the host MSRs
     LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
     LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
@@ -547 +550 @@
 %endif
     LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
+%endif

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES

@@ -609 +613 @@
     pop xSI             ; pCtx (needed in rsi by the macros below)

-    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
-    ;; @todo use the automatic load feature for MSRs
+    ; Kernel GS base is special, we need to manually load/store it. See @bugref{6208}.
     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+    ; Load the host MSRs
     LOADHOSTMSR MSR_K8_SF_MASK
     LOADHOSTMSR MSR_K6_STAR
@@ -618 +623 @@
 %endif
     LOADHOSTMSR MSR_K8_LSTAR
+%endif

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES

@@ -656 +662 @@
     pop xSI             ; pCtx (needed in rsi by the macros below)

-    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
-    ;; @todo use the automatic load feature for MSRs
+    ; Kernel GS base is special, load it manually. See @bugref{6208}.
     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+    ; Load the host MSRs
     LOADHOSTMSR MSR_K8_SF_MASK
     LOADHOSTMSR MSR_K6_STAR
@@ -665 +672 @@
 %endif
     LOADHOSTMSR MSR_K8_LSTAR
+%endif

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
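For readers who do not speak NASM macros, here is a standalone C sketch of the ordering that the LOADGUESTMSR / LOADHOSTMSREX lines above implement for the one MSR the changeset keeps manual, MSR_K8_KERNEL_GS_BASE: save the host value and load the guest value before the world switch, then read the (possibly modified) guest value back into the context and restore the host value afterwards. The rdmsr/wrmsr helpers, the CPUCTX struct and the fake MSR variable are illustration-only stand-ins (real code uses the RDMSR/WRMSR instructions and CPUMCTX); only the sequence mirrors the assembly.

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_K8_KERNEL_GS_BASE 0xC0000102   /* the MSR exchanged by SWAPGS */

    /* Stand-ins for RDMSR/WRMSR: a single fake hardware register, illustration only. */
    static uint64_t g_FakeKernelGsBase;
    static uint64_t rdmsr(uint32_t idx)             { (void)idx; return g_FakeKernelGsBase; }
    static void     wrmsr(uint32_t idx, uint64_t v) { (void)idx; g_FakeKernelGsBase = v; }

    /* Simplified guest context; the real value lives in CPUMCTX.msrKERNELGSBASE. */
    typedef struct CPUCTX { uint64_t msrKERNELGSBASE; } CPUCTX;

    int main(void)
    {
        CPUCTX   Ctx = { .msrKERNELGSBASE = 0xFFFF880000000000ULL };  /* guest's value */
        uint64_t uHostValue;

        g_FakeKernelGsBase = 0xFFFF800000001000ULL;                   /* pretend host value */

        /* Before VM-entry: save the host MSR, load the guest MSR (LOADGUESTMSR). */
        uHostValue = rdmsr(MSR_K8_KERNEL_GS_BASE);
        wrmsr(MSR_K8_KERNEL_GS_BASE, Ctx.msrKERNELGSBASE);

        /* ... world switch / guest execution would happen here ... */

        /* After VM-exit: store the guest MSR back, restore the host MSR (LOADHOSTMSREX). */
        Ctx.msrKERNELGSBASE = rdmsr(MSR_K8_KERNEL_GS_BASE);
        wrmsr(MSR_K8_KERNEL_GS_BASE, uHostValue);

        printf("guest KERNEL_GS_BASE=%#llx, host restored=%#llx\n",
               (unsigned long long)Ctx.msrKERNELGSBASE,
               (unsigned long long)g_FakeKernelGsBase);
        return 0;
    }

Keeping this one MSR out of the automatic areas is the point of @bugref{6208}: the automatic load/store of KERNEL_GS_BASE was found unreliable, so its swap stays in the world-switch assembly even when VBOX_WITH_AUTO_MSR_LOAD_RESTORE is defined.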
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r42056 → r42156)

@@ -210 +210 @@

 #ifdef LOG_ENABLED
-    SUPR0Printf("VMXR0InitVM %x\n", pVM);
+    SUPR0Printf("VMXR0InitVM %p\n", pVM);
 #endif

@@ -626 +626 @@
     AssertRC(rc);

-    /* Allow the guest to directly modify these MSRs; they are restored and saved automatically. */
+    /*
+     * Allow the guest to directly modify these MSRs; they are loaded/stored automatically
+     * using MSR-load/store areas in the VMCS.
+     */
     hmR0VmxSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS, true, true);
     hmR0VmxSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, true, true);
@@ -636 +639 @@
     hmR0VmxSetMSRPermission(pVCpu, MSR_K8_GS_BASE, true, true);
     hmR0VmxSetMSRPermission(pVCpu, MSR_K8_FS_BASE, true, true);
+    if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
+        hmR0VmxSetMSRPermission(pVCpu, MSR_K8_TSC_AUX, true, true);
 }

@@ -647 +652 @@
     rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
     AssertRC(rc);
-
     Assert(pVCpu->hwaccm.s.vmx.pHostMSRPhys);
     rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pHostMSRPhys);
@@ -656 +660 @@
     AssertRC(rc);
     rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0);
+    AssertRC(rc);
+    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, 0);
     AssertRC(rc);

@@ -1319 +1325 @@
      * Check if EFER MSR present.
      */
-    if (ASMCpuId_EDX(0x80000001) & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
-    {
-        if (ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
+    uint32_t u32HostExtFeatures = ASMCpuId_EDX(0x80000001);
+    if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
+    {
+        if (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
         {
             pMsr->u32IndexMSR = MSR_K6_STAR;
@@ -1354 +1361 @@
         pMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK);           /* syscall flag mask */
         pMsr++; idxMsr++;
+
+        /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
+#if 0
         pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
         pMsr->u32Reserved = 0;
         pMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);    /* swapgs exchange value */
         pMsr++; idxMsr++;
+#endif
     }
 # endif
+
+    if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
+    {
+        Assert(u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
+        pMsr->u32IndexMSR = MSR_K8_TSC_AUX;
+        pMsr->u32Reserved = 0;
+        pMsr->u64Value = ASMRdMsr(MSR_K8_TSC_AUX);
+        pMsr++; idxMsr++;
+    }
+
+    /** @todo r=ramshankar: check IA32_VMX_MISC bits 27:25 for valid idxMsr
+     *        range. */
     rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, idxMsr);
     AssertRC(rc);
+
+    pVCpu->hwaccm.s.vmx.cCachedMSRs = idxMsr;
 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

@@ -2093 +2118 @@

 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
-    /* Store all guest MSRs in the VM-Entry load area, so they will be loaded during the world switch. */
+    /*
+     * Store all guest MSRs in the VM-entry load area, so they will be loaded
+     * during the world switch.
+     */
     PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;
     unsigned idxMsr = 0;

-    uint32_t ulEdx;
-    uint32_t ulTemp;
-    CPUMGetGuestCpuId(pVCpu, 0x80000001, &ulTemp, &ulTemp, &ulTemp, &ulEdx);
-    /* EFER MSR present? */
-    if (ulEdx & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
+    uint32_t u32GstExtFeatures;
+    uint32_t u32Temp;
+    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Temp, &u32Temp, &u32Temp, &u32GstExtFeatures);
+
+    /*
+     * Check if EFER MSR present.
+     */
+    if (u32GstExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
     {
         pMsr->u32IndexMSR = MSR_K6_EFER;
@@ -2111 +2142 @@
         pMsr++; idxMsr++;

-        if (ulEdx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
+        if (u32GstExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
         {
             pMsr->u32IndexMSR = MSR_K8_LSTAR;
@@ -2125 +2156 @@
             pMsr->u64Value = pCtx->msrSFMASK;           /* syscall flag mask */
             pMsr++; idxMsr++;
+
+            /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
+#if 0
             pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
             pMsr->u32Reserved = 0;
             pMsr->u64Value = pCtx->msrKERNELGSBASE;     /* swapgs exchange value */
             pMsr++; idxMsr++;
-        }
-    }
-    pVCpu->hwaccm.s.vmx.cCachedMSRs = idxMsr;
+#endif
+        }
+    }
+
+    if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
+    {
+        Assert(u32GstExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
+        pMsr->u32IndexMSR = MSR_K8_TSC_AUX;
+        pMsr->u32Reserved = 0;
+        rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pMsr->u64Value);
+        AssertRC(rc);
+        pMsr++; idxMsr++;
+    }
+
+    /*
+     * The number of host MSRs saved must be identical to the number of guest MSRs loaded.
+     * It's not a VT-x requirement but how it's practically used here.
+     */
+    Assert(pVCpu->hwaccm.s.vmx.cCachedMSRs == idxMsr);

     rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, idxMsr);

@@ -2327 +2377 @@

 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
-    /* Save the possibly changed MSRs that we automatically restore and save during a world switch. */
+    /*
+     * Save the possibly changed MSRs that we automatically restore and save during a world switch.
+     */
     for (unsigned i = 0; i < pVCpu->hwaccm.s.vmx.cCachedMSRs; i++)
     {
@@ -2344 +2396 @@
             pCtx->msrSFMASK = pMsr->u64Value;
             break;
+
+        /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
+#if 0
         case MSR_K8_KERNEL_GS_BASE:
             pCtx->msrKERNELGSBASE = pMsr->u64Value;
             break;
+#endif
+        case MSR_K8_TSC_AUX:
+            CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);
+            break;
+
         case MSR_K6_EFER:
             /* EFER can't be changed without causing a VM-exit. */

@@ -2967 +3027 @@
         pCtx->msrLSTAR = u8LastTPR;

+        /** @todo r=ramshankar: we should check for MSR-bitmap support here. */
         if (fPending)
         {

@@ -3099 +3160 @@
     TMNotifyStartOfExecution(pVCpu);

+#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     /*
      * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
@@ -3112 +3174 @@
         ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTSCAux);
     }
+#endif

 #ifdef VBOX_WITH_KERNEL_USING_XMM

@@ -3121 +3184 @@
     ASMAtomicIncU32(&pVCpu->hwaccm.s.cWorldSwitchExits);

+#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     /*
      * Restore host's TSC_AUX.
@@ -3129 +3193 @@
         ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hwaccm.s.vmx.u64HostTSCAux);
     }
+#endif

     /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */

@@ -5172 +5237 @@
         if (VMX_IS_64BIT_HOST_MODE())
         {
-            Log(("MSR_K6_EFER       = %RX64\n", ASMRdMsr(MSR_K6_EFER)));
-            Log(("MSR_K6_STAR       = %RX64\n", ASMRdMsr(MSR_K6_STAR)));
-            Log(("MSR_K8_LSTAR      = %RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
-            Log(("MSR_K8_CSTAR      = %RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
-            Log(("MSR_K8_SF_MASK    = %RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
+            Log(("MSR_K6_EFER           = %RX64\n", ASMRdMsr(MSR_K6_EFER)));
+            Log(("MSR_K6_STAR           = %RX64\n", ASMRdMsr(MSR_K6_STAR)));
+            Log(("MSR_K8_LSTAR          = %RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
+            Log(("MSR_K8_CSTAR          = %RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
+            Log(("MSR_K8_SF_MASK        = %RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
+            Log(("MSR_K8_KERNEL_GS_BASE = %RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
         }
 # endif
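The C++ changes above all revolve around VT-x's MSR auto load/store areas: each area is an array of 16-byte entries (MSR index, reserved field, 64-bit value) whose physical address and entry count are written into the VMCS. The standalone sketch below models that bookkeeping: it fills a VM-entry load array the way the guest-state code in this changeset does (EFER always, the SYSCALL MSRs only for long-mode-capable guests, TSC_AUX only when RDTSCP is exposed, and KERNEL_GS_BASE deliberately omitted), then shows the VM-exit side copying the stored values back into the guest context. The MSRENTRY/GUESTCTX types and helper functions are hypothetical stand-ins modeled on the VMXMSR fields and CPUMCTX members visible in the diff, not the VirtualBox API.

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Entry layout of a VMX MSR load/store area, modeled on the VMXMSR fields in the diff. */
    typedef struct MSRENTRY
    {
        uint32_t u32IndexMSR;
        uint32_t u32Reserved;
        uint64_t u64Value;
    } MSRENTRY;

    /* Architectural MSR indices used below. */
    #define MSR_K6_EFER     0xC0000080
    #define MSR_K6_STAR     0xC0000081
    #define MSR_K8_LSTAR    0xC0000082
    #define MSR_K8_SF_MASK  0xC0000084
    #define MSR_K8_TSC_AUX  0xC0000103

    /* Simplified guest context; the VirtualBox equivalents live in CPUMCTX. */
    typedef struct GUESTCTX
    {
        uint64_t msrEFER, msrLSTAR, msrSTAR, msrSFMASK, msrTSCAUX;
        bool     fLongMode;   /* CPUID 80000001h EDX long-mode bit */
        bool     fRdTscP;     /* RDTSCP exposed to the guest */
    } GUESTCTX;

    /* Fill the VM-entry MSR-load area; returns the count that would be written to
       the VMCS entry-load count field. KERNEL_GS_BASE is intentionally absent. */
    static unsigned fillEntryLoadArea(MSRENTRY *paMsrs, const GUESTCTX *pCtx)
    {
        unsigned idxMsr = 0;
        paMsrs[idxMsr++] = (MSRENTRY){ MSR_K6_EFER, 0, pCtx->msrEFER };
        if (pCtx->fLongMode)
        {
            paMsrs[idxMsr++] = (MSRENTRY){ MSR_K8_LSTAR,   0, pCtx->msrLSTAR  };
            paMsrs[idxMsr++] = (MSRENTRY){ MSR_K6_STAR,    0, pCtx->msrSTAR   };
            paMsrs[idxMsr++] = (MSRENTRY){ MSR_K8_SF_MASK, 0, pCtx->msrSFMASK };
        }
        if (pCtx->fRdTscP)
            paMsrs[idxMsr++] = (MSRENTRY){ MSR_K8_TSC_AUX, 0, pCtx->msrTSCAUX };
        return idxMsr;
    }

    /* VM-exit side: copy the values the CPU stored back into the guest context. */
    static void readBackStoreArea(GUESTCTX *pCtx, const MSRENTRY *paMsrs, unsigned cMsrs)
    {
        for (unsigned i = 0; i < cMsrs; i++)
            switch (paMsrs[i].u32IndexMSR)
            {
                case MSR_K8_LSTAR:   pCtx->msrLSTAR  = paMsrs[i].u64Value; break;
                case MSR_K6_STAR:    pCtx->msrSTAR   = paMsrs[i].u64Value; break;
                case MSR_K8_SF_MASK: pCtx->msrSFMASK = paMsrs[i].u64Value; break;
                case MSR_K8_TSC_AUX: pCtx->msrTSCAUX = paMsrs[i].u64Value; break;
                case MSR_K6_EFER:    /* EFER can't change without a VM-exit; nothing to do. */ break;
                default: break;
            }
    }

    int main(void)
    {
        GUESTCTX Ctx = { .msrEFER = 0xD01, .msrLSTAR = 0xFFFFFFFF81800000ULL,
                         .fLongMode = true, .fRdTscP = true };
        MSRENTRY aGuestMsrs[8];

        unsigned cMsrs = fillEntryLoadArea(aGuestMsrs, &Ctx);
        printf("VM-entry MSR-load count: %u\n", cMsrs);  /* 5: EFER, LSTAR, STAR, SF_MASK, TSC_AUX */

        /* Pretend the guest changed TSC_AUX and the CPU stored it on VM-exit. */
        aGuestMsrs[cMsrs - 1].u64Value = 3;
        readBackStoreArea(&Ctx, aGuestMsrs, cMsrs);
        printf("guest TSC_AUX after exit: %llu\n", (unsigned long long)Ctx.msrTSCAUX);
        return 0;
    }

The count returned by the fill step is what the real code writes to VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, and the new Assert in the changeset checks that it matches cCachedMSRs, the number of host MSRs set up when the host area was initialised.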
trunk/src/VBox/VMM/include/HWACCMInternal.h (r42056 → r42156)

@@ -42 +42 @@


-#if 0
-/* Seeing somewhat random behaviour on my Nehalem system with auto-save of guest MSRs;
- * for some strange reason the CPU doesn't save the MSRs during the VM-exit.
- * Clearly visible with a dual VCPU configured OpenSolaris 200906 live cd VM.
+/* The MSR auto load/store does not work for KERNEL_GS_BASE MSR, thus we
+ * handle this MSR manually. See @bugref{6208}. This is clearly visible while
+ * booting Solaris 11 (11.1 b19) VMs with 2 Cpus.
  *
- * Note: change the assembly files when enabling this! (remove the manual auto load/save)
+ * Note: don't forget to update the assembly files while modifying this!
  */
 #define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
-#endif

 RT_C_DECLS_BEGIN

@@ -626 +624 @@
         /** Virtual address of the MSR load area (1 page). */
         R0PTRTYPE(uint8_t *)        pHostMSR;
-#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

         /* Number of automatically loaded/restored MSRs. */
         uint32_t                    cCachedMSRs;
         uint32_t                    uAlignement;
+#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

         /* Host's IA32_TSC_AUX MSR (for RDTSCP in VMX non-root). */
trunk/src/VBox/VMM/include/HWACCMInternal.mac (r37323 → r42156)

@@ -16 +16 @@

 %define VMX_USE_CACHED_VMCS_ACCESSES
+%define VBOX_WITH_AUTO_MSR_LOAD_RESTORE

 ;Maximum number of cached entries.