Changeset 46294 in vbox for trunk/src/VBox/VMM
- Timestamp: May 28, 2013 12:07:56 PM (12 years ago)
- File: 1 edited
Legend:
- Unmodified (context, no prefix)
- Added (+)
- Removed (-)
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- r46289
+++ r46294
@@ -2325 +2325 @@
         uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);

 # if HC_ARCH_BITS == 64
         /* Paranoia. 64-bit code requires these bits to be set always. */
         Assert((u64HostEfer & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
+
+        /*
+         * We currently do not save/restore host EFER, we just make sure it doesn't get modified by VT-x operation.
+         * All guest accesses (read, write) on EFER cause VM-exits. If we are to conditionally load the guest EFER for
+         * some reason (e.g. allow transparent reads) we would activate the code below.
+         */
+# if 0
         /* All our supported 64-bit host platforms must have NXE bit set. Otherwise we can change the below code to save EFER. */
         Assert(u64HostEfer & (MSR_K6_EFER_NXE));
-
         /* The SCE bit is only applicable in 64-bit mode. Save EFER if it doesn't match what the guest has.
            See Intel spec. 30.10.4.3 "Handling the SYSCALL and SYSRET Instructions". */
@@ -2347 +2353 @@
             }
         }
-#else  /* HC_ARCH_BITS != 64 */
+# endif
+# else /* HC_ARCH_BITS != 64 */
         pHostMsr->u32IndexMSR = MSR_K6_EFER;
         pHostMsr->u32Reserved = 0;
@@ -2357 +2364 @@
         }
         else
 # endif
             pHostMsr->u64Value = u64HostEfer;
         pHostMsr++; cHostMsrs++;
 # endif /* HC_ARCH_BITS == 64 */
     }

@@ -2400 +2407 @@
     rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
     AssertRCReturn(rc, rc);
-# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     if (HMVMX_IS_64BIT_HOST_MODE())
     {
@@ -2413 +2420 @@
         rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
     }
-# elif HC_ARCH_BITS == 32
+#elif HC_ARCH_BITS == 32
     rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
     AssertRCReturn(rc, rc);
     rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
-# else
+#else
     rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
     AssertRCReturn(rc, rc);
     rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
-# endif
+#endif
     AssertRCReturn(rc, rc);

@@ -3742 +3749 @@

     /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
-    const bool fSupportsNX       = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
-    const bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
-    if (fSupportsNX || fSupportsLongMode)
-    {
-        /** @todo support save IA32_EFER, i.e.
-         *        VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, in which case the
-         *        guest EFER need not be part of the VM-entry MSR-load area. Also
-         *        allow the guest to read EFER without causing a VM-exit when
-         *        possible. */
-        /* Do -not- load guest EFER as we don't save/restore the host EFER always. See hmr0VmxSaveHostMsrs() */
-#if 0
-        pGuestMsr->u32IndexMSR = MSR_K6_EFER;
-        pGuestMsr->u32Reserved = 0;
-        pGuestMsr->u64Value    = pMixedCtx->msrEFER;
-        /* VT-x will complain if only MSR_K6_EFER_LME is set. See Intel spec. 26.4 "Loading MSRs" for details. */
-        if (!CPUMIsGuestInLongModeEx(pMixedCtx))
-            pGuestMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
-        pGuestMsr++; cGuestMsrs++;
-#endif
-        if (fSupportsLongMode)
-        {
-            pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
-            pGuestMsr->u32Reserved = 0;
-            pGuestMsr->u64Value    = pMixedCtx->msrLSTAR;        /* 64 bits mode syscall rip */
-            pGuestMsr++; cGuestMsrs++;
-            pGuestMsr->u32IndexMSR = MSR_K6_STAR;
-            pGuestMsr->u32Reserved = 0;
-            pGuestMsr->u64Value    = pMixedCtx->msrSTAR;         /* legacy syscall eip, cs & ss */
-            pGuestMsr++; cGuestMsrs++;
-            pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
-            pGuestMsr->u32Reserved = 0;
-            pGuestMsr->u64Value    = pMixedCtx->msrSFMASK;       /* syscall flag mask */
-            pGuestMsr++; cGuestMsrs++;
-            pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
-            pGuestMsr->u32Reserved = 0;
-            pGuestMsr->u64Value    = pMixedCtx->msrKERNELGSBASE; /* swapgs exchange value */
-            pGuestMsr++; cGuestMsrs++;
-        }
+    /** @todo r=ramshankar: Optimize this further to do lazy restoration and only
+     *        when the guest really is in 64-bit mode. */
+    bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
+    if (fSupportsLongMode)
+    {
+        pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
+        pGuestMsr->u32Reserved = 0;
+        pGuestMsr->u64Value    = pMixedCtx->msrLSTAR;        /* 64 bits mode syscall rip */
+        pGuestMsr++; cGuestMsrs++;
+        pGuestMsr->u32IndexMSR = MSR_K6_STAR;
+        pGuestMsr->u32Reserved = 0;
+        pGuestMsr->u64Value    = pMixedCtx->msrSTAR;         /* legacy syscall eip, cs & ss */
+        pGuestMsr++; cGuestMsrs++;
+        pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
+        pGuestMsr->u32Reserved = 0;
+        pGuestMsr->u64Value    = pMixedCtx->msrSFMASK;       /* syscall flag mask */
+        pGuestMsr++; cGuestMsrs++;
+        pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
+        pGuestMsr->u32Reserved = 0;
+        pGuestMsr->u64Value    = pMixedCtx->msrKERNELGSBASE; /* swapgs exchange value */
+        pGuestMsr++; cGuestMsrs++;
     }

@@ -6532 +6522 @@
     if (pVCpu->hm.s.vmx.fRestoreHostFlags)
     {
+#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+        /** @todo r=ramshankar: This is broken when
+         *        VBOX_WITH_VMMR0_DISABLE_PREEMPTION is not defined. As
+         *        VMXRestoreHostState() may unconditionally enables interrupts. */
+#error "VMM: Fix Me! Make VMXRestoreHostState() function to skip cli/sti."
+#else
+        Assert(ASMIntAreEnabled());
         VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
+#endif
         pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
     }
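Background for the guest-MSR hunk: each entry of the VT-x MSR auto-load/store area is a 128-bit record (32-bit MSR index, 32 reserved bits, 64-bit value), and the code above appends one such record per MSR and then bumps pGuestMsr and cGuestMsrs. The following is a minimal standalone sketch of that pattern, not VirtualBox code: the MSRENTRY struct, the AddMsrEntry() helper and the sample values are invented for illustration, and only the architectural MSR numbers are real.

/* Standalone sketch of filling a VMX MSR auto-load/store area, mirroring the
 * u32IndexMSR / u32Reserved / u64Value pattern the diff repeats for LSTAR,
 * STAR, SF_MASK and KERNEL_GS_BASE.  Not VirtualBox code; the struct, the
 * helper and the values are illustrative only. */
#include <stdint.h>
#include <stdio.h>

/* Architectural MSR indices. */
#define MSR_STAR            0xC0000081U
#define MSR_LSTAR           0xC0000082U
#define MSR_SF_MASK         0xC0000084U
#define MSR_KERNEL_GS_BASE  0xC0000102U

/* One 128-bit entry of the MSR auto-load/store area. */
typedef struct MSRENTRY
{
    uint32_t u32IndexMSR;
    uint32_t u32Reserved;
    uint64_t u64Value;
} MSRENTRY;

/* Hypothetical helper: append one MSR to the area and bump the count. */
static void AddMsrEntry(MSRENTRY *paMsrs, uint32_t *pcMsrs, uint32_t idMsr, uint64_t uValue)
{
    MSRENTRY *pEntry = &paMsrs[*pcMsrs];
    pEntry->u32IndexMSR = idMsr;
    pEntry->u32Reserved = 0;
    pEntry->u64Value    = uValue;
    (*pcMsrs)++;
}

int main(void)
{
    MSRENTRY aGuestMsrs[8];
    uint32_t cGuestMsrs = 0;

    /* The four syscall-related MSRs the changeset keeps loading for the guest;
     * dummy values stand in for the guest context (pMixedCtx->msrLSTAR etc.). */
    AddMsrEntry(aGuestMsrs, &cGuestMsrs, MSR_LSTAR,          0xffffffff81800000ULL);
    AddMsrEntry(aGuestMsrs, &cGuestMsrs, MSR_STAR,           0x0023001000000000ULL);
    AddMsrEntry(aGuestMsrs, &cGuestMsrs, MSR_SF_MASK,        0x0000000000047700ULL);
    AddMsrEntry(aGuestMsrs, &cGuestMsrs, MSR_KERNEL_GS_BASE, 0xffff880000000000ULL);

    /* EFER is deliberately absent; see the new comments in the diff above. */
    for (uint32_t i = 0; i < cGuestMsrs; i++)
        printf("MSR %#010x = %#018llx\n",
               (unsigned)aGuestMsrs[i].u32IndexMSR,
               (unsigned long long)aGuestMsrs[i].u64Value);
    return 0;
}

As the new comments in the diff explain, EFER is left out of the MSR areas for now: the host EFER is not saved/restored around VM-entry/VM-exit, and every guest EFER access causes a VM-exit anyway, so omitting the entry keeps VT-x operation from modifying the host value.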