Timestamp:
    May 13, 2014 11:36:49 AM
File:
    1 edited
Index: trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (revision 51230)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (revision 51238)
@@ -1574,6 +1574,4 @@
 static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
 {
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-
     uint32_t u32Val;
     int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
@@ -3176,12 +3174,9 @@
      * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs().
      */
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && pVM->hm.s.vmx.fSupportsVmcsEfer)
+    if (pVM->hm.s.vmx.fSupportsVmcsEfer)
     {
         rc = VMXWriteVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
         AssertRCReturn(rc, rc);
     }
-#endif
 
     /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
@@ -3215,4 +3210,11 @@
         return true;
 #endif
+
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
+    if (CPUMIsGuestInLongMode(pVCpu))
+        return false;
+#endif
+
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     uint64_t u64HostEfer = pVM->hm.s.vmx.u64HostEfer;
@@ -3230,10 +3232,14 @@
 
     /*
-     * If the guest uses PAE and EFER.NXE bit differs, we need to swap as it affects guest paging.
-     * 64-bit paging implies CR4.PAE as well. See Intel spec. 4.5 "IA32e Paging".
+     * If the guest uses PAE and EFER.NXE bit differs, we need to swap EFER as it
+     * affects guest paging. 64-bit paging implies CR4.PAE as well.
+     * See Intel spec. 4.5 "IA32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
      */
     if (   (pMixedCtx->cr4 & X86_CR4_PAE)
+        && (pMixedCtx->cr0 & X86_CR0_PG)
         && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
     {
+        /* Assert that host is PAE capable. */
+        Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
         return true;
     }
@@ -3281,13 +3287,10 @@
 
     /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && pVM->hm.s.vmx.fSupportsVmcsEfer
+    if (   pVM->hm.s.vmx.fSupportsVmcsEfer
         && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
     {
         val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
-        Log4(("Load: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n"));
-    }
-#endif
+        Log4(("Load: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n"));
+    }
 
     /*
@@ -3354,16 +3357,8 @@
         val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
         Log4(("Load: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
-
-        /* If the newer VMCS fields for managing EFER exists, use it. */
-        if (   pVM->hm.s.vmx.fSupportsVmcsEfer
-            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
-        {
-            val |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
-                 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
-        }
     }
     else
         Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
-#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
+#else
     if (CPUMIsGuestInLongModeEx(pMixedCtx))
     {
@@ -3374,5 +3369,14 @@
     else
         Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
-#endif
+#endif /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
+
+    /* If the newer VMCS fields for managing EFER exists, use it. */
+    if (   pVM->hm.s.vmx.fSupportsVmcsEfer
+        && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+    {
+        val |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
+             | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
+        Log4(("Load: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n"));
+    }
 
     /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
@@ -4720,6 +4724,5 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
     {
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-        if (HMVMX_IS_64BIT_HOST_MODE())
+        if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
         {
             /*
@@ -4729,28 +4732,19 @@
             if (pVM->hm.s.vmx.fSupportsVmcsEfer)
             {
-                /* Not strictly necessary to check hmR0VmxShouldSwapEferMsr() here, but it avoids
-                   one VM-write when we're a nested guest. */
-                if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
-                {
-                    int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
-                    AssertRCReturn(rc,rc);
-                    Log4(("Load: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pMixedCtx->msrEFER));
-                }
+                int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
+                AssertRCReturn(rc,rc);
+                Log4(("Load: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pMixedCtx->msrEFER));
             }
             else
             {
-                if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
-                {
-                    hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */);
-                    /* We need to intercept reads too, see @bugref{7386} comment #16. */
-                    hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
-                    Log4(("Load: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER,
-                          pVCpu->hm.s.vmx.cMsrs));
-                }
-                else
-                    hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
+                hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */);
+                /* We need to intercept reads too, see @bugref{7386} comment #16. */
+                hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
+                Log4(("Load: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER,
+                      pVCpu->hm.s.vmx.cMsrs));
             }
         }
-#endif
+        else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
+            hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
     }
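
For readers skimming the hunks above: the core of this changeset is that the EFER swap is now keyed off the guest's paging configuration (via hmR0VmxShouldSwapEferMsr()) rather than off host bitness alone. The standalone sketch below distills only the PAE/NXE clause visible in the diff; the GUESTCTX struct, the constants, and the main() harness are simplified stand-ins for illustration, not the actual VirtualBox types or API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_K6_EFER_NXE  UINT64_C(0x800)        /* EFER.NXE (bit 11) */
#define X86_CR4_PAE      UINT64_C(0x20)         /* CR4.PAE  (bit 5)  */
#define X86_CR0_PG       UINT64_C(0x80000000)   /* CR0.PG   (bit 31) */

/* Simplified stand-in for the guest-CPU context (pMixedCtx in the real code). */
typedef struct GUESTCTX
{
    uint64_t cr0;
    uint64_t cr4;
    uint64_t msrEFER;
} GUESTCTX;

/* Mirrors the PAE/NXE clause of hmR0VmxShouldSwapEferMsr() after this change:
   if the guest uses PAE paging and its EFER.NXE differs from the host's, EFER
   must be swapped on world switches because NXE affects the guest's page-table
   walks (64-bit paging implies CR4.PAE, so long mode is covered too). */
static bool shouldSwapEferMsr(uint64_t u64HostEfer, const GUESTCTX *pCtx)
{
    if (   (pCtx->cr4 & X86_CR4_PAE)
        && (pCtx->cr0 & X86_CR0_PG)
        && (pCtx->msrEFER & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
        return true;
    return false;
}

int main(void)
{
    /* Host has NXE set, guest runs PAE paging with NXE clear -> swap needed. */
    GUESTCTX Ctx = { X86_CR0_PG, X86_CR4_PAE, 0 };
    printf("swap EFER: %s\n", shouldSwapEferMsr(MSR_K6_EFER_NXE, &Ctx) ? "yes" : "no");

    /* Same guest with NXE matching the host -> this clause reports no swap. */
    Ctx.msrEFER = MSR_K6_EFER_NXE;
    printf("swap EFER: %s\n", shouldSwapEferMsr(MSR_K6_EFER_NXE, &Ctx) ? "yes" : "no");
    return 0;
}

When a swap is needed, the last hunk shows the two mechanisms the real code picks between: the dedicated VMCS EFER controls when fSupportsVmcsEfer is set, otherwise the auto-load/store MSR area via hmR0VmxAddAutoLoadStoreMsr() with EFER reads intercepted.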