Changeset 51220 in vbox for trunk/src/VBox/VMM
- Timestamp: May 9, 2014 1:51:16 AM
- svn:sync-xref-src-repo-rev: 93632
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r51186 → r51220

@@ -1359 +1359 @@
      * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
      */
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_EFER_MSR))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
     {
         pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
-        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_EFER_MSR);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
     }

@@ -4433 +4433 @@
     }
     else if (pCtx->ecx == MSR_K6_EFER)
-        HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_EFER_MSR);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
     else if (pCtx->ecx == MSR_IA32_TSC)
         pSvmTransient->fUpdateTscOffsetting = true;
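On the AMD-V side the change is purely a rename: the EFER dirty flag moves out of the SVM-specific group into the generic guest-state group (see the HMInternal.h hunk further down), and the three call sites above follow. The flag itself is used in the usual HM set/test/clear round trip. Below is a minimal, self-contained sketch of that pattern, using simplified stand-in names rather than the real HMCPU_CF_* macros and VMCB structures:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the real HM dirty-flag machinery (names are illustrative only). */
#define MY_BIT(b)                  (UINT32_C(1) << (b))
#define MY_CHANGED_GUEST_EFER_MSR  MY_BIT(16)

typedef struct MYVCPU
{
    uint32_t fContextUseFlags;     /* which pieces of guest state are dirty */
    uint64_t u64GuestEfer;         /* guest EFER value to sync into the VMCB */
    uint64_t u64VmcbEfer;          /* what the (pretend) VMCB currently holds */
} MYVCPU;

#define MY_CF_SET(pVCpu, f)        ((pVCpu)->fContextUseFlags |= (f))
#define MY_CF_IS_PENDING(pVCpu, f) (((pVCpu)->fContextUseFlags & (f)) != 0)
#define MY_CF_CLEAR(pVCpu, f)      ((pVCpu)->fContextUseFlags &= ~(f))

/* Guest wrote EFER (e.g. a WRMSR intercept): record the value and mark it dirty. */
static void onGuestWrmsrEfer(MYVCPU *pVCpu, uint64_t uNewEfer)
{
    pVCpu->u64GuestEfer = uNewEfer;
    MY_CF_SET(pVCpu, MY_CHANGED_GUEST_EFER_MSR);
}

/* Before resuming the guest: sync only what is dirty, then clear the flag. */
static void loadGuestEfer(MYVCPU *pVCpu)
{
    if (MY_CF_IS_PENDING(pVCpu, MY_CHANGED_GUEST_EFER_MSR))
    {
        pVCpu->u64VmcbEfer = pVCpu->u64GuestEfer;
        MY_CF_CLEAR(pVCpu, MY_CHANGED_GUEST_EFER_MSR);
    }
}

int main(void)
{
    MYVCPU VCpu = { 0, 0, 0 };
    onGuestWrmsrEfer(&VCpu, UINT64_C(0x500));   /* arbitrary example value */
    loadGuestEfer(&VCpu);
    printf("VMCB EFER=%#llx dirty=%#x\n", (unsigned long long)VCpu.u64VmcbEfer, VCpu.fContextUseFlags);
    return 0;
}

The real code differs in where the flag lives and in OR-ing MSR_K6_EFER_SVME into the VMCB value, but the set/test/clear flow is the same.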
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r51182 → r51220

@@ -1556 +1556 @@
 
 
+/**
+ * Verifies that our cached values of the VMCS controls are all
+ * consistent with what's actually present in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param pVCpu Pointer to the VMCPU.
+ */
+static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    uint32_t u32Val;
+    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32EntryCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
+                    VERR_VMX_ENTRY_CTLS_CACHE_INVALID);
+
+    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32ExitCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
+                    VERR_VMX_EXIT_CTLS_CACHE_INVALID);
+
+    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32PinCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
+                    VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID);
+
+    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
+                    VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID);
+
+    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
+                    VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID);
+
+    return VINF_SUCCESS;
+}
+
+
 #ifdef VBOX_STRICT
+/**
+ * Verifies that our cached host EFER value has not changed
+ * since we cached it.
+ *
+ * @param pVCpu Pointer to the VMCPU.
+ */
+static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
+    {
+        uint64_t u64Val;
+        int rc = VMXReadVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, &u64Val);
+        AssertRC(rc);
+
+        uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
+        AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
+    }
+}
+
+
 /**
  * Verifies whether the guest/host MSR pairs in the auto-load/store area in the

@@ -2660 +2723 @@
         return rc;
     }
+
+    /* Check if we can use the VMCS controls for swapping the EFER MSR. */
+    Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (   HMVMX_IS_64BIT_HOST_MODE()
+        && (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
+        && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
+        && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
+    {
+        pVM->hm.s.vmx.fSupportsVmcsEfer = true;
+    }
+#endif
 
     for (VMCPUID i = 0; i < pVM->cCpus; i++)

@@ -3079 +3154 @@
     AssertRCReturn(rc, rc);
 
-    /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
-     *        hmR0VmxSetupExitCtls() !! */
+    /*
+     * If the CPU supports the newer VMCS controls for managing EFER, use it.
+     */
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (   HMVMX_IS_64BIT_HOST_MODE()
+        && pVM->hm.s.vmx.fSupportsVmcsEfer)
+    {
+        rc = VMXWriteVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
+        AssertRCReturn(rc, rc);
+    }
+#endif
+
+    /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
+     *        hmR0VmxLoadGuestExitCtls() !! */
+
     return rc;
+}
+
+
+/**
+ * Figures out if we need to swap the EFER MSR which is
+ * particularly expensive.
+ *
+ * We check all relevant bits. For now, that's everything
+ * besides LMA/LME, as these two bits are handled by VM-entry,
+ * see hmR0VmxLoadGuestExitCtls() and
+ * hmR0VMxLoadGuestEntryCtls().
+ *
+ * @returns true if we need to load guest EFER, false otherwise.
+ * @param pVCpu Pointer to the VMCPU.
+ * @param pMixedCtx Pointer to the guest-CPU context. The data may be
+ *                  out-of-sync. Make sure to update the required fields
+ *                  before using them.
+ *
+ * @remarks Requires EFER, CR4.
+ * @remarks No-long-jump zone!!!
+ */
+static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    PVM      pVM          = pVCpu->CTX_SUFF(pVM);
+    uint64_t u64HostEfer  = pVM->hm.s.vmx.u64HostEfer;
+    uint64_t u64GuestEfer = pMixedCtx->msrEFER;
+
+    /*
+     * For 64-bit guests, if EFER.SCE bit differs, we need to swap to ensure that the
+     * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.
+     */
+    if (   pVM->hm.s.fAllow64BitGuests
+        && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
+    {
+        return true;
+    }
+
+    /*
+     * If the guest uses PAE and EFER.NXE bit differs, we need to swap as it affects guest paging.
+     * 64-bit paging implies CR4.PAE as well. See Intel spec. 4.5 "IA32e Paging".
+     */
+    if (   (pMixedCtx->cr4 & X86_CR4_PAE)
+        && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
+    {
+        return true;
+    }
+
+    /** @todo Check the latest Intel spec. for any other bits,
+     *        like SMEP/SMAP? */
+    return false;
 }
 

@@ -3096 +3234 @@
  *                      before using them.
  *
+ * @remarks Requires EFER.
  * @remarks No-long-jump zone!!!
  */

@@ -3112 +3251 @@
     /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
     if (CPUMIsGuestInLongModeEx(pMixedCtx))
+    {
         val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
+        Log4(("Load: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n"));
+    }
     else
         Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
+
+    /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (   HMVMX_IS_64BIT_HOST_MODE()
+        && pVM->hm.s.vmx.fSupportsVmcsEfer
+        && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+    {
+        val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
+        Log4(("Load: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n"));
+    }
+#endif
 
     /*

@@ -3123 +3276 @@
 
     /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
-     *        VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR,
-     *        VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR */
+     *        VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
 
     if ((val & zap) != val)

@@ -3155 +3307 @@
  *                      before using them.
  *
- * @remarks requires EFER.
+ * @remarks Requires EFER.
  */
 DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)

@@ -3177 +3329 @@
 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     if (HMVMX_IS_64BIT_HOST_MODE())
+    {
         val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
+        Log4(("Load: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
+
+        /* If the newer VMCS fields for managing EFER exists, use it. */
+        if (   pVM->hm.s.vmx.fSupportsVmcsEfer
+            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+        {
+            val |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
+                 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
+        }
+    }
     else
         Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
 #elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
     if (CPUMIsGuestInLongModeEx(pMixedCtx))
-        val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;    /* The switcher goes to long mode. */
+    {
+        /* The switcher returns to long mode, EFER is managed by the switcher. */
+        val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
+        Log4(("Load: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
+    }
     else
         Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));

@@ -3192 +3359 @@
     /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
      *        VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
-     *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR,
-     *        VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR,
-     *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR. */
+     *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
 
     if (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)

@@ -4530 +4695 @@
     }
 
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
+    {
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        if (   HMVMX_IS_64BIT_HOST_MODE()
+            && pVM->hm.s.vmx.fSupportsVmcsEfer
+            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))  /* Not really needed here, but avoids a VM-write as a nested guest. */
+        {
+            int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
+            AssertRCReturn(rc,rc);
+            Log4(("Load: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pMixedCtx->msrEFER));
+        }
+#endif
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
+    }
+
     return VINF_SUCCESS;
 }

@@ -4587 +4767 @@
     {
         /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. */
-        AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS),
-                  ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
+        AssertMsg(HMCPU_CF_IS_SET(pVCpu,   HM_CHANGED_HOST_CONTEXT
+                                         | HM_CHANGED_VMX_EXIT_CTLS
+                                         | HM_CHANGED_VMX_ENTRY_CTLS
+                                         | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
     }
     pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;

@@ -4606 +4788 @@
     {
         /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. */
-        AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS),
-                  ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
+        AssertMsg(HMCPU_CF_IS_SET(pVCpu,   HM_CHANGED_HOST_CONTEXT
+                                         | HM_CHANGED_VMX_EXIT_CTLS
+                                         | HM_CHANGED_VMX_ENTRY_CTLS
+                                         | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
     }
     pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;

@@ -7892 +8076 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
+    /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we
+       determine we don't have to swap EFER after all. */
     rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadSharedMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

@@ -8003 +8189 @@
               || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
-
-#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
-    uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
-    if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
-        Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
-#endif
 }

@@ -8282 +8462 @@
         }
     }
+
 #ifdef VBOX_STRICT
     hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
+    hmR0VmxCheckHostEferMsr(pVCpu);
+    AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
+#endif
+#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
+    uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
+    if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
+        Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
 #endif
 }

@@ -8342 +8530 @@
 #endif
     pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;   /* Use VMRESUME instead of VMLAUNCH in the next run. */
+#ifdef VBOX_STRICT
+    hmR0VmxCheckHostEferMsr(pVCpu);                            /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
+#endif
     ASMSetFlags(pVmxTransient->uEflags);                       /* Enable interrupts. */
     VMMRZCallRing3Enable(pVCpu);                               /* It is now safe to do longjmps to ring-3!!! */

@@ -8763 +8954 @@
  * @param pVCpu Pointer to the VMCPU.
  * @param pCtx Pointer to the guest-CPU state.
+ *
+ * @remarks This function assumes our cache of the VMCS controls
+ *          are valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
  */
 static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)

@@ -8964 +9158 @@
         if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
         {
+            Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
             AssertRCBreak(rc);
             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
                               VMX_IGS_EFER_MSR_RESERVED);               /* Bits 63:12, bit 9, bits 7:1 MBZ. */
-            HMVMX_CHECK_BREAK((u64Val & MSR_K6_EFER_LMA) == (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
+            HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
                               VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
             HMVMX_CHECK_BREAK(   fUnrestrictedGuest
-                              || (u64Val & MSR_K6_EFER_LMA) == (u32GuestCR0 & X86_CR0_PG), VMX_IGS_EFER_LMA_PG_MISMATCH);
+                              || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u32GuestCR0 & X86_CR0_PG),
+                              VMX_IGS_EFER_LMA_PG_MISMATCH);
         }
 

@@ -9958 +10154 @@
     AssertRCReturn(rc, rc);
 
+    rc = hmR0VmxCheckVmcsCtls(pVCpu);
+    AssertRCReturn(rc, rc);
+
     uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
     NOREF(uInvalidReason);

@@ -10163 +10362 @@
     else if (pMixedCtx->ecx == MSR_IA32_TSC)    /* Windows 7 does this during bootup. See @bugref{6398}. */
         pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
+    else if (pMixedCtx->ecx == MSR_K6_EFER)
+    {
+        /*
+         * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
+         * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about
+         * the other bits as well, SCE and NXE. See @bugref{7368}.
+         */
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
+    }
 
     /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
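The heart of the VT-x change is hmR0VmxShouldSwapEferMsr() above: the relatively expensive EFER swap via the VMCS controls is only requested when a bit the guest can actually observe differs from the host value, i.e. SCE for 64-bit guests and NXE when PAE paging is in use, while LMA/LME stay with the IA32e-mode VM-entry control. Below is a compact standalone sketch of that decision, with locally defined bit constants standing in for the real MSR_K6_EFER_* and X86_CR4_* definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* EFER/CR4 bit positions per the Intel/AMD manuals; named locally to keep the sketch self-contained. */
#define EFER_SCE  (UINT64_C(1) << 0)    /* SYSCALL enable */
#define EFER_NXE  (UINT64_C(1) << 11)   /* no-execute enable */
#define CR4_PAE   (UINT64_C(1) << 5)

/* Mirrors the idea in hmR0VmxShouldSwapEferMsr(): only swap EFER when a bit the guest
   can observe (SCE, NXE) differs from the host value; LMA/LME are handled by the
   VM-entry controls and are ignored here. */
static bool shouldSwapEfer(bool fAllow64BitGuests, uint64_t uHostEfer, uint64_t uGuestEfer, uint64_t uGuestCr4)
{
    if (   fAllow64BitGuests
        && (uGuestEfer & EFER_SCE) != (uHostEfer & EFER_SCE))
        return true;

    if (   (uGuestCr4 & CR4_PAE)
        && (uGuestEfer & EFER_NXE) != (uHostEfer & EFER_NXE))
        return true;

    return false;
}

int main(void)
{
    /* Host has SCE+NXE set, 32-bit non-PAE guest leaves both clear: no swap needed. */
    printf("%d\n", shouldSwapEfer(false, EFER_SCE | EFER_NXE, 0, 0));
    /* 64-bit-capable guest with SCE clear while the host has it set: must swap. */
    printf("%d\n", shouldSwapEfer(true,  EFER_SCE | EFER_NXE, 0, CR4_PAE));
    return 0;
}

Built as an ordinary user-mode program this prints 0 and then 1 for the two calls; in the real function the host value comes from the cached pVM->hm.s.vmx.u64HostEfer and the guest value from the guest CPU context.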
trunk/src/VBox/VMM/VMMR3/HM.cpp
r50918 → r51220

@@ -352 +352 @@
      * Misc initialisation.
      */
-    //pVM->hm.s.vmx.fSupported = false;
-    //pVM->hm.s.svm.fSupported = false;
-    //pVM->hm.s.vmx.fEnabled = false;
-    //pVM->hm.s.svm.fEnabled = false;
-    //pVM->hm.s.fNestedPaging = false;
-
+#if 0
+    pVM->hm.s.vmx.fSupported = false;
+    pVM->hm.s.svm.fSupported = false;
+    pVM->hm.s.vmx.fEnabled = false;
+    pVM->hm.s.svm.fEnabled = false;
+    pVM->hm.s.fNestedPaging = false;
+#endif
 
     /*

@@ -1260 +1261 @@
         }
 
+        LogRel(("HM: Supports VMCS EFER fields = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEfer));
         LogRel(("HM: VMX enabled!\n"));
         pVM->hm.s.vmx.fEnabled = true;
trunk/src/VBox/VMM/include/HMInternal.h
r51083 → r51220

@@ -148 +148 @@
 #define HM_CHANGED_GUEST_SYSENTER_EIP_MSR        RT_BIT(14)
 #define HM_CHANGED_GUEST_SYSENTER_ESP_MSR        RT_BIT(15)
-#define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(16)     /* Shared */
+#define HM_CHANGED_GUEST_EFER_MSR                RT_BIT(16)
+#define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(17)     /* Shared */
 /* VT-x specific state. */
-#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(17)
-#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(18)
-#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(19)
-#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(20)
-#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(21)
+#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(18)
+#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(19)
+#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(20)
+#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(21)
+#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(22)
 /* AMD-V specific state. */
-#define HM_CHANGED_SVM_GUEST_EFER_MSR            RT_BIT(17)
 #define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(18)
 #define HM_CHANGED_SVM_RESERVED1                 RT_BIT(19)
 #define HM_CHANGED_SVM_RESERVED2                 RT_BIT(20)
 #define HM_CHANGED_SVM_RESERVED3                 RT_BIT(21)
+#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(22)
 
 #define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0 \

@@ -178 +179 @@
                                                   | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
                                                   | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \
+                                                  | HM_CHANGED_GUEST_EFER_MSR \
                                                   | HM_CHANGED_GUEST_LAZY_MSRS \
                                                   | HM_CHANGED_VMX_GUEST_AUTO_MSRS \

@@ -185 +187 @@
                                                   | HM_CHANGED_VMX_EXIT_CTLS)
 
-#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(22)
+#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(23)
 
 /* Bits shared between host and guest. */

@@ -414 +416 @@
     /** Host EFER value (set by ring-0 VMX init) */
     uint64_t                    u64HostEfer;
+    /** Whether the CPU supports VMCS fields for swapping EFER. */
+    bool                        fSupportsVmcsEfer;
+    bool                        afAlignment1[7];
 
     /** VMX MSR values */
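This hunk renumbers several RT_BIT() positions to make room for the shared HM_CHANGED_GUEST_EFER_MSR flag. The invariants being preserved are that the generic guest bits stay below the per-backend range, that the VT-x and AMD-V groups may alias each other (only one backend is active at a time), and that HM_CHANGED_HOST_CONTEXT sits above both, which is why it moves from bit 22 to bit 23. Below is a hypothetical compile-time check of those invariants, using local copies of the values shown in the diff rather than the real header:

#include <stdint.h>

/* Local copies of the bit layout from the hunk above (illustrative, not the full header). */
#define CHG_BIT(b)                  (UINT32_C(1) << (b))
#define CHG_GUEST_EFER_MSR          CHG_BIT(16)
#define CHG_GUEST_LAZY_MSRS         CHG_BIT(17)
#define CHG_VMX_GUEST_AUTO_MSRS     CHG_BIT(18)
#define CHG_VMX_EXIT_CTLS           CHG_BIT(22)
#define CHG_SVM_GUEST_APIC_STATE    CHG_BIT(18)
#define CHG_SVM_RESERVED4           CHG_BIT(22)
#define CHG_HOST_CONTEXT            CHG_BIT(23)

/* Generic guest flags must not spill into the per-backend range... */
_Static_assert(CHG_GUEST_LAZY_MSRS < CHG_VMX_GUEST_AUTO_MSRS, "generic flags below backend range");
/* ...the VT-x and AMD-V groups intentionally reuse the same bits... */
_Static_assert(CHG_VMX_GUEST_AUTO_MSRS == CHG_SVM_GUEST_APIC_STATE, "backend groups may alias");
/* ...and HOST_CONTEXT must sit above the highest backend bit. */
_Static_assert(CHG_HOST_CONTEXT > CHG_VMX_EXIT_CTLS && CHG_HOST_CONTEXT > CHG_SVM_RESERVED4,
               "HOST_CONTEXT must not collide with backend flags");

int main(void)
{
    return 0;   /* nothing to run; the checks happen at compile time (C11) */
}

These asserts are not part of the changeset; they only illustrate why the renumbering has to be done in lockstep across the generic, VT-x, AMD-V and host-context groups.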