Changeset 49752 in vbox for trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
- Timestamp: Dec 3, 2013 12:39:31 PM
- File: trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (1 edited)
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r49740)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r49752)
@@ -1400,5 +1400,5 @@
 
 
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 64
 /**
  * Saves a set of host MSRs to allow read/write passthru access to the guest and
@@ -1549,4 +1549,4 @@
     pVCpu->hm.s.vmx.fRestoreHostMsrs &= ~(VMX_RESTORE_HOST_MSR_LOADED_GUEST | VMX_RESTORE_HOST_MSR_SAVED_HOST);
 }
-#endif /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
+#endif /* HC_ARCH_BITS == 64 */
 
@@ -2329,10 +2329,9 @@
     hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
 
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 64
     /*
      * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
      */
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && pVM->hm.s.fAllow64BitGuests)
+    if (pVM->hm.s.fAllow64BitGuests)
     {
         hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
@@ -3007,10 +3006,7 @@
 
     int rc = VINF_SUCCESS;
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && pVM->hm.s.fAllow64BitGuests)
-    {
+#if HC_ARCH_BITS == 64
+    if (pVM->hm.s.fAllow64BitGuests)
         hmR0VmxLazySaveHostMsrs(pVCpu);
-    }
 #endif
 
@@ -4457,14 +4453,11 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
     {
+#if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
         if (pVM->hm.s.fAllow64BitGuests)
         {
-#if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-            if (!HMVMX_IS_64BIT_HOST_MODE())
-            {
-                hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false /* fUpdateHostMsr */);
-                hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false /* fUpdateHostMsr */);
-                hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false /* fUpdateHostMsr */);
-                hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
-            }
+            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false /* fUpdateHostMsr */);
+            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false /* fUpdateHostMsr */);
+            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false /* fUpdateHostMsr */);
+            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
 # ifdef DEBUG
             PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
@@ -4472,6 +4465,6 @@
                 Log4(("Load: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
 # endif
-#endif
-        }
+        }
+#endif
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
     }
@@ -5846,7 +5839,6 @@
 static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+#if HC_ARCH_BITS == 64
+    if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
     {
         /* We should not get preempted to a different CPU at this point while reading the MSRs. */
@@ -5865,9 +5857,6 @@
     }
     else
-    {
-        /* Darwin 32-bit/PAE kernel or 64-bit host running 32-bit guest. */
         pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LAZY_MSRS;
-    }
-#else /* HC_ARCH_BITS == 32 */
+#else
     NOREF(pMixedCtx);
     pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LAZY_MSRS;
@@ -6642,8 +6631,7 @@
 #endif
 
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 64
     /* Restore the host MSRs as we're leaving VT-x context. */
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && pVM->hm.s.fAllow64BitGuests
+    if (   pVM->hm.s.fAllow64BitGuests
         && pVCpu->hm.s.vmx.fRestoreHostMsrs)
     {
@@ -6870,8 +6858,7 @@
 #endif
 
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 64
     /* Restore the host MSRs as we're leaving VT-x context. */
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && pVM->hm.s.fAllow64BitGuests
+    if (   pVM->hm.s.fAllow64BitGuests
         && pVCpu->hm.s.vmx.fRestoreHostMsrs)
     {
@@ -7829,10 +7816,7 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
    {
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-        if (   HMVMX_IS_64BIT_HOST_MODE()
-            && pVM->hm.s.fAllow64BitGuests)
-        {
+#if HC_ARCH_BITS == 64
+        if (pVM->hm.s.fAllow64BitGuests)
            hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
-        }
 #endif
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
@@ -9935,7 +9919,6 @@
         HMVMX_RETURN_UNEXPECTED_EXIT();
     }
-# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
+# if HC_ARCH_BITS == 64
+    if (   pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
         && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
     {
@@ -10017,10 +10000,7 @@
                 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
                     HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-                else if (   HMVMX_IS_64BIT_HOST_MODE()
-                         && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
-                {
+#if HC_ARCH_BITS == 64
+                else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
                     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
-                }
 #endif
                 break;
@@ -10054,7 +10034,6 @@
     }
 
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
+#if HC_ARCH_BITS == 64
+    if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
     {
         AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
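
Every hunk above applies the same simplification: the lazy MSR save/load/restore paths that only matter on 64-bit hosts are now compiled in under #if HC_ARCH_BITS == 64 alone, the runtime HMVMX_IS_64BIT_HOST_MODE() test disappears from their conditions, and the 32-bit auto-load/store fallback moves behind #if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL). What follows is a minimal standalone sketch of the resulting guard pattern, not VirtualBox code: the *Stub types and names and the main() harness are invented stand-ins for illustration.

/*
 * Sketch of the guard pattern this changeset converges on (NOT VirtualBox
 * code; the *Stub names are hypothetical).  The compile-time HC_ARCH_BITS
 * check alone decides whether the 64-bit-host-only lazy MSR code is built;
 * the only runtime condition left is the per-VM fAllow64BitGuests flag.
 */
#include <stdbool.h>
#include <stdio.h>

#ifndef HC_ARCH_BITS            /* Normally defined by the build system. */
# define HC_ARCH_BITS 64
#endif

typedef struct VCPUSTUB
{
    bool fAllow64BitGuests;     /* Stands in for pVM->hm.s.fAllow64BitGuests. */
    bool fRestoreHostMsrs;      /* Stands in for pVCpu->hm.s.vmx.fRestoreHostMsrs. */
} VCPUSTUB;

#if HC_ARCH_BITS == 64
/* Compiled only on 64-bit hosts; no runtime host-mode test is needed. */
static void lazySaveHostMsrsStub(VCPUSTUB *pVCpu)
{
    printf("saving host MSRs for lazy restore\n");
    pVCpu->fRestoreHostMsrs = true;
}
#endif

/* Mirrors the shape of the simplified save-host-MSRs hunk (old line ~3009). */
static void saveHostStateStub(VCPUSTUB *pVCpu)
{
#if HC_ARCH_BITS == 64
    if (pVCpu->fAllow64BitGuests)
        lazySaveHostMsrsStub(pVCpu);
#else
    (void)pVCpu;                /* Mirrors the NOREF() in the 32-bit path. */
#endif
}

int main(void)
{
    VCPUSTUB Vcpu = { .fAllow64BitGuests = true, .fRestoreHostMsrs = false };
    saveHostStateStub(&Vcpu);
    printf("fRestoreHostMsrs=%d\n", Vcpu.fRestoreHostMsrs);
    return 0;
}

The design point mirrored here: when a condition is fixed at build time (the host is 64-bit), the preprocessor guard keeps the dead branch out of the binary entirely, so the redundant runtime check can be deleted rather than evaluated on every VM-exit path.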