Changeset 73606 in vbox for trunk/src/VBox/VMM/VMMR3
Timestamp: Aug 10, 2018 7:38:56 AM
Location:  trunk/src/VBox/VMM/VMMR3
Files:     5 edited
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
--- r73389
+++ r73606
@@ -2775 +2775 @@
     PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
     static const char *const s_aHwvirtModes[] = { "No/inactive", "SVM", "VMX", "Common" };
-    uint8_t const idxHwvirtState = CPUMIsGuestInSvmNestedHwVirtMode(pCtx) ? CPUMHWVIRTDUMP_SVM
-                                 : CPUMIsGuestInVmxNestedHwVirtMode(pCtx) ? CPUMHWVIRTDUMP_VMX : CPUMHWVIRTDUMP_NONE;
+    bool const fSvm = pVM->cpum.ro.GuestFeatures.fSvm;
+    bool const fVmx = pVM->cpum.ro.GuestFeatures.fVmx;
+    uint8_t const idxHwvirtState = fSvm ? CPUMHWVIRTDUMP_SVM : (fVmx ? CPUMHWVIRTDUMP_VMX : CPUMHWVIRTDUMP_NONE);
     AssertCompile(CPUMHWVIRTDUMP_LAST <= RT_ELEMENTS(s_aHwvirtModes));
     Assert(idxHwvirtState < RT_ELEMENTS(s_aHwvirtModes));
@@ -2788 +2789 @@

     if (fDumpState & CPUMHWVIRTDUMP_COMMON)
-    {
-        pHlp->pfnPrintf(pHlp, "fGif = %RTbool\n", pCtx->hwvirt.fGif);
-        pHlp->pfnPrintf(pHlp, "fLocalForcedActions = %#RX32\n", pCtx->hwvirt.fLocalForcedActions);
-    }
+        pHlp->pfnPrintf(pHlp, "fLocalForcedActions = %#RX32\n", pCtx->hwvirt.fLocalForcedActions);
+
     pHlp->pfnPrintf(pHlp, "%s hwvirt state%s\n", pcszHwvirtMode, (fDumpState & (CPUMHWVIRTDUMP_SVM | CPUMHWVIRTDUMP_VMX)) ?
                     ":" : "");
     if (fDumpState & CPUMHWVIRTDUMP_SVM)
     {
+        pHlp->pfnPrintf(pHlp, "  fGif = %RTbool\n", pCtx->hwvirt.fGif);
+
         char szEFlags[80];
         cpumR3InfoFormatFlags(&szEFlags[0], pCtx->hwvirt.svm.HostState.rflags.u);
-
         pHlp->pfnPrintf(pHlp, "  uMsrHSavePa = %#RX64\n", pCtx->hwvirt.svm.uMsrHSavePa);
         pHlp->pfnPrintf(pHlp, "  GCPhysVmcb = %#RGp\n", pCtx->hwvirt.svm.GCPhysVmcb);
@@ -2839 +2839 @@
     }

-    /** @todo Intel. */
-#if 0
     if (fDumpState & CPUMHWVIRTDUMP_VMX)
     {
+        pHlp->pfnPrintf(pHlp, "  fInVmxRootMode = %RTbool\n", pCtx->hwvirt.vmx.fInVmxRootMode);
+        pHlp->pfnPrintf(pHlp, "  fInVmxNonRootMode = %RTbool\n", pCtx->hwvirt.vmx.fInVmxNonRootMode);
+        pHlp->pfnPrintf(pHlp, "  GCPhysVmxon = %#RGp\n", pCtx->hwvirt.vmx.GCPhysVmxon);
+        pHlp->pfnPrintf(pHlp, "  GCPhysVmcs = %#RGp\n", pCtx->hwvirt.vmx.GCPhysVmcs);
+        pHlp->pfnPrintf(pHlp, "  enmInstrDiag = %u (%s)\n", pCtx->hwvirt.vmx.enmInstrDiag,
+                        HMVmxGetInstrDiagDesc(pCtx->hwvirt.vmx.enmInstrDiag));
+        /** @todo NSTVMX: Dump remaining/new fields. */
     }
-#endif

 #undef CPUMHWVIRTDUMP_NONE
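To make the intent of the idxHwvirtState change easier to see, here is a minimal standalone sketch of the same selection pattern: the dump mode is chosen from the hardware-virtualization features exposed to the guest rather than from whether a nested guest happens to be executing at that moment. This is not VirtualBox code; every name in it (GUESTFEATURES, DUMPMODE, dumpHwvirtState) is invented for illustration.

    /* Illustrative sketch only, not VirtualBox code.  It mirrors the pattern the
     * change introduces: pick the dump mode from the features exposed to the
     * guest (can the guest use SVM or VMX at all?) rather than from whether the
     * guest is executing nested-guest code right now. */
    #include <stdio.h>
    #include <stdbool.h>

    typedef enum DUMPMODE { DUMPMODE_NONE = 0, DUMPMODE_SVM, DUMPMODE_VMX } DUMPMODE;

    typedef struct GUESTFEATURES
    {
        bool fSvm;   /* AMD-V exposed to the guest. */
        bool fVmx;   /* VT-x exposed to the guest.  */
    } GUESTFEATURES;

    static const char * const g_apszModes[] = { "No/inactive", "SVM", "VMX" };

    static void dumpHwvirtState(const GUESTFEATURES *pFeat)
    {
        /* Feature-based selection: stable regardless of the guest's current mode. */
        DUMPMODE const enmMode = pFeat->fSvm ? DUMPMODE_SVM
                               : pFeat->fVmx ? DUMPMODE_VMX : DUMPMODE_NONE;
        printf("%s hwvirt state%s\n", g_apszModes[enmMode], enmMode != DUMPMODE_NONE ? ":" : "");
        if (enmMode == DUMPMODE_SVM)
            printf("  (would dump VMCB address, host save area, fGif, ...)\n");
        else if (enmMode == DUMPMODE_VMX)
            printf("  (would dump VMXON/VMCS addresses, root/non-root flags, ...)\n");
    }

    int main(void)
    {
        GUESTFEATURES FeatSvm = { true,  false };
        GUESTFEATURES FeatVmx = { false, true  };
        dumpHwvirtState(&FeatSvm);
        dumpHwvirtState(&FeatVmx);
        return 0;
    }

Keying on exposed features keeps the info dump meaningful even when no nested guest is currently running, which is what the switch to pVM->cpum.ro.GuestFeatures accomplishes in the hunk above.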
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
--- r73389
+++ r73606
@@ -3937 +3937 @@
     AssertLogRelRCReturn(rc, rc);

-#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
-    /** @cfgm{/CPUM/NestedHWVirt, bool, false}
-     * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest.
-     * The default is false, and when enabled requires nested paging and AMD-V or
-     * unrestricted guest mode.
-     */
-    rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false);
-    AssertLogRelRCReturn(rc, rc);
-    if (   pConfig->fNestedHWVirt
-        && !fNestedPagingAndFullGuestExec)
-        return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
-                          "Cannot enable nested VT-x/AMD-V without nested-paging and unresricted guest execution!\n");
-
-    /** @todo Think about enabling this later with NEM/KVM. */
-    if (   pConfig->fNestedHWVirt
-        && VM_IS_NEM_ENABLED(pVM))
-    {
-        LogRel(("CPUM: WARNING! Can't turn on nested VT-x/AMD-V when NEM is used!\n"));
-        pConfig->fNestedHWVirt = false;
-    }
+    bool fQueryNestedHwvirt = false;
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+    fQueryNestedHwvirt |= RT_BOOL(pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD);
 #endif
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+    fQueryNestedHwvirt |= RT_BOOL(   pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
+                                  || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA);
+#endif
+    if (fQueryNestedHwvirt)
+    {
+        /** @cfgm{/CPUM/NestedHWVirt, bool, false}
+         * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest.
+         * The default is false, and when enabled requires nested paging and AMD-V or
+         * unrestricted guest mode.
+         */
+        rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false);
+        AssertLogRelRCReturn(rc, rc);
+        if (   pConfig->fNestedHWVirt
+            && !fNestedPagingAndFullGuestExec)
+            return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
+                              "Cannot enable nested VT-x/AMD-V without nested-paging and unresricted guest execution!\n");
+
+        /** @todo Think about enabling this later with NEM/KVM. */
+        if (   pConfig->fNestedHWVirt
+            && VM_IS_NEM_ENABLED(pVM))
+        {
+            LogRel(("CPUM: WARNING! Can't turn on nested VT-x/AMD-V when NEM is used!\n"));
+            pConfig->fNestedHWVirt = false;
+        }
+    }

     /*
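The restructured block above only consults the /CPUM/NestedHWVirt key when the build supports nested hardware virtualization for the host CPU's vendor (SVM on AMD, VMX on Intel/VIA). Below is a minimal standalone sketch of that gating pattern; the names CPUVENDOR, queryBoolDef and decideNestedHwvirt stand in for the real CFGM/CPUM API, and SKETCH_WITH_NESTED_SVM / SKETCH_WITH_NESTED_VMX are hypothetical stand-ins for the build flags.

    /* Illustrative sketch only, not the VirtualBox CFGM/CPUM API. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum CPUVENDOR { CPUVENDOR_AMD, CPUVENDOR_INTEL, CPUVENDOR_VIA, CPUVENDOR_OTHER } CPUVENDOR;

    /* Stand-in for a config query: return the configured value or the default. */
    static bool queryBoolDef(const char *pszKey, bool fDefault)
    {
        (void)pszKey;
        return fDefault;
    }

    static bool decideNestedHwvirt(CPUVENDOR enmHostVendor)
    {
        bool fQueryNestedHwvirt = false;
        (void)enmHostVendor;            /* Unused if neither sketch flag is defined. */
    #ifdef SKETCH_WITH_NESTED_SVM       /* Hypothetical stand-in for VBOX_WITH_NESTED_HWVIRT_SVM. */
        fQueryNestedHwvirt |= (enmHostVendor == CPUVENDOR_AMD);
    #endif
    #ifdef SKETCH_WITH_NESTED_VMX       /* Hypothetical stand-in for VBOX_WITH_NESTED_HWVIRT_VMX. */
        fQueryNestedHwvirt |= (enmHostVendor == CPUVENDOR_INTEL || enmHostVendor == CPUVENDOR_VIA);
    #endif
        if (!fQueryNestedHwvirt)
            return false;               /* Host can't do it: don't even look at the config key. */
        return queryBoolDef("/CPUM/NestedHWVirt", false);
    }

    int main(void)
    {
        printf("NestedHWVirt decision on this host: %s\n",
               decideNestedHwvirt(CPUVENDOR_AMD) ? "config honoured" : "off");
        return 0;
    }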
trunk/src/VBox/VMM/VMMR3/EM.cpp
--- r73097
+++ r73606
@@ -1815 +1815 @@
     }

-    if (CPUMIsGuestInVmxNestedHwVirtMode(&pVCpu->cpum.GstCtx))
+    if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
     { /** @todo Nested VMX. */ }
@@ -2147 +2147 @@
     Assert(!HMR3IsEventPending(pVCpu));
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-    if (CPUMIsGuestInNestedHwVirtMode(&pVCpu->cpum.GstCtx))
+    if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
     {
         bool fResched, fInject;
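The renames above replace broad nested-hardware-virtualization checks with the precise per-technology predicates. The standalone sketch below, using invented types and function names rather than the real CPUM API, illustrates why the distinction matters: VMX root mode (the guest hypervisor itself) and VMX non-root mode (a nested guest actually running) are different states and call for different handling.

    /* Illustrative sketch only, not the VirtualBox CPUM API. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct GUESTCTX
    {
        bool fSvmHwvirtActive;   /* An SVM nested guest is running (after VMRUN).  */
        bool fVmxRootMode;       /* Guest executed VMXON (hypervisor context).     */
        bool fVmxNonRootMode;    /* Guest VM entry done; nested guest is running.  */
    } GUESTCTX;

    static bool isGuestInSvmNestedHwVirtMode(const GUESTCTX *pCtx) { return pCtx->fSvmHwvirtActive; }
    static bool isGuestInVmxNonRootMode(const GUESTCTX *pCtx)      { return pCtx->fVmxNonRootMode; }

    static const char *describe(const GUESTCTX *pCtx)
    {
        if (isGuestInSvmNestedHwVirtMode(pCtx))
            return "SVM nested guest active";
        if (isGuestInVmxNonRootMode(pCtx))
            return "VMX nested guest active (non-root mode)";
        if (pCtx->fVmxRootMode)
            return "VMX root mode only (no nested guest running)";
        return "no nested hw-virt activity";
    }

    int main(void)
    {
        GUESTCTX RootOnly = { false, true, false };
        GUESTCTX NonRoot  = { false, true, true  };
        printf("%s\n%s\n", describe(&RootOnly), describe(&NonRoot));
        return 0;
    }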
trunk/src/VBox/VMM/VMMR3/HM.cpp
--- r73389
+++ r73606
@@ -42 +42 @@
 #include <VBox/vmm/stam.h>
 #include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
 #include <VBox/vmm/pdmapi.h>
 #include <VBox/vmm/pgm.h>
@@ -77 +78 @@
 #define EXIT_REASON(def, val, str) #def " - " #val " - " str
 #define EXIT_REASON_NIL() NULL
-/** Exit reason descriptions for VT-x, used to describe statistics. */
-static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
+/** Exit reason descriptions for VT-x, used to describe statistics and exit
+ *  history. */
+static const char * const g_apszVmxExitReasons[MAX_EXITREASON_STAT] =
 {
     EXIT_REASON(VMX_EXIT_XCPT_OR_NMI , 0, "Exception or non-maskable interrupt (NMI)."),
@@ -149 +151 @@
 #define MAX_EXITREASON_VTX 64

-/** A partial list of Exit reason descriptions for AMD-V, used to describe
- *  statistics.
+/** A partial list of \#EXIT reason descriptions for AMD-V, used to describe
+ *  statistics and exit history.
  *
  * @note AMD-V have annoyingly large gaps (e.g. \#NPF VMEXIT comes at 1024),
  *       this array doesn't contain the entire set of exit reasons, we
  *       handle them via hmSvmGetSpecialExitReasonDesc(). */
-static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
+static const char * const g_apszSvmExitReasons[MAX_EXITREASON_STAT] =
 {
     EXIT_REASON(SVM_EXIT_READ_CR0 , 0, "Read CR0."),
@@ -310 +312 @@
 /**
  * Gets the SVM exit reason if it's one of the reasons not present in the @c
- * g_apszAmdVExitReasons array.
+ * g_apszSvmExitReasons array.
  *
  * @returns The exit reason or NULL if unknown.
@@ -1061 +1063 @@
 #undef HM_REG_COUNTER

-    const char *const *papszDesc = ASMIsIntelCpu() || ASMIsViaCentaurCpu() ? &g_apszVTxExitReasons[0]
-                                                                           : &g_apszAmdVExitReasons[0];
+    const char *const *papszDesc = ASMIsIntelCpu() || ASMIsViaCentaurCpu() ? &g_apszVmxExitReasons[0]
+                                                                           : &g_apszSvmExitReasons[0];

     /*
@@ -1938 +1940 @@
     uint32_t u32Model;
     uint32_t u32Stepping;
-    if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
+    if (HMSvmIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
         LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
     LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoops));
@@ -2948 +2950 @@

 #ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
-    if (CPUMIsGuestInNestedHwVirtMode(pCtx))
+    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
+        || CPUMIsGuestVmxEnabled(pCtx))
     {
         Log(("HMR3CanExecuteGuest: In nested-guest mode - returning false"));
@@ -3151 +3154 @@
         && CPUMIsGuestInRealModeEx(pCtx)
         && !PDMVmmDevHeapIsEnabled(pVM))
-    {
         return true;
-    }

     return false;
@@ -3429 +3430 @@
             LogRel(("HM: CPU[%u] Exit reason %#x\n", i, pVCpu->hm.s.vmx.LastError.u32ExitReason));

-            if (   pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMLAUCH_NON_CLEAR_VMCS
-                || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMRESUME_NON_LAUNCHED_VMCS)
+            if (   pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
+                || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
             {
                 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", i, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
                 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", i, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
             }
-            else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
+            else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTL)
             {
                 LogRel(("HM: CPU[%u] PinCtls %#RX32\n", i, pVCpu->hm.s.vmx.u32PinCtls));
@@ -3756 +3757 @@
 VMMR3DECL(const char *) HMR3GetVmxExitName(uint32_t uExit)
 {
-    if (uExit < RT_ELEMENTS(g_apszVTxExitReasons))
-        return g_apszVTxExitReasons[uExit];
+    if (uExit < RT_ELEMENTS(g_apszVmxExitReasons))
+        return g_apszVmxExitReasons[uExit];
     return NULL;
 }
@@ -3770 +3771 @@
 VMMR3DECL(const char *) HMR3GetSvmExitName(uint32_t uExit)
 {
-    if (uExit < RT_ELEMENTS(g_apszAmdVExitReasons))
-        return g_apszAmdVExitReasons[uExit];
+    if (uExit < RT_ELEMENTS(g_apszSvmExitReasons))
+        return g_apszSvmExitReasons[uExit];
     return hmSvmGetSpecialExitReasonDesc(uExit);
 }
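Besides the em.h include, the VMXINSTRERR_* renames and a couple of predicate fixes, most of this diff renames the exit-reason tables and their accessors. The lookup pattern behind HMR3GetVmxExitName() and HMR3GetSvmExitName() uses a dense table for low-numbered exit reasons and a fallback for sparse high-numbered ones (the note in the diff mentions AMD-V's #NPF exit at 1024). The standalone sketch below models that pattern; the table contents and helper names are invented, not VirtualBox's.

    /* Illustrative sketch only, not VirtualBox code. */
    #include <stdio.h>

    #define MAX_EXITREASON_STAT 8   /* Size of the dense part of the table. */

    static const char * const g_apszExitReasons[MAX_EXITREASON_STAT] =
    {
        "Read CR0.", "Read CR1.", "Read CR2.", "Read CR3.",
        "Read CR4.", "Read CR5.", "Read CR6.", "Read CR7.",
    };

    /* Fallback for exit codes that live outside the dense table. */
    static const char *getSpecialExitReasonDesc(unsigned uExit)
    {
        switch (uExit)
        {
            case 1024: return "Nested page fault (#NPF).";
            default:   return NULL;
        }
    }

    static const char *getExitName(unsigned uExit)
    {
        if (uExit < MAX_EXITREASON_STAT)
            return g_apszExitReasons[uExit];
        return getSpecialExitReasonDesc(uExit);
    }

    int main(void)
    {
        printf("exit 3:    %s\n", getExitName(3));
        printf("exit 1024: %s\n", getExitName(1024));
        const char *pszUnknown = getExitName(500);
        printf("exit 500:  %s\n", pszUnknown ? pszUnknown : "(unknown)");
        return 0;
    }

The dense-table-plus-fallback split keeps the common case a plain array index while still giving readable names to the few far-flung exit codes.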
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
--- r73097
+++ r73606
@@ -4549 +4549 @@
     if (pVCpu->pgm.s.fA20Enabled != fEnable)
     {
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+        PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+        if (   CPUMIsGuestInVmxRootMode(pCtx)
+            && !fEnable)
+        {
+            Log(("Cannot enter A20M mode while in VMX root mode\n"));
+            return;
+        }
+#endif
         pVCpu->pgm.s.fA20Enabled = fEnable;
         pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
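The added guard refuses to mask the A20 line while the guest is in VMX root operation. Here is a small standalone model of that guard, with invented structure and function names in place of the real PGM/CPUM API.

    /* Illustrative sketch only, not the VirtualBox PGM/CPUM API. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct VCPUSTATE
    {
        bool     fInVmxRootMode;   /* Guest has executed VMXON.            */
        bool     fA20Enabled;      /* Current A20 gate state.              */
        uint64_t uA20Mask;         /* Address mask derived from the gate.  */
    } VCPUSTATE;

    static void setA20(VCPUSTATE *pVCpu, bool fEnable)
    {
        if (pVCpu->fA20Enabled == fEnable)
            return;                               /* No change requested. */
        if (pVCpu->fInVmxRootMode && !fEnable)
        {
            /* Mirrors the new check: refuse A20M mode while in VMX root mode. */
            printf("Cannot enter A20M mode while in VMX root mode\n");
            return;
        }
        pVCpu->fA20Enabled = fEnable;
        /* With the gate disabled, bit 20 of guest physical addresses is masked off. */
        pVCpu->uA20Mask = ~((uint64_t)!fEnable << 20);
    }

    int main(void)
    {
        VCPUSTATE VCpu = { true, true, ~(uint64_t)0 };
        setA20(&VCpu, false);                     /* Refused: VMX root mode. */
        VCpu.fInVmxRootMode = false;
        setA20(&VCpu, false);                     /* Applied this time. */
        printf("A20 enabled: %d, mask bit 20: %d\n",
               VCpu.fA20Enabled, (int)((VCpu.uA20Mask >> 20) & 1));
        return 0;
    }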