Changeset 87563 in vbox for trunk/src/VBox/VMM
- Timestamp: Feb 3, 2021 1:23:13 PM
- Location: trunk/src/VBox/VMM
- Files: 9 edited
trunk/src/VBox/VMM/VMMAll/HMAll.cpp
r87547 → r87563

     if (   pVM->hm.s.vmx.fSupported
-        && (   CTX_EXPR(pVM->hm.s.vmx.MsrsForRing3.ProcCtls.n.allowed1, g_HmMsrs.u.vmx.ProcCtls.n.allowed1, RT_NOTHING)
+        && (   CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1, g_HmMsrs.u.vmx.ProcCtls.n.allowed1, RT_NOTHING)
             & VMX_PROC_CTLS_USE_MSR_BITMAPS))
         return true;
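The CTX_EXPR() selector used in this hunk is what confines the rename to the ring-3 operand: it picks one of three expressions depending on the compilation context. A simplified sketch of the idea (illustrative only, not the exact IPRT definition in iprt/cdefs.h):

    /* Simplified sketch: select an expression by compilation context. */
    #if defined(IN_RING3)
    # define CTX_EXPR(ExprR3, ExprR0, ExprRC)   (ExprR3)
    #elif defined(IN_RING0)
    # define CTX_EXPR(ExprR3, ExprR0, ExprRC)   (ExprR0)
    #else
    # define CTX_EXPR(ExprR3, ExprR0, ExprRC)   (ExprRC)
    #endif

So ring-0 builds read g_HmMsrs.u.vmx.ProcCtls.n.allowed1 directly, while ring-3 builds go through the pVM->hm.s.ForR3 mirror this changeset introduces.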
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r87538 → r87563

     bool const fVGif = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF);
 #else
-    bool const fVGif = RT_BOOL(pVM->hm.s.svm.fFeaturesForRing3 & X86_CPUID_SVM_FEATURE_EDX_VGIF);
+    bool const fVGif = RT_BOOL(pVM->hm.s.ForR3.svm.fFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF);
 #endif
     return fVGif && pVM->hm.s.svm.fVGif;
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
r87547 → r87563

 {
     /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
-    uint32_t uCr0Mask = (uint32_t)CTX_EXPR(pVM->hm.s.vmx.MsrsForRing3.u64Cr0Fixed0, g_HmMsrs.u.vmx.u64Cr0Fixed0, RT_NOTHING);
+    uint32_t uCr0Mask = (uint32_t)CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.u64Cr0Fixed0, g_HmMsrs.u.vmx.u64Cr0Fixed0, RT_NOTHING);

     /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
…
     /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
-    uCr0Mask = (uint32_t)~CTX_EXPR(pVM->hm.s.vmx.MsrsForRing3.u64Cr0Fixed1, g_HmMsrs.u.vmx.u64Cr0Fixed1, RT_NOTHING);
+    uCr0Mask = (uint32_t)~CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.u64Cr0Fixed1, g_HmMsrs.u.vmx.u64Cr0Fixed1, RT_NOTHING);
     if ((pCtx->cr0 & uCr0Mask) != 0)
         return false;

     /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
-    uCr0Mask = (uint32_t)CTX_EXPR(pVM->hm.s.vmx.MsrsForRing3.u64Cr4Fixed0, g_HmMsrs.u.vmx.u64Cr4Fixed0, RT_NOTHING);
+    uCr0Mask = (uint32_t)CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.u64Cr4Fixed0, g_HmMsrs.u.vmx.u64Cr4Fixed0, RT_NOTHING);
     uCr0Mask &= ~X86_CR4_VMXE;
     if ((pCtx->cr4 & uCr0Mask) != uCr0Mask)
…
     /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
-    uCr0Mask = (uint32_t)~CTX_EXPR(pVM->hm.s.vmx.MsrsForRing3.u64Cr4Fixed1, g_HmMsrs.u.vmx.u64Cr4Fixed1, RT_NOTHING);
+    uCr0Mask = (uint32_t)~CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.u64Cr4Fixed1, g_HmMsrs.u.vmx.u64Cr4Fixed1, RT_NOTHING);
     if ((pCtx->cr4 & uCr0Mask) != 0)
         return false;
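All four checks apply the same VMX fixed-bit rule from the CRx_FIXED0/FIXED1 capability MSRs. Restated as a self-contained helper (an illustrative sketch of the logic above, not a VBox function):

    #include <stdbool.h>
    #include <stdint.h>

    /* A CR0/CR4 value is usable under VMX when every bit set in fixed0 is
     * set in it, and every bit clear in fixed1 is clear in it.
     * (Illustrative restatement of the checks above, not VBox API.) */
    static bool isCrValidForVmx(uint64_t uCr, uint64_t fFixed0, uint64_t fFixed1)
    {
        if ((uCr & fFixed0) != fFixed0) /* a must-be-one bit is missing */
            return false;
        if ((uCr & ~fFixed1) != 0)      /* a must-be-zero bit is set */
            return false;
        return true;
    }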
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r87559 → r87563

         pVM->hm.s.vmx.fUsePreemptTimerCfg = pVM->hmr0.s.vmx.fUsePreemptTimer;
         pVM->hm.s.vmx.cPreemptTimerShift  = g_cHmVmxPreemptTimerShift;
-        pVM->hm.s.vmx.u64HostCr4ForRing3           = g_uHmVmxHostCr4;
-        pVM->hm.s.vmx.u64HostMsrEferForRing3       = g_uHmVmxHostMsrEfer;
-        pVM->hm.s.vmx.u64HostSmmMonitorCtlForRing3 = g_uHmVmxHostSmmMonitorCtl;
-        HMGetVmxMsrsFromHwvirtMsrs(&g_HmMsrs, &pVM->hm.s.vmx.MsrsForRing3);
+        pVM->hm.s.ForR3.vmx.u64HostCr4           = g_uHmVmxHostCr4;
+        pVM->hm.s.ForR3.vmx.u64HostMsrEfer       = g_uHmVmxHostMsrEfer;
+        pVM->hm.s.ForR3.vmx.u64HostSmmMonitorCtl = g_uHmVmxHostSmmMonitorCtl;
+        HMGetVmxMsrsFromHwvirtMsrs(&g_HmMsrs, &pVM->hm.s.ForR3.vmx.Msrs);
         /* If you need to tweak host MSRs for testing VMX R0 code, do it here. */

         /* Enable VPID if supported and configured. */
         if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VPID)
-            pVM->hm.s.vmx.fVpidForRing3
-                = pVM->hmr0.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid; /* Can be overridden by CFGM in HMR3Init(). */
+            pVM->hm.s.ForR3.vmx.fVpid = pVM->hmr0.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid; /* Can be overridden by CFGM in HMR3Init(). */

         /* Use VMCS shadowing if supported. */
         pVM->hmr0.s.vmx.fUseVmcsShadowing = pVM->cpum.ro.GuestFeatures.fVmx
                                          && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING);
-        pVM->hm.s.vmx.fUseVmcsShadowingForRing3 = pVM->hmr0.s.vmx.fUseVmcsShadowing;
+        pVM->hm.s.ForR3.vmx.fUseVmcsShadowing = pVM->hmr0.s.vmx.fUseVmcsShadowing;

         /* Use the VMCS controls for swapping the EFER MSR if supported. */
-        pVM->hm.s.vmx.fSupportsVmcsEferForRing3 = g_fHmVmxSupportsVmcsEfer;
+        pVM->hm.s.ForR3.vmx.fSupportsVmcsEfer = g_fHmVmxSupportsVmcsEfer;

 #if 0
…
     else if (pVM->hm.s.svm.fSupported)
     {
-        pVM->hm.s.svm.u32Rev            = g_uHmSvmRev;
-        pVM->hm.s.svm.fFeaturesForRing3 = g_fHmSvmFeatures;
-        pVM->hm.s.svm.u64MsrHwcr        = g_HmMsrs.u.svm.u64MsrHwcr;
+        pVM->hm.s.ForR3.svm.u32Rev      = g_uHmSvmRev;
+        pVM->hm.s.ForR3.svm.fFeatures   = g_fHmSvmFeatures;
+        pVM->hm.s.ForR3.svm.u64MsrHwcr  = g_HmMsrs.u.svm.u64MsrHwcr;
         /* If you need to tweak host MSRs for testing SVM R0 code, do it here. */
     }
-    pVM->hm.s.rcInit         = g_rcHmInit;
-    pVM->hm.s.uMaxAsidForLog = g_uHmMaxAsid;
+    pVM->hm.s.ForR3.rcInit   = g_rcHmInit;
+    pVM->hm.s.ForR3.uMaxAsid = g_uHmMaxAsid;

     /*
…
         pVCpu->hmr0.s.fWorldSwitcher = fWorldSwitcher;
     }
-    pVM->hm.s.fWorldSwitcherForLog = fWorldSwitcher;
+    pVM->hm.s.ForR3.fWorldSwitcher = fWorldSwitcher;
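The repeated pattern in this hunk is ring-0 init publishing a read-only snapshot for ring-3: the authoritative value lands in pVM->hmr0.s (ring-0 private) and a copy goes into the new pVM->hm.s.ForR3 aggregate for logging and diagnostics. A minimal sketch of the idiom with hypothetical names:

    /* Hypothetical sketch of the two-view pattern used above: ring-0 owns
     * the live value, ring-3 gets a snapshot taken at init time. */
    typedef struct SKETCHVM
    {
        struct { bool fVpid; } r0;    /* ring-0 private, hot path */
        struct { bool fVpid; } ForR3; /* ring-3 snapshot, logging only */
    } SKETCHVM;

    static void sketchInitCopyOut(SKETCHVM *pVM, bool fVpid)
    {
        pVM->ForR3.fVpid = pVM->r0.fVpid = fVpid; /* one assignment, two views */
    }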
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r87561 → r87563

     AssertReturn(pVM->hm.s.svm.fSupported, VERR_INCOMPATIBLE_CONFIG);
     bool const fNestedPaging = pVM->hm.s.fNestedPagingCfg;
-    AssertReturn(   !fNestedPaging
-                 || (pVM->hm.s.svm.fFeaturesForRing3 & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
-                 VERR_INCOMPATIBLE_CONFIG);
+    AssertReturn(!fNestedPaging || (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING), VERR_INCOMPATIBLE_CONFIG);
     pVM->hmr0.s.fNestedPaging = fNestedPaging;
     pVM->hmr0.s.fAllow64BitGuests = pVM->hm.s.fAllow64BitGuestsCfg;
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r87562 → r87563

         VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
     }
-    pVCpu->CTX_SUFF(pVM)->hm.s.rcInit = rc;
+    pVCpu->CTX_SUFF(pVM)->hm.s.ForR3.rcInit = rc;
 }
…
     if (pVM)
-        pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
+        pVM->hm.s.ForR3.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
 }
…
                 LogRelFunc(("Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
                 pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
-                pVM->hm.s.vmx.fVpidForRing3 = pVM->hmr0.s.vmx.fVpid = false;
+                pVM->hmr0.s.vmx.fVpid = false;
             }
         }
…
             Log4Func(("VPID supported without INVEPT support. Ignoring VPID.\n"));
             pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
-            pVM->hm.s.vmx.fVpidForRing3 = pVM->hmr0.s.vmx.fVpid = false;
+            pVM->hmr0.s.vmx.fVpid = false;
         }
     }
…
      * Copy out the result to ring-3.
      */
-    pVM->hm.s.vmx.fVpidForRing3           = pVM->hmr0.s.vmx.fVpid;
-    pVM->hm.s.vmx.enmTlbFlushTypeForRing3 = pVM->hmr0.s.vmx.enmTlbFlushType;
-    pVM->hm.s.vmx.enmTlbFlushEptForRing3  = pVM->hmr0.s.vmx.enmTlbFlushEpt;
-    pVM->hm.s.vmx.enmTlbFlushVpidForRing3 = pVM->hmr0.s.vmx.enmTlbFlushVpid;
+    pVM->hm.s.ForR3.vmx.fVpid           = pVM->hmr0.s.vmx.fVpid;
+    pVM->hm.s.ForR3.vmx.enmTlbFlushType = pVM->hmr0.s.vmx.enmTlbFlushType;
+    pVM->hm.s.ForR3.vmx.enmTlbFlushEpt  = pVM->hmr0.s.vmx.enmTlbFlushEpt;
+    pVM->hm.s.ForR3.vmx.enmTlbFlushVpid = pVM->hmr0.s.vmx.enmTlbFlushVpid;
     return VINF_SUCCESS;
 }
…
     pVM->hmr0.s.vmx.idLbrTosMsr = idLbrTosMsr;

-    pVM->hm.s.vmx.idLbrFromIpMsrFirstForRing3 = pVM->hmr0.s.vmx.idLbrFromIpMsrFirst = idLbrFromIpMsrFirst;
-    pVM->hm.s.vmx.idLbrFromIpMsrLastForRing3  = pVM->hmr0.s.vmx.idLbrFromIpMsrLast  = idLbrFromIpMsrLast;
-
-    pVM->hm.s.vmx.idLbrToIpMsrFirstForRing3   = pVM->hmr0.s.vmx.idLbrToIpMsrFirst   = idLbrToIpMsrFirst;
-    pVM->hm.s.vmx.idLbrToIpMsrLastForRing3    = pVM->hmr0.s.vmx.idLbrToIpMsrLast    = idLbrToIpMsrLast;
+    pVM->hm.s.ForR3.vmx.idLbrFromIpMsrFirst = pVM->hmr0.s.vmx.idLbrFromIpMsrFirst = idLbrFromIpMsrFirst;
+    pVM->hm.s.ForR3.vmx.idLbrFromIpMsrLast  = pVM->hmr0.s.vmx.idLbrFromIpMsrLast  = idLbrFromIpMsrLast;
+
+    pVM->hm.s.ForR3.vmx.idLbrToIpMsrFirst   = pVM->hmr0.s.vmx.idLbrToIpMsrFirst   = idLbrToIpMsrFirst;
+    pVM->hm.s.ForR3.vmx.idLbrToIpMsrLast    = pVM->hmr0.s.vmx.idLbrToIpMsrLast    = idLbrToIpMsrLast;
     return VINF_SUCCESS;
 }
…
     /* Initialize these always, see hmR3InitFinalizeR0().*/
-    pVM->hm.s.vmx.enmTlbFlushEptForRing3  = pVM->hmr0.s.vmx.enmTlbFlushEpt  = VMXTLBFLUSHEPT_NONE;
-    pVM->hm.s.vmx.enmTlbFlushVpidForRing3 = pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NONE;
+    pVM->hm.s.ForR3.vmx.enmTlbFlushEpt  = pVM->hmr0.s.vmx.enmTlbFlushEpt  = VMXTLBFLUSHEPT_NONE;
+    pVM->hm.s.ForR3.vmx.enmTlbFlushVpid = pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NONE;

     /* Setup the tagged-TLB flush handlers. */
trunk/src/VBox/VMM/VMMR3/HM.cpp
r87561 → r87563

     if (   !pVM->hm.s.vmx.fSupported
         && !pVM->hm.s.svm.fSupported
-        && pVM->hm.s.rcInit == VERR_SVM_IN_USE /* implies functional AMD-V */
+        && pVM->hm.s.ForR3.rcInit == VERR_SVM_IN_USE /* implies functional AMD-V */
         && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
     {
…
         pVM->hm.s.svm.fSupported        = true;
         pVM->hm.s.svm.fIgnoreInUseError = true;
-        pVM->hm.s.rcInit = VINF_SUCCESS;
+        pVM->hm.s.ForR3.rcInit = VINF_SUCCESS;
     }
…
         && !pVM->hm.s.svm.fSupported)
     {
-        LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.rcInit));
-        LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.MsrsForRing3.u64FeatCtrl));
-        switch (pVM->hm.s.rcInit)
+        LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.ForR3.rcInit));
+        LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.ForR3.vmx.Msrs.u64FeatCtrl));
+        switch (pVM->hm.s.ForR3.rcInit)
         {
             case VERR_VMX_IN_VMX_ROOT_MODE:
…
                 return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS");
         }
-        return VMSetError(pVM, pVM->hm.s.rcInit, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.rcInit);
+        return VMSetError(pVM, pVM->hm.s.ForR3.rcInit, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.ForR3.rcInit);
     }
…
     LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%RTbool fIbpbOnVmEntry=%RTbool fL1dFlushOnVmEntry=%RTbool); fL1dFlushOnSched=%RTbool fMdsClearOnVmEntry=%RTbool\n",
-            pVM->hm.s.fWorldSwitcherForLog, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry, pVM->hm.s.fL1dFlushOnVmEntry,
+            pVM->hm.s.ForR3.fWorldSwitcher, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry, pVM->hm.s.fL1dFlushOnVmEntry,
             pVM->hm.s.fL1dFlushOnSched, pVM->hm.s.fMdsClearOnVmEntry));
…
     LogFunc(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
-    AssertLogRelReturn(pVM->hm.s.vmx.MsrsForRing3.u64FeatCtrl != 0, VERR_HM_IPE_4);
+    AssertLogRelReturn(pVM->hm.s.ForR3.vmx.Msrs.u64FeatCtrl != 0, VERR_HM_IPE_4);

     LogRel(("HM: Using VT-x implementation 3.0\n"));
     LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoopsCfg));
-    LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.vmx.u64HostCr4ForRing3));
-    LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.vmx.u64HostMsrEferForRing3));
-    LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtlForRing3));
-
-    hmR3VmxReportFeatCtlMsr(pVM->hm.s.vmx.MsrsForRing3.u64FeatCtrl);
-    hmR3VmxReportBasicMsr(pVM->hm.s.vmx.MsrsForRing3.u64Basic);
-
-    hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.vmx.MsrsForRing3.PinCtls);
-    hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.vmx.MsrsForRing3.ProcCtls);
-    if (pVM->hm.s.vmx.MsrsForRing3.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
-        hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.vmx.MsrsForRing3.ProcCtls2);
-
-    hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.vmx.MsrsForRing3.EntryCtls);
-    hmR3VmxReportExitCtlsMsr(&pVM->hm.s.vmx.MsrsForRing3.ExitCtls);
-
-    if (RT_BF_GET(pVM->hm.s.vmx.MsrsForRing3.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
+    LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostCr4));
+    LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostMsrEfer));
+    LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostSmmMonitorCtl));
+
+    hmR3VmxReportFeatCtlMsr(pVM->hm.s.ForR3.vmx.Msrs.u64FeatCtrl);
+    hmR3VmxReportBasicMsr(pVM->hm.s.ForR3.vmx.Msrs.u64Basic);
+
+    hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.PinCtls);
+    hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.ProcCtls);
+    if (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
+        hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2);
+
+    hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.EntryCtls);
+    hmR3VmxReportExitCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.ExitCtls);
+
+    if (RT_BF_GET(pVM->hm.s.ForR3.vmx.Msrs.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
     {
         /* We don't extensively dump the true capability MSRs as we don't use them, see @bugref{9180#c5}. */
-        LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.MsrsForRing3.TruePinCtls));
-        LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.MsrsForRing3.TrueProcCtls));
-        LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS = %#RX64\n", pVM->hm.s.vmx.MsrsForRing3.TrueEntryCtls));
-        LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS = %#RX64\n", pVM->hm.s.vmx.MsrsForRing3.TrueExitCtls));
+        LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TruePinCtls));
+        LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueProcCtls));
+        LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueEntryCtls));
+        LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueExitCtls));
     }

-    hmR3VmxReportMiscMsr(pVM, pVM->hm.s.vmx.MsrsForRing3.u64Misc);
-    hmR3VmxReportVmcsEnumMsr(pVM->hm.s.vmx.MsrsForRing3.u64VmcsEnum);
-    if (pVM->hm.s.vmx.MsrsForRing3.u64EptVpidCaps)
-        hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.vmx.MsrsForRing3.u64EptVpidCaps);
-    if (pVM->hm.s.vmx.MsrsForRing3.u64VmFunc)
-        hmR3VmxReportVmFuncMsr(pVM->hm.s.vmx.MsrsForRing3.u64VmFunc);
-    hmR3VmxReportCrFixedMsrs(&pVM->hm.s.vmx.MsrsForRing3);
+    hmR3VmxReportMiscMsr(pVM, pVM->hm.s.ForR3.vmx.Msrs.u64Misc);
+    hmR3VmxReportVmcsEnumMsr(pVM->hm.s.ForR3.vmx.Msrs.u64VmcsEnum);
+    if (pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps)
+        hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps);
+    if (pVM->hm.s.ForR3.vmx.Msrs.u64VmFunc)
+        hmR3VmxReportVmFuncMsr(pVM->hm.s.ForR3.vmx.Msrs.u64VmFunc);
+    hmR3VmxReportCrFixedMsrs(&pVM->hm.s.ForR3.vmx.Msrs);

 #ifdef TODO_9217_VMCSINFO
…
      */
     AssertLogRelReturn(   !pVM->hm.s.fNestedPagingCfg
-                       || (pVM->hm.s.vmx.MsrsForRing3.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_EPT),
+                       || (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_EPT),
                        VERR_HM_IPE_1);
     AssertLogRelReturn(   !pVM->hm.s.vmx.fUnrestrictedGuestCfg
-                       || (   (pVM->hm.s.vmx.MsrsForRing3.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
+                       || (   (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
                            && pVM->hm.s.fNestedPagingCfg),
                        VERR_HM_IPE_1);
…
      * in Nehalems and secondary VM exec. controls should be supported in all of them, but nonetheless it's Intel...
      */
-    if (   !(pVM->hm.s.vmx.MsrsForRing3.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
+    if (   !(pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
         && CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
     {
…
-    LogRel(("HM: Supports VMCS EFER fields = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEferForRing3));
+    LogRel(("HM: Supports VMCS EFER fields = %RTbool\n", pVM->hm.s.ForR3.vmx.fSupportsVmcsEfer));
     LogRel(("HM: Enabled VMX\n"));
     pVM->hm.s.vmx.fEnabled = true;
…
     {
         LogRel(("HM: Enabled nested paging\n"));
-        if (pVM->hm.s.vmx.enmTlbFlushEptForRing3 == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
+        if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
             LogRel(("HM: EPT flush type = Single context\n"));
-        else if (pVM->hm.s.vmx.enmTlbFlushEptForRing3 == VMXTLBFLUSHEPT_ALL_CONTEXTS)
+        else if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_ALL_CONTEXTS)
             LogRel(("HM: EPT flush type = All contexts\n"));
-        else if (pVM->hm.s.vmx.enmTlbFlushEptForRing3 == VMXTLBFLUSHEPT_NOT_SUPPORTED)
+        else if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_NOT_SUPPORTED)
             LogRel(("HM: EPT flush type = Not supported\n"));
         else
-            LogRel(("HM: EPT flush type = %#x\n", pVM->hm.s.vmx.enmTlbFlushEptForRing3));
+            LogRel(("HM: EPT flush type = %#x\n", pVM->hm.s.ForR3.vmx.enmTlbFlushEpt));

         if (pVM->hm.s.vmx.fUnrestrictedGuestCfg)
…
         Assert(!pVM->hm.s.vmx.fUnrestrictedGuestCfg);

-    if (pVM->hm.s.vmx.fVpidForRing3)
+    if (pVM->hm.s.ForR3.vmx.fVpid)
     {
         LogRel(("HM: Enabled VPID\n"));
-        if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_INDIV_ADDR)
+        if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_INDIV_ADDR)
             LogRel(("HM: VPID flush type = Individual addresses\n"));
-        else if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
+        else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
             LogRel(("HM: VPID flush type = Single context\n"));
-        else if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_ALL_CONTEXTS)
+        else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
             LogRel(("HM: VPID flush type = All contexts\n"));
-        else if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
+        else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
             LogRel(("HM: VPID flush type = Single context retain globals\n"));
         else
-            LogRel(("HM: VPID flush type = %#x\n", pVM->hm.s.vmx.enmTlbFlushVpidForRing3));
-    }
-    else if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_NOT_SUPPORTED)
+            LogRel(("HM: VPID flush type = %#x\n", pVM->hm.s.ForR3.vmx.enmTlbFlushVpid));
+    }
+    else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_NOT_SUPPORTED)
         LogRel(("HM: Ignoring VPID capabilities of CPU\n"));
…
         LogRel(("HM: Enabled posted-interrupt processing support\n"));

-    if (pVM->hm.s.vmx.fUseVmcsShadowingForRing3)
-    {
-        bool const fFullVmcsShadow = RT_BOOL(pVM->hm.s.vmx.MsrsForRing3.u64Misc & VMX_MISC_VMWRITE_ALL);
+    if (pVM->hm.s.ForR3.vmx.fUseVmcsShadowing)
+    {
+        bool const fFullVmcsShadow = RT_BOOL(pVM->hm.s.ForR3.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL);
         LogRel(("HM: Enabled %s VMCS shadowing\n", fFullVmcsShadow ? "full" : "partial"));
     }
…
         LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
     LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoopsCfg));
-    LogRel(("HM: AMD HWCR MSR = %#RX64\n", pVM->hm.s.svm.u64MsrHwcr));
-    LogRel(("HM: AMD-V revision = %#x\n", pVM->hm.s.svm.u32Rev));
-    LogRel(("HM: AMD-V max ASID = %RU32\n", pVM->hm.s.uMaxAsidForLog));
-    LogRel(("HM: AMD-V features = %#x\n", pVM->hm.s.svm.fFeaturesForRing3));
+    LogRel(("HM: AMD HWCR MSR = %#RX64\n", pVM->hm.s.ForR3.svm.u64MsrHwcr));
+    LogRel(("HM: AMD-V revision = %#x\n", pVM->hm.s.ForR3.svm.u32Rev));
+    LogRel(("HM: AMD-V max ASID = %RU32\n", pVM->hm.s.ForR3.uMaxAsid));
+    LogRel(("HM: AMD-V features = %#x\n", pVM->hm.s.ForR3.svm.fFeatures));

     /*
…
     };

-    uint32_t fSvmFeatures = pVM->hm.s.svm.fFeaturesForRing3;
+    uint32_t fSvmFeatures = pVM->hm.s.ForR3.svm.fFeatures;
     for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
         if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
…
     AssertLogRelReturn(   !pVM->hm.s.fNestedPagingCfg
-                       || (pVM->hm.s.svm.fFeaturesForRing3 & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
+                       || (pVM->hm.s.ForR3.svm.fFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
                        VERR_HM_IPE_1);
…
     PVM pVM = pUVM->pVM;
     VM_ASSERT_VALID_EXT_RETURN(pVM, false);
-    return pVM->hm.s.vmx.fVpidForRing3;
+    return pVM->hm.s.ForR3.vmx.fVpid;
 }
…
     if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
     {
-        LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-1 %#RX32\n", pVM->hm.s.vmx.MsrsForRing3.EntryCtls.n.allowed1));
-        LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-0 %#RX32\n", pVM->hm.s.vmx.MsrsForRing3.EntryCtls.n.allowed0));
+        LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-1 %#RX32\n", pVM->hm.s.ForR3.vmx.Msrs.EntryCtls.n.allowed1));
+        LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-0 %#RX32\n", pVM->hm.s.ForR3.vmx.Msrs.EntryCtls.n.allowed0));
     }
     else if (iStatusCode == VERR_VMX_INVALID_VMXON_PTR)
-        LogRel(("HM: HCPhysVmxEnableError = %#RHp\n", pVM->hm.s.vmx.HCPhysVmxEnableError));
+        LogRel(("HM: HCPhysVmxEnableError = %#RHp\n", pVM->hm.s.ForR3.vmx.HCPhysVmxEnableError));
 }
…
     {
         PCVMXVMCSINFOSHARED pVmcsInfoShared = hmGetVmxActiveVmcsInfoShared(pVCpu);
-        uint32_t const cLbrStack = pVM->hm.s.vmx.idLbrFromIpMsrLastForRing3 - pVM->hm.s.vmx.idLbrFromIpMsrFirstForRing3 + 1;
+        uint32_t const cLbrStack = pVM->hm.s.ForR3.vmx.idLbrFromIpMsrLast - pVM->hm.s.ForR3.vmx.idLbrFromIpMsrFirst + 1;

         /** @todo r=ramshankar: The index technically varies depending on the CPU, but
…
         for (;;)
         {
-            if (pVM->hm.s.vmx.idLbrToIpMsrFirstForRing3)
+            if (pVM->hm.s.ForR3.vmx.idLbrToIpMsrFirst)
                 pHlp->pfnPrintf(pHlp, " Branch (%2u): From IP=%#016RX64 - To IP=%#016RX64\n", idxCurrent,
                                 pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent], pVmcsInfoShared->au64LbrToIpMsr[idxCurrent]);
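One detail worth noting in the hunk above: RT_BF_GET(...u64Basic, VMX_BF_BASIC_TRUE_CTLS) extracts a named bitfield from the VMX basic MSR via companion _SHIFT/_MASK defines. A simplified sketch of how such a getter works (hypothetical names; see iprt/cdefs.h for the real macro):

    #include <stdint.h>

    /* Simplified bitfield getter in the RT_BF_GET style: the field name
     * token-pastes into companion _SHIFT/_MASK macros. Hypothetical. */
    #define SKETCH_BF_GET(a_uValue, a_FieldNm) \
        (((a_uValue) >> a_FieldNm##_SHIFT) & a_FieldNm##_MASK)

    /* Hypothetical field definition for illustration only: */
    #define SKETCH_BF_EXAMPLE_TRUE_CTLS_SHIFT   55
    #define SKETCH_BF_EXAMPLE_TRUE_CTLS_MASK    UINT64_C(0x1)
    /* usage: if (SKETCH_BF_GET(u64Basic, SKETCH_BF_EXAMPLE_TRUE_CTLS)) ... */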
trunk/src/VBox/VMM/include/HMInternal.h
r87562 → r87563

     bool                        afAlignment1[5];

-    /** @todo r=bird: for better cache locality for SVM, it would be good to split
-     *       out the non-esssential data (i.e config and for-ring3 bits). */
     struct
     {
…
         /** The shift mask employed by the VMX-Preemption timer (set by ring-0). */
         uint8_t                     cPreemptTimerShift;
-        bool                        afAlignment1[5];
-
-        /** Pause-loop exiting (PLE) gap in ticks. */
-        uint32_t                    cPleGapTicks;
-        /** Pause-loop exiting (PLE) window in ticks. */
-        uint32_t                    cPleWindowTicks;
-
-        /** Virtual address of the TSS page used for real mode emulation. */
-        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;
-        /** Virtual address of the identity page table used for real mode and protected
-         *  mode without paging emulation in EPT mode. */
-        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;
+        bool                        fAlignment1;

         /** @name Configuration (gets copied if problematic)
…
         /** @} */

-        /** @name For ring-3 consumption
-         * @{ */
-        /** Set if VPID is supported (ring-3 copy). */
-        bool                        fVpidForRing3;
-        /** Whether the CPU supports VMCS fields for swapping EFER (set by ring-0 VMX
-         *  init, for logging). */
-        bool                        fSupportsVmcsEferForRing3;
-        /** Whether to use VMCS shadowing. */
-        bool                        fUseVmcsShadowingForRing3;
-        bool                        fAlignment2;
-
-        /** Host CR4 value (set by ring-0 VMX init, for logging). */
-        uint64_t                    u64HostCr4ForRing3;
-        /** Host SMM monitor control (set by ring-0 VMX init, for logging). */
-        uint64_t                    u64HostSmmMonitorCtlForRing3;
-        /** Host EFER value (set by ring-0 VMX init, for logging and guest NX). */
-        uint64_t                    u64HostMsrEferForRing3;
-
-        /** The first valid host LBR branch-from-IP stack range. */
-        uint32_t                    idLbrFromIpMsrFirstForRing3;
-        /** The last valid host LBR branch-from-IP stack range. */
-        uint32_t                    idLbrFromIpMsrLastForRing3;
-
-        /** The first valid host LBR branch-to-IP stack range. */
-        uint32_t                    idLbrToIpMsrFirstForRing3;
-        /** The last valid host LBR branch-to-IP stack range. */
-        uint32_t                    idLbrToIpMsrLastForRing3;
-
-        /** Host-physical address for a failing VMXON instruction (for diagnostics, ring-3). */
-        RTHCPHYS                    HCPhysVmxEnableError;
-        /** VMX MSR values (only for ring-3 consumption). */
-        VMXMSRS                     MsrsForRing3;
-
-        /** Tagged-TLB flush type (only for ring-3 consumption). */
-        VMXTLBFLUSHTYPE             enmTlbFlushTypeForRing3;
-        /** Flush type to use for INVEPT (only for ring-3 consumption). */
-        VMXTLBFLUSHEPT              enmTlbFlushEptForRing3;
-        /** Flush type to use for INVVPID (only for ring-3 consumption). */
-        VMXTLBFLUSHVPID             enmTlbFlushVpidForRing3;
-        /** @} */
+        /** Pause-loop exiting (PLE) gap in ticks. */
+        uint32_t                    cPleGapTicks;
+        /** Pause-loop exiting (PLE) window in ticks. */
+        uint32_t                    cPleWindowTicks;
+
+        /** Virtual address of the TSS page used for real mode emulation. */
+        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;
+        /** Virtual address of the identity page table used for real mode and protected
+         *  mode without paging emulation in EPT mode. */
+        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;
     } vmx;
…
         uint16_t                    cPauseFilterThresholdTicks;
         uint32_t                    u32Alignment2;
-
-        /** @name For ring-3 consumption
-         * @{ */
-        /** SVM revision. */
-        uint32_t                    u32Rev;
-        /** SVM feature bits from cpuid 0x8000000a, ring-3 copy. */
-        uint32_t                    fFeaturesForRing3;
-        /** HWCR MSR (for diagnostics). */
-        uint64_t                    u64MsrHwcr;
-        /** @} */
     } svm;
…
     /** Size of the guest patch memory block. */
     uint32_t                    cbGuestPatchMem;
-
-    /** Last recorded error code during HM ring-0 init. */
-    int32_t                     rcInit;
-
-    /** Maximum ASID allowed.
-     * This is mainly for the release log. */
-    uint32_t                    uMaxAsidForLog;
-    /** World switcher flags (HM_WSF_XXX) for the release log. */
-    uint32_t                    fWorldSwitcherForLog;
+    uint32_t                    u32Alignment2;
+
+    /** For ring-3 use only. */
+    struct
+    {
+        /** Last recorded error code during HM ring-0 init. */
+        int32_t                     rcInit;
+        uint32_t                    u32Alignment3;
+
+        /** Maximum ASID allowed.
+         * This is mainly for the release log. */
+        uint32_t                    uMaxAsid;
+        /** World switcher flags (HM_WSF_XXX) for the release log. */
+        uint32_t                    fWorldSwitcher;
+
+        struct
+        {
+            /** Set if VPID is supported (ring-3 copy). */
+            bool                        fVpid;
+            /** Whether the CPU supports VMCS fields for swapping EFER (set by ring-0 VMX
+             *  init, for logging). */
+            bool                        fSupportsVmcsEfer;
+            /** Whether to use VMCS shadowing. */
+            bool                        fUseVmcsShadowing;
+            bool                        fAlignment2;
+
+            /** Host CR4 value (set by ring-0 VMX init, for logging). */
+            uint64_t                    u64HostCr4;
+            /** Host SMM monitor control (set by ring-0 VMX init, for logging). */
+            uint64_t                    u64HostSmmMonitorCtl;
+            /** Host EFER value (set by ring-0 VMX init, for logging and guest NX). */
+            uint64_t                    u64HostMsrEfer;
+
+            /** The first valid host LBR branch-from-IP stack range. */
+            uint32_t                    idLbrFromIpMsrFirst;
+            /** The last valid host LBR branch-from-IP stack range. */
+            uint32_t                    idLbrFromIpMsrLast;
+
+            /** The first valid host LBR branch-to-IP stack range. */
+            uint32_t                    idLbrToIpMsrFirst;
+            /** The last valid host LBR branch-to-IP stack range. */
+            uint32_t                    idLbrToIpMsrLast;
+
+            /** Host-physical address for a failing VMXON instruction (for diagnostics, ring-3). */
+            RTHCPHYS                    HCPhysVmxEnableError;
+            /** VMX MSR values (only for ring-3 consumption). */
+            VMXMSRS                     Msrs;
+
+            /** Tagged-TLB flush type (only for ring-3 consumption). */
+            VMXTLBFLUSHTYPE             enmTlbFlushType;
+            /** Flush type to use for INVEPT (only for ring-3 consumption). */
+            VMXTLBFLUSHEPT              enmTlbFlushEpt;
+            /** Flush type to use for INVVPID (only for ring-3 consumption). */
+            VMXTLBFLUSHVPID             enmTlbFlushVpid;
+        } vmx;
+
+        struct
+        {
+            /** SVM revision. */
+            uint32_t                    u32Rev;
+            /** SVM feature bits from cpuid 0x8000000a, ring-3 copy. */
+            uint32_t                    fFeatures;
+            /** HWCR MSR (for diagnostics). */
+            uint64_t                    u64MsrHwcr;
+        } svm;
+    } ForR3;

     /** @name Configuration not used (much) after VM setup
…
 AssertCompileMemberAlignment(HM, vmx, 8);
 AssertCompileMemberAlignment(HM, svm, 8);
+AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
+AssertCompile(RTASSERT_OFFSET_OF(HM, PatchTree) <= 64); /* First cache line has the essentials for both VT-x and SVM operation. */
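This header hunk is the heart of the changeset and resolves the removed @todo: fields that ring-0 only fills in for ring-3 logging and diagnostics move out of the hot vmx/svm sub-structures into a single trailing ForR3 aggregate, so the members touched on the hot path pack toward the start of the structure (what the new AssertCompile on PatchTree's offset guards). A condensed before/after sketch with abbreviated, hypothetical field names:

    #include <stdbool.h>
    #include <stdint.h>

    /* Before: hot and cold members interleaved; touching a hot flag could
     * drag logging-only data into the cache. (Hypothetical field subset.) */
    struct HmBefore
    {
        bool     fSupported;           /* hot: checked on every switch */
        uint64_t u64HostCr4ForRing3;   /* cold: release log only */
        bool     fAllowVpid;           /* hot */
    };

    /* After: cold members grouped at the end; hot members stay adjacent. */
    struct HmAfter
    {
        bool fSupported;               /* hot members packed together */
        bool fAllowVpid;
        struct
        {
            uint64_t u64HostCr4;       /* cold: ring-3 consumption only */
        } ForR3;
    };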
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r87543 → r87563

     /* hm - 32-bit gcc won't align uint64_t naturally, so check. */
-    CHECK_MEMBER_ALIGNMENT(HM, uMaxAsidForLog, 8);
     CHECK_MEMBER_ALIGNMENT(HM, vmx, 8);
-    CHECK_MEMBER_ALIGNMENT(HM, vmx.MsrsForRing3, 8);
     CHECK_MEMBER_ALIGNMENT(HM, svm, 8);
+    CHECK_MEMBER_ALIGNMENT(HM, ForR3.uMaxAsid, 8);
+    CHECK_MEMBER_ALIGNMENT(HM, ForR3.vmx, 8);
     CHECK_MEMBER_ALIGNMENT(HM, PatchTree, 8);
     CHECK_MEMBER_ALIGNMENT(HM, aPatches, 8);
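The testcase keeps guarding natural 64-bit alignment after the move. A compile-time sketch of what such a member-alignment check amounts to (hypothetical macro, not the actual CHECK_MEMBER_ALIGNMENT helper from tstVMStructSize):

    #include <assert.h>
    #include <stddef.h>

    /* Hypothetical compile-time variant: fail the build when a member's
     * offset is not a multiple of the required alignment. */
    #define SKETCH_CHECK_MEMBER_ALIGNMENT(a_Type, a_Member, a_cbAlign) \
        static_assert(offsetof(a_Type, a_Member) % (a_cbAlign) == 0, \
                      "misaligned member: " #a_Member)

    /* e.g. SKETCH_CHECK_MEMBER_ALIGNMENT(HM, ForR3.uMaxAsid, 8); */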