Changeset 107854 in vbox for trunk/src/VBox/VMM
- Timestamp: Jan 18, 2025, 11:59:26 PM
- svn:sync-xref-src-repo-rev: 167053
- Location: trunk/src/VBox/VMM
- Files: 11 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllCpuId.cpp
(r107749 → r107854)

 
 
+void cpumCpuIdExplodeFeaturesX86SetSummaryBits(CPUMFEATURESX86 *pFeatures)
+{
+    /* Summary or all bits indicating the presence of the IA32_SPEC_CTRL MSR. */
+    pFeatures->fSpecCtrlMsr = pFeatures->fIbrs
+                            | pFeatures->fStibp
+                            | pFeatures->fSsbd
+                            | pFeatures->fPsfd
+                            | pFeatures->fIpredCtrl
+                            | pFeatures->fRrsbaCtrl
+                            | pFeatures->fDdpdU
+                            | pFeatures->fBhiCtrl
+                            ;
+}
+
+
 int cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, CPUMFEATURESX86 *pFeatures)
 {
…
     pFeatures->fMmx        |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_MMX);
     pFeatures->fTsc        |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_TSC);
-    pFeatures->fIbpb       |= pExtLeaf8 && (pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_IBPB);
     pFeatures->fAmdMmxExts  = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_AXMMX);
     pFeatures->fXop         = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_XOP);
     pFeatures->fTbm         = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_TBM);
     pFeatures->fSvm         = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM);
+
+    if (pExtLeaf8)
+    {
+        pFeatures->fIbpb   |= RT_BOOL(pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_IBPB);
+        pFeatures->fIbrs   |= RT_BOOL(pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_IBRS);
+        pFeatures->fStibp  |= RT_BOOL(pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_STIBP);
+        pFeatures->fSsbd   |= RT_BOOL(pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_SPEC_CTRL_SSBD);
+        pFeatures->fPsfd   |= RT_BOOL(pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_PSFD);
+    }
+
+    PCCPUMCPUIDLEAF pExtLeaf21 = cpumCpuIdFindLeaf(paLeaves, cLeaves, 0x80000021);
+    if (pExtLeaf21)
+    {
+        /** @todo IBPB_BRTYPE is implied on Zen 1 & 2.
+         *  https://www.amd.com/content/dam/amd/en/documents/corporate/cr/speculative-return-stack-overflow-whitepaper.pdf */
+    }
+
     if (pFeatures->fSvm)
     {
…
     else
         AssertLogRelReturn(cLeaves == 0, VERR_CPUM_IPE_1);
+
+    cpumCpuIdExplodeFeaturesX86SetSummaryBits(pFeatures);
     return VINF_SUCCESS;
 }
…
  * Helper for extracting feature bits from IA32_ARCH_CAPABILITIES.
  */
-static void cpumCpuIdExplodeArchCapabilities(CPUMFEATURESX86 *pFeatures, bool fHasArchCap, uint64_t fArchVal)
+void cpumCpuIdExplodeArchCapabilities(CPUMFEATURESX86 *pFeatures, bool fHasArchCap, uint64_t fArchVal)
 {
     Assert(fHasArchCap || fArchVal == 0);
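The new cpumCpuIdExplodeFeaturesX86SetSummaryBits() helper ORs the individual IA32_SPEC_CTRL-related feature flags into the fSpecCtrlMsr summary bit so that later code can test a single flag. A minimal standalone C sketch of the idea; the FEATURES struct and setSummaryBits() below are simplified stand-ins, not the real CPUMFEATURESX86 layout:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for the relevant CPUMFEATURESX86 bits (assumption, not the real layout). */
    typedef struct FEATURES
    {
        bool fIbrs, fStibp, fSsbd, fPsfd, fIpredCtrl, fRrsbaCtrl, fDdpdU, fBhiCtrl;
        bool fSpecCtrlMsr;  /* summary: any of the above implies the IA32_SPEC_CTRL MSR exists */
    } FEATURES;

    static void setSummaryBits(FEATURES *pFeatures)
    {
        pFeatures->fSpecCtrlMsr = pFeatures->fIbrs
                                | pFeatures->fStibp
                                | pFeatures->fSsbd
                                | pFeatures->fPsfd
                                | pFeatures->fIpredCtrl
                                | pFeatures->fRrsbaCtrl
                                | pFeatures->fDdpdU
                                | pFeatures->fBhiCtrl;
    }

    int main(void)
    {
        FEATURES Feat = { 0 };
        Feat.fSsbd = true;                    /* e.g. only SSBD is reported */
        setSummaryBits(&Feat);
        printf("fSpecCtrlMsr=%d\n", Feat.fSpecCtrlMsr);  /* prints 1 */
        return 0;
    }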
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
(r107703 → r107854)

     /*
      * Get MSR_IA32_ARCH_CAPABILITIES and expand it into the host feature structure.
+     *
+     * AMD CPUs doesn't have this register, similar info is available in EBX in
+     * CPUID leaf 0x80000008
      */
     if (ASMHasCpuId())
     {
-        /** @todo Should add this MSR to CPUMMSRS and expose it via SUPDrv... */
-        g_CpumHostFeatures.s.fArchRdclNo             = 0;
-        g_CpumHostFeatures.s.fArchIbrsAll            = 0;
-        g_CpumHostFeatures.s.fArchRsbOverride        = 0;
-        g_CpumHostFeatures.s.fArchVmmNeedNotFlushL1d = 0;
-        g_CpumHostFeatures.s.fArchMdsNo              = 0;
         uint32_t const cStdRange = ASMCpuId_EAX(0);
         if (   RTX86IsValidStdRange(cStdRange)
…
             if (   (fStdExtFeaturesEdx & X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP)
                 && (fStdFeaturesEdx & X86_CPUID_FEATURE_EDX_MSR))
-            {
-                uint64_t fArchVal = ASMRdMsr(MSR_IA32_ARCH_CAPABILITIES);
-                g_CpumHostFeatures.s.fArchRdclNo             = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RDCL_NO);
-                g_CpumHostFeatures.s.fArchIbrsAll            = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_IBRS_ALL);
-                g_CpumHostFeatures.s.fArchRsbOverride        = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RSBO);
-                g_CpumHostFeatures.s.fArchVmmNeedNotFlushL1d = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_VMM_NEED_NOT_FLUSH_L1D);
-                g_CpumHostFeatures.s.fArchMdsNo              = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_MDS_NO);
-            }
-            else
-                g_CpumHostFeatures.s.fArchCap = 0;
+                cpumCpuIdExplodeArchCapabilities(&g_CpumHostFeatures.s, true, ASMRdMsr(MSR_IA32_ARCH_CAPABILITIES));
         }
     }
…
      * Note! We assume this happens after the CPUMR3Init is done, so CPUID bits are settled.
      */
-    uint64_t fHostArchVal = 0;
-    bool     fHasArchCap  = false;
-    uint32_t const cStdRange = ASMCpuId_EAX(0);
+    /** @todo Should add this MSR to CPUMMSRS and expose it via SUPDrv... */
+    uint32_t const cStdRange = ASMCpuId_EAX(0);
     if (   RTX86IsValidStdRange(cStdRange)
         && cStdRange >= 7)
…
         if (   (fEdxFeatures & X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP)
             && (fFeatures & X86_CPUID_FEATURE_EDX_MSR))
-        {
-            fHostArchVal = ASMRdMsr(MSR_IA32_ARCH_CAPABILITIES);
-            fHasArchCap  = true;
-        }
-    }
-    CPUMCpuIdApplyX86HostArchCapabilities(pVM, fHasArchCap, fHostArchVal);
+            CPUMCpuIdApplyX86HostArchCapabilities(pVM, true, ASMRdMsr(MSR_IA32_ARCH_CAPABILITIES));
+    }
 
     /*
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
(r106061 → r107854)

      * Configure defences against spectre and other CPU bugs.
      */
+    /* Determin the flags: */
     uint32_t fWorldSwitcher = 0;
-    uint32_t cLastStdLeaf = ASMCpuId_EAX(0);
-    if (cLastStdLeaf >= 0x00000007 && RTX86IsValidStdRange(cLastStdLeaf))
-    {
-        uint32_t uEdx = 0;
-        ASMCpuIdExSlow(0x00000007, 0, 0, 0, NULL, NULL, NULL, &uEdx);
-
-        if (uEdx & X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB)
-        {
-            if (pVM->hm.s.fIbpbOnVmExit)
-                fWorldSwitcher |= HM_WSF_IBPB_EXIT;
-            if (pVM->hm.s.fIbpbOnVmEntry)
-                fWorldSwitcher |= HM_WSF_IBPB_ENTRY;
-        }
-        if (uEdx & X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD)
-        {
-            if (pVM->hm.s.fL1dFlushOnVmEntry)
-                fWorldSwitcher |= HM_WSF_L1D_ENTRY;
-            else if (pVM->hm.s.fL1dFlushOnSched)
-                fWorldSwitcher |= HM_WSF_L1D_SCHED;
-        }
-        if (uEdx & X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR)
-        {
-            if (pVM->hm.s.fMdsClearOnVmEntry)
-                fWorldSwitcher |= HM_WSF_MDS_ENTRY;
-            else if (pVM->hm.s.fMdsClearOnSched)
-                fWorldSwitcher |= HM_WSF_MDS_SCHED;
-        }
-    }
+    if (g_CpumHostFeatures.s.fIbpb)
+    {
+        if (pVM->hm.s.fIbpbOnVmExit)
+            fWorldSwitcher |= HM_WSF_IBPB_EXIT;
+        if (pVM->hm.s.fIbpbOnVmEntry)
+            fWorldSwitcher |= HM_WSF_IBPB_ENTRY;
+    }
+    if (g_CpumHostFeatures.s.fFlushCmd)
+    {
+        if (pVM->hm.s.fL1dFlushOnVmEntry)
+            fWorldSwitcher |= HM_WSF_L1D_ENTRY;
+        else if (pVM->hm.s.fL1dFlushOnSched)
+            fWorldSwitcher |= HM_WSF_L1D_SCHED;
+    }
+    if (g_CpumHostFeatures.s.fMdsClear)
+    {
+        if (pVM->hm.s.fMdsClearOnVmEntry)
+            fWorldSwitcher |= HM_WSF_MDS_ENTRY;
+        else if (pVM->hm.s.fMdsClearOnSched)
+            fWorldSwitcher |= HM_WSF_MDS_SCHED;
+    }
+    if (g_CpumHostFeatures.s.fSpecCtrlMsr)
+    {
+        /** @todo this may be too early for intel? */
+        if (pVM->cpum.ro.GuestFeatures.fSpecCtrlMsr)
+            fWorldSwitcher |= HM_WSF_SPEC_CTRL;
+    }
+
+    /* Distribute the flags. */
     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
…
     }
     pVM->hm.s.ForR3.fWorldSwitcher = fWorldSwitcher;
-
 
     /*
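After this change the world-switcher flags are derived from the pre-exploded host feature bits rather than from a fresh CPUID query. A compact sketch of that flag composition, using hypothetical stand-in names and bit values for the HM_WSF_* constants and configuration fields (not the real VirtualBox definitions):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the HM_WSF_* flags. */
    #define WSF_IBPB_EXIT  (1u << 0)
    #define WSF_IBPB_ENTRY (1u << 1)
    #define WSF_L1D_ENTRY  (1u << 2)
    #define WSF_L1D_SCHED  (1u << 3)
    #define WSF_MDS_ENTRY  (1u << 4)
    #define WSF_MDS_SCHED  (1u << 5)
    #define WSF_SPEC_CTRL  (1u << 6)

    typedef struct HOSTFEAT { bool fIbpb, fFlushCmd, fMdsClear, fSpecCtrlMsr; } HOSTFEAT;
    typedef struct CONFIG
    {
        bool fIbpbOnVmExit, fIbpbOnVmEntry;
        bool fL1dFlushOnVmEntry, fL1dFlushOnSched;
        bool fMdsClearOnVmEntry, fMdsClearOnSched;
        bool fGuestHasSpecCtrl;
    } CONFIG;

    /* Only enable a mitigation flag if the host actually supports the underlying facility. */
    static uint32_t computeWorldSwitcherFlags(const HOSTFEAT *pHost, const CONFIG *pCfg)
    {
        uint32_t fFlags = 0;
        if (pHost->fIbpb)                         /* host supports IBPB */
        {
            if (pCfg->fIbpbOnVmExit)  fFlags |= WSF_IBPB_EXIT;
            if (pCfg->fIbpbOnVmEntry) fFlags |= WSF_IBPB_ENTRY;
        }
        if (pHost->fFlushCmd)                     /* host supports IA32_FLUSH_CMD */
        {
            if (pCfg->fL1dFlushOnVmEntry)    fFlags |= WSF_L1D_ENTRY;
            else if (pCfg->fL1dFlushOnSched) fFlags |= WSF_L1D_SCHED;
        }
        if (pHost->fMdsClear)                     /* host supports MD_CLEAR (VERW) */
        {
            if (pCfg->fMdsClearOnVmEntry)    fFlags |= WSF_MDS_ENTRY;
            else if (pCfg->fMdsClearOnSched) fFlags |= WSF_MDS_SCHED;
        }
        if (pHost->fSpecCtrlMsr && pCfg->fGuestHasSpecCtrl)
            fFlags |= WSF_SPEC_CTRL;              /* swap IA32_SPEC_CTRL on entry/exit */
        return fFlags;
    }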
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
(r106061 → r107854)

 ;
 ; @note Important that this does not modify cbFrame or rsp.
+; @note HM_WSF_SPEC_CTRL is handled differently here at the moment.
 %macro RESTORE_STATE_VMX 4
         ; Restore base and limit of the IDTR & GDTR.
…
 %endif
 
-%if %3 & HM_WSF_IBPB_EXIT
+%if (%3) & HM_WSF_IBPB_EXIT
         ; Fight spectre (trashes rax, rdx and rcx).
  %if %1 = 0 ; Skip this in failure branch (=> guru)
…
 
        mov     eax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fWorldSwitcher]
-       and     eax, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT
-       cmp     eax, %3
+       and     eax, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT ; | HM_WSF_SPEC_CTRL
+       cmp     eax, (%3)
        mov     eax, VERR_VMX_STARTVM_PRECOND_1
        jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
…
        ; Fight spectre and similar. Trashes rax, rcx, and rdx.
        ;
-%if %3 & (HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY)  ; The eax:edx value is the same for the first two.
+%if (%3) & (HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY) ; The eax:edx value is the same for the first two.
        AssertCompile(MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D)
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
 %endif
-%if %3 & HM_WSF_IBPB_ENTRY       ; Indirect branch barrier.
+%if (%3) & HM_WSF_IBPB_ENTRY     ; Indirect branch barrier.
        mov     ecx, MSR_IA32_PRED_CMD
        wrmsr
 %endif
-%if %3 & HM_WSF_L1D_ENTRY        ; Level 1 data cache flush.
+%if (%3) & HM_WSF_L1D_ENTRY      ; Level 1 data cache flush.
        mov     ecx, MSR_IA32_FLUSH_CMD
        wrmsr
-%elif %3 & HM_WSF_MDS_ENTRY      ; MDS flushing is included in L1D_FLUSH
+%elif (%3) & HM_WSF_MDS_ENTRY    ; MDS flushing is included in L1D_FLUSH
        mov     word [rbp + frm_MDS_seg], ds
        verw    word [rbp + frm_MDS_seg]
…
 %endif ; %4 != 0
 
+.return_with_restored_preserved_registers:
        lea     rsp, [rbp + frm_fRFlags]
        popf
…
  %error Bad frame size value: cbFrame, expected cbBaseFrame
 %endif
-       jmp     .vmstart64_end
+       jmp     .return_with_restored_preserved_registers
 %endif
 
…
 ; @param 1  The suffix of the variation.
 ; @param 2  fLoadSaveGuestXcr0 value
-; @param 3  The HM_WSF_IBPB_ENTRY + HM_WSF_IBPB_EXIT value.
+; @param 3  The HM_WSF_IBPB_ENTRY + HM_WSF_IBPB_EXIT + HM_WSF_SPEC_CTRL value.
 ; @param 4  The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
 ;           Drivers shouldn't use AVX registers without saving+loading:
…
        SEH64_SET_FRAME_xBP 0
        pushf
-  %assign cbFrame 30h
+  %assign cbFrame 40h
  %if %4 != 0
  %assign cbFrame cbFrame + 16 * 11  ; Reserve space for 10x 128-bit XMM registers and MXCSR (32-bit)
…
 %define frm_pGstCtx         -028h  ; Where we stash guest CPU context for use after the vmrun.
 %define frm_HCPhysVmcbHost  -030h  ; Where we stash HCPhysVmcbHost for the vmload after vmrun.
+%define frm_uHostSpecCtrl   -040h  ; Saved IA32_MSR_SPEC_CTRL value.
 %if %4 != 0
-%define frm_saved_xmm6      -040h
-%define frm_saved_xmm7      -050h
-%define frm_saved_xmm8      -060h
-%define frm_saved_xmm9      -070h
-%define frm_saved_xmm10     -080h
-%define frm_saved_xmm11     -090h
-%define frm_saved_xmm12     -0a0h
-%define frm_saved_xmm13     -0b0h
-%define frm_saved_xmm14     -0c0h
-%define frm_saved_xmm15     -0d0h
-%define frm_saved_mxcsr     -0e0h
+%define frm_saved_xmm6      -050h
+%define frm_saved_xmm7      -060h
+%define frm_saved_xmm8      -070h
+%define frm_saved_xmm9      -080h
+%define frm_saved_xmm10     -090h
+%define frm_saved_xmm11     -0a0h
+%define frm_saved_xmm12     -0b0h
+%define frm_saved_xmm13     -0c0h
+%define frm_saved_xmm14     -0d0h
+%define frm_saved_xmm15     -0e0h
+%define frm_saved_mxcsr     -0f0h
 %endif
 
…
 
        mov     eax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fWorldSwitcher]
-       and     eax, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT
-       cmp     eax, %3
+       and     eax, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT | HM_WSF_SPEC_CTRL
+       cmp     eax, (%3)
        mov     eax, VERR_SVM_VMRUN_PRECOND_1
        jne     .failure_return
…
 %endif
 
+%if (%3) & HM_WSF_SPEC_CTRL
+       ; Save host MSR_IA32_SPEC_CTRL and load the guest one (trashes rax, rdx, rcx, rbx).
+       ; HACK ALERT! Boldly ASSUMES that CPUMCTXMSRS follows immediately after GstCtx (CPUMCTX).
+       mov     rbx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX_size + CPUMCTXMSRS.msr.SpecCtrl] ; rbx = guest IA32_SPEC_CTRL
+       mov     ecx, MSR_IA32_SPEC_CTRL
+       rdmsr                           ; edx:eax = host IA32_SPEC_CTRL value
+       shl     rdx, 32
+       or      rdx, rax                ; rdx = host IA32_SPEC_CTRL value
+       mov     [rbp + frm_uHostSpecCtrl], rdx
+       cmp     rdx, rbx                ; avoid wrmsr if we can.
+       je      .skip_spec_ctrl_load
+       mov     eax, ebx
+       mov     rdx, rbx
+       shr     rdx, 32
+       wrmsr
+.skip_spec_ctrl_load:
+%endif
+
        ; Save host fs, gs, sysenter msr etc.
        mov     rax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.svm + HMR0CPUSVM.HCPhysVmcbHost]
…
        vmsave
 
-%if %3 & HM_WSF_IBPB_ENTRY
+%if (%3) & HM_WSF_IBPB_ENTRY
        ; Fight spectre (trashes rax, rdx and rcx).
        mov     ecx, MSR_IA32_PRED_CMD
…
        mov     r15, [rbp + frm_saved_r15]
 
-%if %4 != 0
-       ; Set r8 = &pVCpu->cpum.GstCtx; for use below when saving and restoring SSE state.
+%if %4 != 0 || ((%3) & HM_WSF_SPEC_CTRL)
+       ; Set r8 = &pVCpu->cpum.GstCtx; for use below when saving and restoring SSE state as well as for IA32_SPEC_CTRL.
        mov     r8, rcx
 %endif
 
-%if %3 & HM_WSF_IBPB_EXIT
+%if (%3) & HM_WSF_IBPB_EXIT
        ; Fight spectre (trashes rax, rdx and rcx).
        mov     ecx, MSR_IA32_PRED_CMD
…
        mov     rax, [rbp + frm_uHostXcr0]
        xsetbv
 %endif
+
+%if (%3) & HM_WSF_SPEC_CTRL
+       ; Save guest MSR_IA32_SPEC_CTRL and load the host one (trashes rax, rdx, rcx, r10).
+       mov     r10, [rbp + frm_uHostSpecCtrl] ; r10 = host IA32_SPEC_CTRL
+       mov     ecx, MSR_IA32_SPEC_CTRL
+       rdmsr                           ; edx:eax = guest IA32_SPEC_CTRL value
+       shl     rdx, 32
+       or      rdx, rax                ; rdx = guest IA32_SPEC_CTRL value
+       ; HACK ALERT! Boldly ASSUMES that CPUMCTXMSRS follows immediately after GstCtx (CPUMCTX).
+       mov     [r8 + CPUMCTX_size + CPUMCTXMSRS.msr.SpecCtrl], rdx ; saved guest IA32_SPEC_CTRL
+       cmp     rdx, r10                ; avoid wrmsr if we can.
+       je      .skip_spec_ctrl_restore
+       mov     eax, r10d
+       mov     rdx, r10
+       shr     rdx, 32
+       wrmsr
+.skip_spec_ctrl_restore:
+%endif
 
…
 ;
 ; Instantiate the hmR0SvmVmRun various variations.
 ;
-hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit, 0, 0, 0
-hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit, 1, 0, 0
-hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit, 0, HM_WSF_IBPB_ENTRY, 0
-hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit, 1, HM_WSF_IBPB_ENTRY, 0
-hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit, 0, HM_WSF_IBPB_EXIT, 0
-hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit, 1, HM_WSF_IBPB_EXIT, 0
-hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
-hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl, 0, 0, 0
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl, 1, 0, 0
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl, 0, HM_WSF_IBPB_ENTRY, 0
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl, 1, HM_WSF_IBPB_ENTRY, 0
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl, 0, HM_WSF_IBPB_EXIT, 0
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl, 1, HM_WSF_IBPB_EXIT, 0
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl, 0, HM_WSF_SPEC_CTRL, 0
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl, 1, HM_WSF_SPEC_CTRL, 0
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl, 0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY, 0
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl, 1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY, 0
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl, 0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_EXIT, 0
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl, 1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_EXIT, 0
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl, 0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl, 1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
+
 %ifdef VBOX_WITH_KERNEL_USING_XMM
-hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 0, 0, 1
-hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 1, 0, 1
-hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 0, HM_WSF_IBPB_ENTRY, 1
-hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 1, HM_WSF_IBPB_ENTRY, 1
-hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 0, HM_WSF_IBPB_EXIT, 1
-hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 1, HM_WSF_IBPB_EXIT, 1
-hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
-hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
-
-hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseXSave, 0, 0, 2
-hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseXSave, 1, 0, 2
-hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseXSave, 0, HM_WSF_IBPB_ENTRY, 2
-hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseXSave, 1, HM_WSF_IBPB_ENTRY, 2
-hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseXSave, 0, HM_WSF_IBPB_EXIT, 2
-hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseXSave, 1, HM_WSF_IBPB_EXIT, 2
-hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseXSave, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
-hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseXSave, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl_SseManual, 0, 0, 1
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl_SseManual, 1, 0, 1
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl_SseManual, 0, HM_WSF_IBPB_ENTRY, 1
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl_SseManual, 1, HM_WSF_IBPB_ENTRY, 1
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl_SseManual, 0, HM_WSF_IBPB_EXIT, 1
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl_SseManual, 1, HM_WSF_IBPB_EXIT, 1
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl_SseManual, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl_SseManual, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl_SseManual, 0, HM_WSF_SPEC_CTRL, 1
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl_SseManual, 1, HM_WSF_SPEC_CTRL, 1
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl_SseManual, 0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY, 1
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl_SseManual, 1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY, 1
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl_SseManual, 0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_EXIT, 1
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl_SseManual, 1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_EXIT, 1
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl_SseManual, 0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl_SseManual, 1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
+
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl_SseXSave, 0, 0, 2
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl_SseXSave, 1, 0, 2
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl_SseXSave, 0, HM_WSF_IBPB_ENTRY, 2
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl_SseXSave, 1, HM_WSF_IBPB_ENTRY, 2
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl_SseXSave, 0, HM_WSF_IBPB_EXIT, 2
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl_SseXSave, 1, HM_WSF_IBPB_EXIT, 2
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl_SseXSave, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl_SseXSave, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl_SseXSave, 0, HM_WSF_SPEC_CTRL, 2
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl_SseXSave, 1, HM_WSF_SPEC_CTRL, 2
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl_SseXSave, 0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY, 2
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl_SseXSave, 1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY, 2
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl_SseXSave, 0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_EXIT, 2
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl_SseXSave, 1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_EXIT, 2
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl_SseXSave, 0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl_SseXSave, 1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
 %endif
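The HM_WSF_SPEC_CTRL additions above save the host IA32_SPEC_CTRL, load the guest value before VMRUN and restore the host value afterwards, skipping the WRMSR when the two values already match. A C-level sketch of that sequence; readMsr(), writeMsr() and runGuestSvm() are hypothetical helpers standing in for the rdmsr/wrmsr instructions and the VMLOAD/VMRUN/VMSAVE block:

    #include <stdint.h>

    #define MSR_IA32_SPEC_CTRL 0x48

    /* Hypothetical helpers standing in for privileged instructions. */
    extern uint64_t readMsr(uint32_t idMsr);
    extern void     writeMsr(uint32_t idMsr, uint64_t uValue);
    extern void     runGuestSvm(void);   /* stand-in for the VMLOAD/VMRUN/VMSAVE sequence */

    /* Sketch of the HM_WSF_SPEC_CTRL handling added around the SVM world switch. */
    static void worldSwitchWithSpecCtrl(uint64_t *puGuestSpecCtrl)
    {
        uint64_t const uHost = readMsr(MSR_IA32_SPEC_CTRL);   /* save the host value */
        if (*puGuestSpecCtrl != uHost)                         /* avoid WRMSR when possible */
            writeMsr(MSR_IA32_SPEC_CTRL, *puGuestSpecCtrl);

        runGuestSvm();

        uint64_t const uGuest = readMsr(MSR_IA32_SPEC_CTRL);   /* the guest may have changed it */
        *puGuestSpecCtrl = uGuest;                              /* stash for the next entry */
        if (uGuest != uHost)
            writeMsr(MSR_IA32_SPEC_CTRL, uHost);                /* restore the host value */
    }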
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r107113 → r107854)

     static const struct CLANGWORKAROUND { PFNHMSVMVMRUN pfn; } s_aHmR0SvmVmRunFunctions[] =
     {
-        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit },
-        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit },
-        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit },
-        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit },
-        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit },
-        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit },
-        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit },
-        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit },
+        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl },
+        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl },
+        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl },
+        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl },
+        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl },
+        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl },
+        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl },
+        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl },
+        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl },
+        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl },
+        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl },
+        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl },
+        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl },
+        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl },
+        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl },
+        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl },
     };
     uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0 ? 1 : 0)
                         | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_ENTRY ? 2 : 0)
-                        | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT ? 4 : 0);
+                        | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT ? 4 : 0)
+                        | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_SPEC_CTRL ? 8 : 0);
     PFNHMSVMVMRUN const pfnVMRun = s_aHmR0SvmVmRunFunctions[idx].pfn;
     if (pVCpu->hmr0.s.svm.pfnVMRun != pfnVMRun)
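With HM_WSF_SPEC_CTRL added, the VMRUN dispatch table doubles from 8 to 16 entries and is indexed by a 4-bit value. A small sketch of that indexing; the WSF_* constants below are hypothetical placeholders, not the real HM_WSF_* definitions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the world-switcher flag bits. */
    #define WSF_IBPB_ENTRY (1u << 1)
    #define WSF_IBPB_EXIT  (1u << 0)
    #define WSF_SPEC_CTRL  (1u << 4)

    /* Index into a 16-entry table of VMRUN variants: bit 0 = load/save guest XCR0,
       bit 1 = IBPB on entry, bit 2 = IBPB on exit, bit 3 = IA32_SPEC_CTRL swapping. */
    static unsigned vmRunFunctionIndex(bool fLoadSaveGuestXcr0, uint32_t fWorldSwitcher)
    {
        return (fLoadSaveGuestXcr0              ? 1u : 0u)
             | (fWorldSwitcher & WSF_IBPB_ENTRY ? 2u : 0u)
             | (fWorldSwitcher & WSF_IBPB_EXIT  ? 4u : 0u)
             | (fWorldSwitcher & WSF_SPEC_CTRL  ? 8u : 0u);
    }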
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r107731 → r107854)

      *        #GP), but we don't currently do so for performance raisins/laziness.
      */
-    if (pVM->cpum.ro.GuestFeatures.fIbpb)
+    if (pVM->cpum.ro.GuestFeatures.fIbpb /* && g_CpumHostFeatures.s.fIbpb*/)
         hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_PRED_CMD, VMXMSRPM_ALLOW_RD_WR);
-    if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
+    if (pVM->cpum.ro.GuestFeatures.fFlushCmd && g_CpumHostFeatures.s.fFlushCmd)
         hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_FLUSH_CMD, VMXMSRPM_ALLOW_RD_WR);
-    if (pVM->cpum.ro.GuestFeatures.fIbrs)
+    if (pVM->cpum.ro.GuestFeatures.fIbrs && g_CpumHostFeatures.s.fIbrs)
         hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SPEC_CTRL, VMXMSRPM_ALLOW_RD_WR);
 
…
     /* Speculation Control (R/W). */
     HMVMX_CPUMCTX_ASSERT(pVCpu, HM_CHANGED_GUEST_OTHER_MSRS);
-    if (pVM->cpum.ro.GuestFeatures.fIbrs)
+    if (pVM->cpum.ro.GuestFeatures.fIbrs && g_CpumHostFeatures.s.fIbrs)
     {
         int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu),
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
(r107749 → r107854)

  *          insert.
  */
-static int cpumR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCPUMCPUIDLEAF pNewLeaf)
+static int cpumR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCCPUMCPUIDLEAF pNewLeaf)
 {
     /*
…
     bool            fForceVme;
     bool            fNestedHWVirt;
+    bool            fSpecCtrl;
 
     CPUMISAEXTCFG   enmCmpXchg16b;
…
      * currently not doing the apic id assignments in a compatible manner.
      */
+    bool fAmdGstSupIbpb = false; /* Used below. */
     uSubLeaf = 0;
     while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000008), uSubLeaf)) != NULL)
…
         {
             /* Expose XSaveErPtr aka RstrFpErrPtrs to guest. */
-            pCurLeaf->uEbx &= X86_CPUID_AMD_EFEID_EBX_XSAVE_ER_PTR; /* reserved - [12] == IBPB */
+            pCurLeaf->uEbx &= 0
+                            //| X86_CPUID_AMD_EFEID_EBX_CLZERO
+                            //| X86_CPUID_AMD_EFEID_EBX_IRPERF
+                            //| X86_CPUID_AMD_EFEID_EBX_XSAVE_ER_PTR
+                            //| X86_CPUID_AMD_EFEID_EBX_INVLPGB
+                            //| X86_CPUID_AMD_EFEID_EBX_RDPRU
+                            //| X86_CPUID_AMD_EFEID_EBX_BE
+                            //| X86_CPUID_AMD_EFEID_EBX_MCOMMIT
+                            | (pConfig->fSpecCtrl || PASSTHRU_FEATURE(pConfig->enmFlushCmdMsr, pHstFeat->fFlushCmd, true)
+                               ? X86_CPUID_AMD_EFEID_EBX_IBPB : 0)
+                            //| X86_CPUID_AMD_EFEID_EBX_INT_WBINVD
+                            | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBRS : 0)
+                            | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_STIBP : 0)
+                            | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBRS_ALWAYS_ON : 0)
+                            | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_STIBP_ALWAYS_ON : 0)
+                            | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBRS_PREFERRED : 0)
+                            | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBRS_SAME_MODE : 0)
+                            //| X86_CPUID_AMD_EFEID_EBX_NO_EFER_LMSLE
+                            //| X86_CPUID_AMD_EFEID_EBX_INVLPGB_NESTED_PAGES
+                            | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_SPEC_CTRL_SSBD : 0)
+                            /// @todo | X86_CPUID_AMD_EFEID_EBX_VIRT_SPEC_CTRL_SSBD
+                            | X86_CPUID_AMD_EFEID_EBX_SSBD_NOT_REQUIRED
+                            //| X86_CPUID_AMD_EFEID_EBX_CPPC
+                            | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_PSFD : 0)
+                            | X86_CPUID_AMD_EFEID_EBX_BTC_NO
+                            | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBPB_RET : 0);
+
+            PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEbx, IBPB, X86_CPUID_AMD_EFEID_EBX_IBPB, pConfig->enmFlushCmdMsr);
+
+            /* Sharing this forced setting with intel would maybe confuse guests... */
+            if (pConfig->enmFlushCmdMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
+                pCurLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_IBPB;
+
+            fAmdGstSupIbpb = RT_BOOL(pCurLeaf->uEbx & X86_CPUID_AMD_EFEID_EBX_IBPB);
         }
         else
…
     }
 
-    /* Cpuid 0x8000001f...0x8ffffffd: Unknown.
+    /* Cpuid 0x80000020: Platform Quality of Service (PQOS), may have subleaves.
+     * For now we just zero it. */
+    pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000020), 0);
+    if (pCurLeaf)
+    {
+        pCurLeaf = cpumR3CpuIdMakeSingleLeaf(pCpum, pCurLeaf);
+        cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x80000020));
+    }
+
+    /* Cpuid 0x80000021: Extended Feature 2 (Zen3+?).
+     *
+     */
+    pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000021), 0);
+    if (pCurLeaf)
+    {
+        /** @todo sanitize these bits! */
+        pCurLeaf->uEax = 0;
+        pCurLeaf->uEbx = 0;
+        pCurLeaf->uEcx = 0;
+        pCurLeaf->uEdx = 0;
+    }
+    /* Linux expects us as a hypervisor to insert this leaf for Zen 1 & 2 CPUs
+       iff IBPB is available to the guest.  This is also documented by AMD in
+       "TECHNICAL UPDATE REGARDING SPECULATIVE RETURN STACK OVERFLOW" rev 2.0
+       dated 2024-02-00. */
+    else if (   fAmdGstSupIbpb
+             && (   pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+                 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
+             && (pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0)) != NULL
+             && RTX86GetCpuFamily(pExtFeatureLeaf->uEax) == 0x17)
+    {
+        static CPUMCPUIDLEAF const s_NewLeaf =
+        {
+            /* .uLeaf =*/         UINT32_C(0x80000021),
+            /* .uSubLeaf = */     0,
+            /* .fSubLeafMask = */ 0,
+            /* .uEax = */         X86_CPUID_AMD_21_EAX_IBPB_BRTYPE,
+            /* .uEbx = */         0,
+            /* .uEcx = */         0,
+            /* .uEdx = */         0,
+            /* .fFlags = */       0,
+        };
+        int const rc2 = cpumR3CpuIdInsert(NULL, &pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves, &s_NewLeaf);
+        AssertRC(rc2);
+        pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000000), 0);
+        if (pCurLeaf && pCurLeaf->uEax < UINT32_C(0x80000021))
+            pCurLeaf->uEax = UINT32_C(0x80000021);
+    }
+
+    /* Cpuid 0x80000022...0x8ffffffd: Unknown.
      * We don't know these and what they mean, so remove them. */
     cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
-                           UINT32_C(0x8000001f), UINT32_C(0x8ffffffd));
+                           UINT32_C(0x80000022), UINT32_C(0x8ffffffd));
 
     /* Cpuid 0x8ffffffe: Mystery AMD K6 leaf.
…
      * leaf are removed.  The default is set to what we're able to sanitize.
      */
-    rc = CFGMR3QueryU32Def(pCpumCfg, "MaxExtLeaf", &pConfig->uMaxExtLeaf, UINT32_C(0x8000001e));
+    rc = CFGMR3QueryU32Def(pCpumCfg, "MaxExtLeaf", &pConfig->uMaxExtLeaf, UINT32_C(0x80000021));
     AssertLogRelRCReturn(rc, rc);
 
…
     rc = CFGMR3QueryU32Def(pCpumCfg, "MaxCentaurLeaf", &pConfig->uMaxCentaurLeaf, UINT32_C(0xc0000004));
     AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/CPUM/SpecCtrl, bool, false}
+     * Enables passing thru IA32_SPEC_CTRL and associated CPU bugfixes.
+     */
+    rc = CFGMR3QueryBoolDef(pCpumCfg, "SpecCtrl", &pConfig->fSpecCtrl, false);
+    AssertRCReturn(rc, rc);
 
 #ifdef RT_ARCH_AMD64 /** @todo next VT-x/AMD-V on non-AMD64 hosts */
…
      */
     if (RT_SUCCESS(rc))
-        rc = cpumR3MsrReconcileWithCpuId(pVM);
+        rc = cpumR3MsrReconcileWithCpuId(pVM, false, false);
     /*
      * MSR fudging.
…
 
     /* Check if speculation control is enabled. */
-    rc = CFGMR3QueryBoolDef(pCpumCfg, "SpecCtrl", &fEnable, false);
-    AssertRCReturn(rc, rc);
-    if (fEnable)
+    if (Config.fSpecCtrl)
         CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SPEC_CTRL);
     else
…
     if (pLeaf)
     {
-        pLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_NO_SSBD_REQUIRED;
+        pLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_SSBD_NOT_REQUIRED;
         LogRel(("CPUM: Set SSBD not required flag for AMD to work around some buggy Linux kernels!\n"));
     }
…
      */
     case CPUMCPUIDFEATURE_SPEC_CTRL:
+    {
+        AssertReturnVoid(!pVM->cpum.s.GuestFeatures.fSpeculationControl); /* should only be done once! */
+
+#ifdef RT_ARCH_AMD64
+        if (!pVM->cpum.s.HostFeatures.s.fIbpb && !pVM->cpum.s.HostFeatures.s.fIbrs)
+        {
+            LogRel(("CPUM: WARNING! Can't turn on Speculation Control when the host doesn't support it!\n"));
+            return;
+        }
+#endif
+        bool fForceSpecCtrl = false;
+        bool fForceFlushCmd = false;
+
+        /*
+         * Intel spread feature info around a bit...
+         */
         if (pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
         {
             pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 0);
-#ifdef RT_ARCH_AMD64
-            if (   !pLeaf
-                || !(pVM->cpum.s.HostFeatures.s.fIbpb || pVM->cpum.s.HostFeatures.s.fIbrs))
+            if (!pLeaf)
             {
-                LogRel(("CPUM: WARNING! Can't turn on Speculation Control when the host doesn't support it!\n"));
+                LogRel(("CPUM: WARNING! Can't turn on Speculation Control on Intel CPUs without leaf 0x00000007!\n"));
                 return;
             }
-#else
-            if (!pLeaf)
-            {
-                LogRel(("CPUM: WARNING! Can't turn on Speculation Control without leaf 0x00000007!\n"));
-                return;
-            }
-#endif
 
-            /* The feature can be enabled.  Let's see what we can actually do. */
-            pVM->cpum.s.GuestFeatures.fSpeculationControl = 1;
+            /* Okay, the feature can be enabled.  Let's see what we can actually do. */
 
 #ifdef RT_ARCH_AMD64
…
                 pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB;
                 pVM->cpum.s.GuestFeatures.fIbrs = 1;
+                pVM->cpum.s.GuestFeatures.fIbpb = 1;
 #ifdef RT_ARCH_AMD64
                 if (pVM->cpum.s.HostFeatures.s.fStibp)
…
                     pVM->cpum.s.GuestFeatures.fBhiCtrl = 1;
                 }
+                fForceSpecCtrl = true;
             }
-
-            /* Make sure we have the speculation control MSR... */
-            pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_SPEC_CTRL);
-            if (!pMsrRange)
-            {
-                static CPUMMSRRANGE const s_SpecCtrl =
-                {
-                    /*.uFirst =*/ MSR_IA32_SPEC_CTRL, /*.uLast =*/ MSR_IA32_SPEC_CTRL,
-                    /*.enmRdFn =*/ kCpumMsrRdFn_Ia32SpecCtrl, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32SpecCtrl,
-                    /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
-                    /*.szName = */ "IA32_SPEC_CTRL"
-                };
-                int rc = CPUMR3MsrRangesInsert(pVM, &s_SpecCtrl);
-                AssertLogRelRC(rc);
-            }
-
-            /* ... and the predictor command MSR. */
-            pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_PRED_CMD);
-            if (!pMsrRange)
-            {
-                /** @todo incorrect fWrGpMask. */
-                static CPUMMSRRANGE const s_SpecCtrl =
-                {
-                    /*.uFirst =*/ MSR_IA32_PRED_CMD, /*.uLast =*/ MSR_IA32_PRED_CMD,
-                    /*.enmRdFn =*/ kCpumMsrRdFn_WriteOnly, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32PredCmd,
-                    /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
-                    /*.szName = */ "IA32_PRED_CMD"
-                };
-                int rc = CPUMR3MsrRangesInsert(pVM, &s_SpecCtrl);
-                AssertLogRelRC(rc);
-            }
-
+
+#ifdef RT_ARCH_AMD64
+            if (pVM->cpum.s.HostFeatures.s.fFlushCmd)
+#endif
+            {
+                pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD;
+                pVM->cpum.s.GuestFeatures.fFlushCmd = 1;
+                fForceFlushCmd = true;
             }
 
…
 #endif
             {
-                /* Install the architectural capabilities MSR. */
-                pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_ARCH_CAPABILITIES);
-                if (!pMsrRange)
-                {
-                    static CPUMMSRRANGE const s_ArchCaps =
-                    {
-                        /*.uFirst =*/ MSR_IA32_ARCH_CAPABILITIES, /*.uLast =*/ MSR_IA32_ARCH_CAPABILITIES,
-                        /*.enmRdFn =*/ kCpumMsrRdFn_Ia32ArchCapabilities, /*.enmWrFn =*/ kCpumMsrWrFn_ReadOnly,
-                        /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ UINT64_MAX,
-                        /*.szName = */ "IA32_ARCH_CAPABILITIES"
-                    };
-                    int rc = CPUMR3MsrRangesInsert(pVM, &s_ArchCaps);
-                    AssertLogRelRC(rc);
-                }
+                pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP;
+                pVM->cpum.s.GuestFeatures.fArchCap = 1;
 
                 /* Advertise IBRS_ALL if present at this point... */
…
                     VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps |= MSR_IA32_ARCH_CAP_F_IBRS_ALL);
             }
-
-            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Speculation Control.\n"));
+            cpumCpuIdExplodeFeaturesX86SetSummaryBits(&pVM->cpum.s.GuestFeatures);
         }
+        /*
+         * AMD does things in a different (better) way.  No MSR with info,
+         * it's all in various CPUID leaves.
+         */
         else if (   pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
                  || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
         {
-            /* The precise details of AMD's implementation are not yet clear. */
+            pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x80000008), 0);
+            if (!pLeaf)
+            {
+                LogRel(("CPUM: WARNING! Can't turn on Speculation Control on AMD CPUs without leaf 0x80000008!\n"));
+                return;
+            }
+
+            /* We passthru all the host cpuid bits on AMD, see cpumR3CpuIdSanitize,
+               and there is no code to clear/unset the feature.  So, little to do.
+               The only thing we could consider here, is to re-enable stuff
+               suppressed for portability reasons. */
         }
+        else
+            break;
+
+        LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Speculation Control.\n"));
+        pVM->cpum.s.GuestFeatures.fSpeculationControl = 1;
+        cpumR3MsrReconcileWithCpuId(pVM, fForceFlushCmd, fForceSpecCtrl);
         break;
+    }
 
     default:
…
         DBGFREGSUBFIELD_RO("EferLmsleUnsupported\0" "EFER.LMSLE is unsupported", 20, 1, 0),
         DBGFREGSUBFIELD_RO("INVLPGBnestedPages\0" "INVLPGB for nested translation", 21, 1, 0),
+        DBGFREGSUBFIELD_RO("PPIN\0" "Protected processor inventory number", 23, 1, 0),
         DBGFREGSUBFIELD_RO("SSBD\0" "Speculative Store Bypass Disable", 24, 1, 0),
         DBGFREGSUBFIELD_RO("SsbdVirtSpecCtrl\0" "Use VIRT_SPEC_CTL for SSBD", 25, 1, 0),
…
 
 static void cpumR3CpuIdInfoVerboseCompareListU32(PCDBGFINFOHLP pHlp, uint32_t uVal1, uint32_t uVal2, PCDBGFREGSUBFIELD pDesc,
-                                                 uint32_t cchWidth)
+                                                 const char *pszLeadIn, uint32_t cchWidth)
 {
+    if (pszLeadIn)
+        pHlp->pfnPrintf(pHlp,
+                        "%s\n"
+                        " %-*s= guest (host)\n",
+                        pszLeadIn,
+                        cchWidth, "Mnemonic - Description");
+
     uint32_t uCombined = uVal1 | uVal2;
     for (uint32_t iBit = 0; iBit < 32; iBit++)
…
         ASMCpuIdExSlow(1, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
 #endif
-        pHlp->pfnPrintf(pHlp, "Features\n");
-        pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
-        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf1EdxSubFields, 56);
-        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf1EcxSubFields, 56);
+        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf1EdxSubFields, "Features", 56);
+        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf1EcxSubFields, NULL, 56);
     }
     else
…
         if (fVerbose)
         {
-            pHlp->pfnPrintf(pHlp, " Sub-leaf 0\n");
-            pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
-            cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub0EbxSubFields, 56);
-            cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub0EcxSubFields, 56);
+            cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub0EbxSubFields, "Sub-leaf 0", 56);
+            cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub0EcxSubFields, NULL, 56);
             if (pCurLeaf->uEdx || Host.uEdx)
-                cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub0EdxSubFields, 56);
+                cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub0EdxSubFields, NULL, 56);
         }
         else
…
             pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
             if (pCurLeaf->uEbx || Host.uEbx)
-                cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub2EbxSubFields, 56);
+                cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub2EbxSubFields, NULL, 56);
             if (pCurLeaf->uEcx || Host.uEcx)
-                cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub2EcxSubFields, 56);
-            cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub2EdxSubFields, 56);
+                cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub2EcxSubFields, NULL, 56);
+            cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub2EdxSubFields, NULL, 56);
         }
         else
…
         ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
 #endif
-        pHlp->pfnPrintf(pHlp, "Ext Features\n");
-        pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
-        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf1EdxSubFields, 56);
-        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aExtLeaf1EcxSubFields, 56);
+        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf1EdxSubFields, "Ext Features", 56);
+        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aExtLeaf1EcxSubFields, NULL, 56);
         if (Host.uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
         {
-            pHlp->pfnPrintf(pHlp, "SVM Feature Identification (leaf A):\n");
 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
             ASMCpuIdExSlow(0x8000000a, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
…
             pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x8000000a), 0);
             uint32_t const uGstEdx = pCurLeaf ? pCurLeaf->uEdx : 0;
-            cpumR3CpuIdInfoVerboseCompareListU32(pHlp, uGstEdx, Host.uEdx, g_aExtLeafAEdxSubFields, 56);
+            cpumR3CpuIdInfoVerboseCompareListU32(pHlp, uGstEdx, Host.uEdx, g_aExtLeafAEdxSubFields,
+                                                 "SVM Feature Identification (leaf A)", 56);
         }
     }
…
             cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aExtLeaf7EdxSubFields, "APM Features EDX:", 34);
         else
-            cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf7EdxSubFields, 56);
+            cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf7EdxSubFields,
+                                                 "APM Features EDX", 56);
     }
 }
…
             cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aExtLeaf8EbxSubFields, "Ext Features ext IDs EBX:", 34);
         else
-            cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aExtLeaf8EbxSubFields, 56);
+            cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aExtLeaf8EbxSubFields,
+                                                 "Ext Features ext IDs EBX", 56);
     }
 
     if (iVerbosity)
     {
-        uint32_t uEAX = pCurLeaf->uEax;
-        uint32_t uECX = pCurLeaf->uEcx;
-
-        /** @todo 0x80000008:EAX[23:16] is only defined for AMD.  We'll get 0 on Intel.  On
-         *        AMD if we get 0, the guest physical address width should be taken from
-         *        0x80000008:EAX[7:0] instead.  Guest Physical address width is relevant
-         *        for guests using nested paging. */
+        uint32_t const uEAX = pCurLeaf->uEax;
         pHlp->pfnPrintf(pHlp,
                         "Physical Address Width: %d bits\n"
-                        "Virtual Address Width: %d bits\n"
-                        "Guest Physical Address Width: %d bits\n",
+                        "Virtual Address Width: %d bits\n",
                         (uEAX >> 0) & 0xff,
-                        (uEAX >> 8) & 0xff,
-                        (uEAX >> 16) & 0xff);
-
-        /** @todo 0x80000008:ECX is reserved on Intel (we'll get incorrect physical core
-         *        count here). */
-        pHlp->pfnPrintf(pHlp,
-                        "Physical Core Count: %d\n",
-                        ((uECX >> 0) & 0xff) + 1);
+                        (uEAX >> 8) & 0xff);
+        if (   ((uEAX >> 16) & 0xff) != 0
+            || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+            || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
+            pHlp->pfnPrintf(pHlp, "Guest Physical Address Width: %d bits%s\n",
+                            (uEAX >> 16) & 0xff ? (uEAX >> 16) & 0xff : (uEAX >> 0) & 0xff,
+                            (uEAX >> 16) & 0xff ? "" : " (0)");
+
+        uint32_t const uECX = pCurLeaf->uEcx;
+        if (   ((uECX >> 0) & 0xff) != 0
+            || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+            || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
+        {
+            uint32_t const cPhysCoreCount = ((uECX >> 0) & 0xff) + 1;
+            uint32_t const cApicIdSize    = (uECX >> 12) & 0xf ? RT_BIT_32((uECX >> 12) & 0xf) : cPhysCoreCount;
+            pHlp->pfnPrintf(pHlp,
+                            "Physical Core Count: %d\n"
+                            "APIC ID size: %u (%#x)\n"
+                            "Performance TSC size: %u bits\n",
+                            cPhysCoreCount,
+                            cApicIdSize, cApicIdSize,
+                            (((uECX >> 16) & 0x3) << 3) + 40);
+        }
+        uint32_t const uEDX = pCurLeaf->uEax;
+        if (uEDX)
+            pHlp->pfnPrintf(pHlp,
+                            "Max page count for INVLPGB: %#x\n"
+                            "Max ECX for RDPRU: %#x\n",
+                            (uEDX & 0xffff), uEDX >> 16);
     }
 }
trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp
(r107778 → r107854)

  * @returns VBox status code.
  * @param   pVM                 The cross context VM structure.
- */
-int cpumR3MsrReconcileWithCpuId(PVM pVM)
-{
-    PCCPUMMSRRANGE papToAdd[10];
+ * @param   fForceFlushCmd      Make sure MSR_IA32_FLUSH_CMD is present.
+ * @param   fForceSpecCtrl      Make sure MSR_IA32_SPEC_CTRL is present.
+ */
+DECLHIDDEN(int) cpumR3MsrReconcileWithCpuId(PVM pVM, bool fForceFlushCmd, bool fForceSpecCtrl)
+{
+    PCCPUMMSRRANGE apToAdd[10];
     uint32_t cToAdd = 0;
 
…
      * The IA32_FLUSH_CMD MSR was introduced in MCUs for CVS-2018-3646 and associates.
      */
-    if (pVM->cpum.s.GuestFeatures.fFlushCmd && !cpumLookupMsrRange(pVM, MSR_IA32_FLUSH_CMD))
+    if (   pVM->cpum.s.GuestFeatures.fFlushCmd
+        || fForceFlushCmd)
     {
         static CPUMMSRRANGE const s_FlushCmd =
…
             /*.szName = */ "IA32_FLUSH_CMD"
         };
-        papToAdd[cToAdd++] = &s_FlushCmd;
+        apToAdd[cToAdd++] = &s_FlushCmd;
+    }
+
+    /*
+     * The IA32_PRED_CMD MSR was introduced in MCUs for CVS-2018-3646 and associates.
+     */
+    if (   pVM->cpum.s.GuestFeatures.fIbpb
+        /** @todo || pVM->cpum.s.GuestFeatures.fSbpb*/)
+    {
+        static CPUMMSRRANGE const s_PredCmd =
+        {
+            /*.uFirst =*/     MSR_IA32_PRED_CMD,
+            /*.uLast =*/      MSR_IA32_PRED_CMD,
+            /*.enmRdFn =*/    kCpumMsrRdFn_WriteOnly,
+            /*.enmWrFn =*/    kCpumMsrWrFn_Ia32PredCmd,
+            /*.offCpumCpu =*/ UINT16_MAX,
+            /*.fReserved =*/  0,
+            /*.uValue =*/     0,
+            /*.fWrIgnMask =*/ 0,
+            /*.fWrGpMask =*/  ~MSR_IA32_PRED_CMD_F_IBPB,
+            /*.szName = */    "IA32_PRED_CMD"
+        };
+        apToAdd[cToAdd++] = &s_PredCmd;
+    }
+
+    /*
+     * The IA32_SPEC_CTRL MSR was introduced in MCUs for CVS-2018-3646 and associates.
+     */
+    if (   pVM->cpum.s.GuestFeatures.fSpecCtrlMsr
+        || fForceSpecCtrl)
+    {
+        static CPUMMSRRANGE const s_SpecCtrl =
+        {
+            /*.uFirst =*/     MSR_IA32_SPEC_CTRL,
+            /*.uLast =*/      MSR_IA32_SPEC_CTRL,
+            /*.enmRdFn =*/    kCpumMsrRdFn_Ia32SpecCtrl,
+            /*.enmWrFn =*/    kCpumMsrWrFn_Ia32SpecCtrl,
+            /*.offCpumCpu =*/ UINT16_MAX,
+            /*.fReserved =*/  0,
+            /*.uValue =*/     0,
+            /*.fWrIgnMask =*/ 0,
+            /*.fWrGpMask =*/  0,
+            /*.szName = */    "IA32_SPEC_CTRL"
+        };
+        apToAdd[cToAdd++] = &s_SpecCtrl;
     }
 
…
      * documented in relation to such.
      */
-    if (pVM->cpum.s.GuestFeatures.fArchCap && !cpumLookupMsrRange(pVM, MSR_IA32_ARCH_CAPABILITIES))
+    if (pVM->cpum.s.GuestFeatures.fArchCap)
     {
         static CPUMMSRRANGE const s_ArchCaps =
…
             /*.szName = */ "IA32_ARCH_CAPABILITIES"
         };
-        papToAdd[cToAdd++] = &s_ArchCaps;
+        apToAdd[cToAdd++] = &s_ArchCaps;
     }
 
…
      * Do the adding.
      */
+    Assert(cToAdd <= RT_ELEMENTS(apToAdd));
     for (uint32_t i = 0; i < cToAdd; i++)
     {
-        PCCPUMMSRRANGE pRange = papToAdd[i];
-        LogRel(("CPUM: MSR/CPUID reconciliation insert: %#010x %s\n", pRange->uFirst, pRange->szName));
-        int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
-                                       pRange);
-        if (RT_FAILURE(rc))
-            return rc;
+        PCCPUMMSRRANGE pRange = apToAdd[i];
+        Assert(pRange->uFirst == pRange->uLast);
+        if (!cpumLookupMsrRange(pVM, pRange->uFirst))
+        {
+            LogRel(("CPUM: MSR/CPUID reconciliation insert: %#010x %s\n", pRange->uFirst, pRange->szName));
+            int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3,
+                                           &pVM->cpum.s.GuestInfo.cMsrRanges, pRange);
+            AssertRCReturn(rc, rc);
+        }
     }
     return VINF_SUCCESS;
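The reworked cpumR3MsrReconcileWithCpuId() collects candidate MSR ranges (IA32_FLUSH_CMD, IA32_PRED_CMD, IA32_SPEC_CTRL, IA32_ARCH_CAPABILITIES) into a small array and only registers the ones that are still missing. A minimal C sketch of that collect-then-insert-if-missing pattern; the MSRRANGE type and the msrRangeExists()/msrRangeInsert() helpers are simplified, hypothetical stand-ins, while the MSR indexes 0x48, 0x49, 0x10a and 0x10b are the architectural ones:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for a one-MSR range descriptor (not the real CPUMMSRRANGE). */
    typedef struct MSRRANGE { uint32_t idMsr; const char *pszName; } MSRRANGE;

    extern bool msrRangeExists(uint32_t idMsr);          /* hypothetical lookup */
    extern int  msrRangeInsert(const MSRRANGE *pRange);  /* hypothetical insert, 0 on success */

    static int reconcileMsrsWithCpuId(bool fFlushCmd, bool fIbpb, bool fSpecCtrlMsr, bool fArchCap)
    {
        static const MSRRANGE s_FlushCmd = { 0x10b, "IA32_FLUSH_CMD" };
        static const MSRRANGE s_PredCmd  = { 0x049, "IA32_PRED_CMD" };
        static const MSRRANGE s_SpecCtrl = { 0x048, "IA32_SPEC_CTRL" };
        static const MSRRANGE s_ArchCaps = { 0x10a, "IA32_ARCH_CAPABILITIES" };

        const MSRRANGE *apToAdd[4];
        size_t cToAdd = 0;
        if (fFlushCmd)    apToAdd[cToAdd++] = &s_FlushCmd;   /* guest CPUID advertises FLUSH_CMD   */
        if (fIbpb)        apToAdd[cToAdd++] = &s_PredCmd;    /* IBPB implies IA32_PRED_CMD          */
        if (fSpecCtrlMsr) apToAdd[cToAdd++] = &s_SpecCtrl;   /* any SPEC_CTRL bit implies the MSR   */
        if (fArchCap)     apToAdd[cToAdd++] = &s_ArchCaps;

        for (size_t i = 0; i < cToAdd; i++)
            if (!msrRangeExists(apToAdd[i]->idMsr))          /* only add what is still missing */
            {
                printf("reconcile insert: %#x %s\n", apToAdd[i]->idMsr, apToAdd[i]->pszName);
                int rc = msrRangeInsert(apToAdd[i]);
                if (rc != 0)
                    return rc;
            }
        return 0;
    }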
trunk/src/VBox/VMM/include/CPUMInternal.h
(r107700 → r107854)

 # ifdef RT_ARCH_AMD64
 AssertCompileMemberAlignment(CPUMCPU, Host, 64);
+AssertCompileAdjacentMembers(CPUMCPU, Guest, GuestMsrs); /* HACK ALERT! HMR0A.asm makes this ASSUMPTION in the SVM RUN code! */
 # endif
 #endif
…
 int  cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs,
                                  CPUMFEATURESX86 *pFeatures);
+void cpumCpuIdExplodeFeaturesX86SetSummaryBits(CPUMFEATURESX86 *pFeatures);
+void cpumCpuIdExplodeArchCapabilities(CPUMFEATURESX86 *pFeatures, bool fHasArchCap, uint64_t fArchVal);
 # endif /* defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */
 # if defined(RT_ARCH_ARM64) || defined(VBOX_VMM_TARGET_ARMV8)
…
 # ifdef VBOX_VMM_TARGET_X86
 int  cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange);
-int  cpumR3MsrReconcileWithCpuId(PVM pVM);
+DECLHIDDEN(int) cpumR3MsrReconcileWithCpuId(PVM pVM, bool fForceFlushCmd, bool fForceSpecCtrl);
 int  cpumR3MsrApplyFudge(PVM pVM);
 int  cpumR3MsrRegStats(PVM pVM);
trunk/src/VBox/VMM/include/HMInternal.h
(r106061 → r107854)

 /** @addtogroup grp_hm_int_svm  SVM Internal
  * @{ */
-/** SVM VMRun function, see SVMR0VMRun(). */
+/** SVM VMRun function, see SVMR0VMRun(). */
 typedef DECLCALLBACKTYPE(int, FNHMSVMVMRUN,(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB));
 /** Pointer to a SVM VMRun function. */
…
 /** Flush MDS buffers on VM entry. */
 #define HM_WSF_MDS_ENTRY        RT_BIT_32(3)
+/** MSR_IA32_SPEC_CTRL needs to be replaced upon entry and exit.
+ * Save host value on entry, load guest value, run guest, save guest value on
+ * exit and restore the host value.
+ * @todo may not reliable for VT-x/Intel. */
+#define HM_WSF_SPEC_CTRL        RT_BIT_32(4)
 
 /** Touch IA32_FLUSH_CMD.L1D on VM scheduling. */
…
  * @{
  */
-DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
-DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
-DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
-DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
-DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
-DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
-DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
-DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
 /** @} */
 
trunk/src/VBox/VMM/include/HMInternal.mac
(r106061 → r107854)

 %define HM_WSF_L1D_ENTRY    RT_BIT_32(2)
 %define HM_WSF_MDS_ENTRY    RT_BIT_32(3)
+%define HM_WSF_SPEC_CTRL    RT_BIT_32(4)
 