VirtualBox

Changeset 107854 in vbox for trunk/src/VBox/VMM


Timestamp: Jan 18, 2025 11:59:26 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 167053
Message:

x86.h,VMM: More AMD CPUID bits; addressed some old todos related to these; fixed bugs in svm & vmx world switcher (sanity checks, ++). jiraref:VBP-947 bugref:10738

Location: trunk/src/VBox/VMM
Files: 11 edited

Legend: context lines are unprefixed, added lines are prefixed with “+”, removed lines with “-”.
  • trunk/src/VBox/VMM/VMMAll/CPUMAllCpuId.cpp

    r107749 → r107854

    @@ -1403 +1403 @@
    +void cpumCpuIdExplodeFeaturesX86SetSummaryBits(CPUMFEATURESX86 *pFeatures)
    +{
    +    /* Summary OR of all bits indicating the presence of the IA32_SPEC_CTRL MSR. */
    +    pFeatures->fSpecCtrlMsr = pFeatures->fIbrs
    +                            | pFeatures->fStibp
    +                            | pFeatures->fSsbd
    +                            | pFeatures->fPsfd
    +                            | pFeatures->fIpredCtrl
    +                            | pFeatures->fRrsbaCtrl
    +                            | pFeatures->fDdpdU
    +                            | pFeatures->fBhiCtrl
    +                            ;
    +}
    +
    +
     int cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, CPUMFEATURESX86 *pFeatures)
     {
     
    @@ -1565 +1580 @@
                 pFeatures->fMmx            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_MMX);
                 pFeatures->fTsc            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_TSC);
    -            pFeatures->fIbpb           |= pExtLeaf8 && (pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_IBPB);
                 pFeatures->fAmdMmxExts      = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_AXMMX);
                 pFeatures->fXop             = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_XOP);
                 pFeatures->fTbm             = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_TBM);
                 pFeatures->fSvm             = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM);
    +
    +            if (pExtLeaf8)
    +            {
    +                pFeatures->fIbpb      |= RT_BOOL(pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_IBPB);
    +                pFeatures->fIbrs      |= RT_BOOL(pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_IBRS);
    +                pFeatures->fStibp     |= RT_BOOL(pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_STIBP);
    +                pFeatures->fSsbd      |= RT_BOOL(pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_SPEC_CTRL_SSBD);
    +                pFeatures->fPsfd      |= RT_BOOL(pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_PSFD);
    +            }
    +
    +            PCCPUMCPUIDLEAF pExtLeaf21 = cpumCpuIdFindLeaf(paLeaves, cLeaves, 0x80000021);
    +            if (pExtLeaf21)
    +            {
    +                /** @todo IBPB_BRTYPE is implied on Zen 1 & 2.
    +                 *  https://www.amd.com/content/dam/amd/en/documents/corporate/cr/speculative-return-stack-overflow-whitepaper.pdf */
    +            }
    +
                 if (pFeatures->fSvm)
                 {
     
    @@ -1655 +1686 @@
         else
             AssertLogRelReturn(cLeaves == 0, VERR_CPUM_IPE_1);
    +
    +    cpumCpuIdExplodeFeaturesX86SetSummaryBits(pFeatures);
         return VINF_SUCCESS;
     }
     
    @@ -1662 +1695 @@
      * Helper for extracting feature bits from IA32_ARCH_CAPABILITIES.
      */
    -static void cpumCpuIdExplodeArchCapabilities(CPUMFEATURESX86 *pFeatures, bool fHasArchCap, uint64_t fArchVal)
    +void cpumCpuIdExplodeArchCapabilities(CPUMFEATURESX86 *pFeatures, bool fHasArchCap, uint64_t fArchVal)
     {
         Assert(fHasArchCap || fArchVal == 0);
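    The new fSpecCtrlMsr summary bit simply ORs together every exploded feature that implies the IA32_SPEC_CTRL MSR exists. For orientation, here is a minimal sketch of the MSR's control-bit layout; the bit positions are assumptions taken from Intel/AMD architecture documentation rather than from this changeset, so verify against the current manuals:

        #include <stdint.h>

        /* Sketch of IA32_SPEC_CTRL (MSR 0x48) control bits; positions are an
           assumption from the Intel SDM / AMD APM, not from this changeset. */
        #define SPEC_CTRL_IBRS   UINT64_C(0x0001)  /* bit 0: indirect branch restricted speculation */
        #define SPEC_CTRL_STIBP  UINT64_C(0x0002)  /* bit 1: single-thread indirect branch predictors */
        #define SPEC_CTRL_SSBD   UINT64_C(0x0004)  /* bit 2: speculative store bypass disable */
        #define SPEC_CTRL_PSFD   UINT64_C(0x0080)  /* bit 7: predictive store forwarding disable */

        /* Any one of the exploded features implies the MSR is present, which is
           exactly the OR that cpumCpuIdExplodeFeaturesX86SetSummaryBits() computes. */
        static inline int specCtrlMsrPresent(int fIbrs, int fStibp, int fSsbd, int fPsfd)
        {
            return fIbrs | fStibp | fSsbd | fPsfd;
        }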
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    r107703 → r107854

    @@ -165 +165 @@
         /*
          * Get MSR_IA32_ARCH_CAPABILITIES and expand it into the host feature structure.
    +     *
    +     * AMD CPUs don't have this register; similar info is available in EBX of
    +     * CPUID leaf 0x80000008.
          */
         if (ASMHasCpuId())
         {
    -        /** @todo Should add this MSR to CPUMMSRS and expose it via SUPDrv... */
    -        g_CpumHostFeatures.s.fArchRdclNo             = 0;
    -        g_CpumHostFeatures.s.fArchIbrsAll            = 0;
    -        g_CpumHostFeatures.s.fArchRsbOverride        = 0;
    -        g_CpumHostFeatures.s.fArchVmmNeedNotFlushL1d = 0;
    -        g_CpumHostFeatures.s.fArchMdsNo              = 0;
             uint32_t const cStdRange = ASMCpuId_EAX(0);
             if (   RTX86IsValidStdRange(cStdRange)
     
    @@ -183 +180 @@
                 if (   (fStdExtFeaturesEdx & X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP)
                     && (fStdFeaturesEdx    & X86_CPUID_FEATURE_EDX_MSR))
    -            {
    -                uint64_t fArchVal = ASMRdMsr(MSR_IA32_ARCH_CAPABILITIES);
    -                g_CpumHostFeatures.s.fArchRdclNo             = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RDCL_NO);
    -                g_CpumHostFeatures.s.fArchIbrsAll            = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_IBRS_ALL);
    -                g_CpumHostFeatures.s.fArchRsbOverride        = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RSBO);
    -                g_CpumHostFeatures.s.fArchVmmNeedNotFlushL1d = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_VMM_NEED_NOT_FLUSH_L1D);
    -                g_CpumHostFeatures.s.fArchMdsNo              = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_MDS_NO);
    -            }
    -            else
    -                g_CpumHostFeatures.s.fArchCap = 0;
    +                cpumCpuIdExplodeArchCapabilities(&g_CpumHostFeatures.s, true, ASMRdMsr(MSR_IA32_ARCH_CAPABILITIES));
             }
         }
     
    @@ -363 +351 @@
          * Note! We assume this happens after the CPUMR3Init is done, so CPUID bits are settled.
          */
    -    uint64_t       fHostArchVal = 0;
    -    bool           fHasArchCap  = false;
    -    uint32_t const cStdRange    = ASMCpuId_EAX(0);
    +    /** @todo Should add this MSR to CPUMMSRS and expose it via SUPDrv... */
    +    uint32_t const cStdRange = ASMCpuId_EAX(0);
         if (   RTX86IsValidStdRange(cStdRange)
             && cStdRange >= 7)

    @@ -373 +360 @@
             if (   (fEdxFeatures & X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP)
                 && (fFeatures & X86_CPUID_FEATURE_EDX_MSR))
    -        {
    -            fHostArchVal = ASMRdMsr(MSR_IA32_ARCH_CAPABILITIES);
    -            fHasArchCap  = true;
    -        }
    -    }
    -    CPUMCpuIdApplyX86HostArchCapabilities(pVM, fHasArchCap, fHostArchVal);
    +            CPUMCpuIdApplyX86HostArchCapabilities(pVM, true, ASMRdMsr(MSR_IA32_ARCH_CAPABILITIES));
    +    }

         /*
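    The pattern is the same on both paths: only read MSR_IA32_ARCH_CAPABILITIES after CPUID has advertised it, since a blind RDMSR would #GP on CPUs without the register. A standalone sketch of that guard using IPRT's wrappers (same feature-bit names as above; a sketch, not the committed helper):

        #include <iprt/asm-amd64-x86.h>  /* ASMCpuId_EAX, ASMCpuIdExSlow, ASMRdMsr */
        #include <iprt/x86.h>            /* X86_CPUID_*, MSR_IA32_*, RTX86IsValidStdRange */

        /* Returns IA32_ARCH_CAPABILITIES, or 0 if the CPU doesn't have it.
           Leaf 1 EDX.MSR and leaf 7 EDX.ARCHCAP together gate the RDMSR. */
        static uint64_t readArchCapabilitiesSafe(void)
        {
            uint32_t const cStdRange = ASMCpuId_EAX(0);
            if (RTX86IsValidStdRange(cStdRange) && cStdRange >= 7)
            {
                uint32_t uEdx1 = 0, uEdx7 = 0;
                ASMCpuIdExSlow(1, 0, 0, 0, NULL, NULL, NULL, &uEdx1);
                ASMCpuIdExSlow(7, 0, 0, 0, NULL, NULL, NULL, &uEdx7);
                if (   (uEdx7 & X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP)
                    && (uEdx1 & X86_CPUID_FEATURE_EDX_MSR))
                    return ASMRdMsr(MSR_IA32_ARCH_CAPABILITIES);
            }
            return 0;
        }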
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r106061 → r107854

    @@ -1332 +1332 @@
          * Configure defences against spectre and other CPU bugs.
          */
    +    /* Determine the flags: */
         uint32_t fWorldSwitcher = 0;
    -    uint32_t cLastStdLeaf   = ASMCpuId_EAX(0);
    -    if (cLastStdLeaf >= 0x00000007 && RTX86IsValidStdRange(cLastStdLeaf))
    -    {
    -        uint32_t uEdx = 0;
    -        ASMCpuIdExSlow(0x00000007, 0, 0, 0, NULL, NULL, NULL, &uEdx);
    -
    -        if (uEdx & X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB)
    -        {
    -            if (pVM->hm.s.fIbpbOnVmExit)
    -                fWorldSwitcher |= HM_WSF_IBPB_EXIT;
    -            if (pVM->hm.s.fIbpbOnVmEntry)
    -                fWorldSwitcher |= HM_WSF_IBPB_ENTRY;
    -        }
    -        if (uEdx & X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD)
    -        {
    -            if (pVM->hm.s.fL1dFlushOnVmEntry)
    -                fWorldSwitcher |= HM_WSF_L1D_ENTRY;
    -            else if (pVM->hm.s.fL1dFlushOnSched)
    -                fWorldSwitcher |= HM_WSF_L1D_SCHED;
    -        }
    -        if (uEdx & X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR)
    -        {
    -            if (pVM->hm.s.fMdsClearOnVmEntry)
    -                fWorldSwitcher |= HM_WSF_MDS_ENTRY;
    -            else if (pVM->hm.s.fMdsClearOnSched)
    -                fWorldSwitcher |= HM_WSF_MDS_SCHED;
    -        }
    -    }
    +    if (g_CpumHostFeatures.s.fIbpb)
    +    {
    +        if (pVM->hm.s.fIbpbOnVmExit)
    +            fWorldSwitcher |= HM_WSF_IBPB_EXIT;
    +        if (pVM->hm.s.fIbpbOnVmEntry)
    +            fWorldSwitcher |= HM_WSF_IBPB_ENTRY;
    +    }
    +    if (g_CpumHostFeatures.s.fFlushCmd)
    +    {
    +        if (pVM->hm.s.fL1dFlushOnVmEntry)
    +            fWorldSwitcher |= HM_WSF_L1D_ENTRY;
    +        else if (pVM->hm.s.fL1dFlushOnSched)
    +            fWorldSwitcher |= HM_WSF_L1D_SCHED;
    +    }
    +    if (g_CpumHostFeatures.s.fMdsClear)
    +    {
    +        if (pVM->hm.s.fMdsClearOnVmEntry)
    +            fWorldSwitcher |= HM_WSF_MDS_ENTRY;
    +        else if (pVM->hm.s.fMdsClearOnSched)
    +            fWorldSwitcher |= HM_WSF_MDS_SCHED;
    +    }
    +    if (g_CpumHostFeatures.s.fSpecCtrlMsr)
    +    {
    +        /** @todo this may be too early for intel? */
    +        if (pVM->cpum.ro.GuestFeatures.fSpecCtrlMsr)
    +            fWorldSwitcher |= HM_WSF_SPEC_CTRL;
    +    }
    +
    +    /* Distribute the flags. */
         for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
         {

    @@ -1367 +1369 @@
         }
         pVM->hm.s.ForR3.fWorldSwitcher = fWorldSwitcher;
    -

         /*
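    The rewrite keys all the mitigations off the pre-computed host feature flags instead of re-querying CPUID, and the new HM_WSF_SPEC_CTRL is additionally gated on the guest actually seeing the MSR. A toy condensation of that gating logic, with invented names and flag values rather than the real HM_WSF_* bits:

        #include <stdbool.h>
        #include <stdint.h>

        #define WSF_IBPB_EXIT  UINT32_C(0x01)   /* toy stand-ins for HM_WSF_* */
        #define WSF_IBPB_ENTRY UINT32_C(0x02)
        #define WSF_SPEC_CTRL  UINT32_C(0x04)

        /* The host must support a mitigation for it to be armed; SPEC_CTRL
           switching is only worthwhile when the guest can write the MSR too. */
        static uint32_t computeWorldSwitcherFlags(bool fHostIbpb, bool fIbpbOnExit, bool fIbpbOnEntry,
                                                  bool fHostSpecCtrl, bool fGuestSpecCtrl)
        {
            uint32_t fFlags = 0;
            if (fHostIbpb)
            {
                if (fIbpbOnExit)  fFlags |= WSF_IBPB_EXIT;
                if (fIbpbOnEntry) fFlags |= WSF_IBPB_ENTRY;
            }
            if (fHostSpecCtrl && fGuestSpecCtrl)
                fFlags |= WSF_SPEC_CTRL;
            return fFlags;
        }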
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

    r106061 → r107854

    @@ -522 +522 @@
     ;
     ; @note Important that this does not modify cbFrame or rsp.
    +; @note HM_WSF_SPEC_CTRL is handled differently here at the moment.
     %macro RESTORE_STATE_VMX 4
             ; Restore base and limit of the IDTR & GDTR.
     
    @@ -586 +587 @@
      %endif

    - %if %3 & HM_WSF_IBPB_EXIT
    + %if (%3) & HM_WSF_IBPB_EXIT
             ; Fight spectre (trashes rax, rdx and rcx).
      %if %1 = 0 ; Skip this in failure branch (=> guru)
    744745
    745746        mov     eax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fWorldSwitcher]
    746         and     eax, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT
    747         cmp     eax, %3
     747        and     eax, HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY | HM_WSF_MDS_ENTRY | HM_WSF_IBPB_EXIT ; | HM_WSF_SPEC_CTRL
     748        cmp     eax, (%3)
    748749        mov     eax, VERR_VMX_STARTVM_PRECOND_1
    749750        jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)
     
    @@ -866 +867 @@
             ; Fight spectre and similar. Trashes rax, rcx, and rdx.
             ;
    - %if %3 & (HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY)  ; The eax:edx value is the same for the first two.
    + %if (%3) & (HM_WSF_IBPB_ENTRY | HM_WSF_L1D_ENTRY)  ; The eax:edx value is the same for the first two.
             AssertCompile(MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D)
             mov     eax, MSR_IA32_PRED_CMD_F_IBPB
             xor     edx, edx
      %endif
    - %if %3 & HM_WSF_IBPB_ENTRY             ; Indirect branch barrier.
    + %if (%3) & HM_WSF_IBPB_ENTRY             ; Indirect branch barrier.
             mov     ecx, MSR_IA32_PRED_CMD
             wrmsr
      %endif
    - %if %3 & HM_WSF_L1D_ENTRY              ; Level 1 data cache flush.
    + %if (%3) & HM_WSF_L1D_ENTRY              ; Level 1 data cache flush.
             mov     ecx, MSR_IA32_FLUSH_CMD
             wrmsr
    - %elif %3 & HM_WSF_MDS_ENTRY            ; MDS flushing is included in L1D_FLUSH
    + %elif (%3) & HM_WSF_MDS_ENTRY            ; MDS flushing is included in L1D_FLUSH
             mov     word [rbp + frm_MDS_seg], ds
             verw    word [rbp + frm_MDS_seg]
     
    @@ -998 +999 @@
      %endif  ; %4 != 0

    +.return_with_restored_preserved_registers:
             lea     rsp, [rbp + frm_fRFlags]
             popf

    @@ -1030 +1032 @@
       %error Bad frame size value: cbFrame, expected cbBaseFrame
      %endif
    -        jmp     .vmstart64_end
    +        jmp     .return_with_restored_preserved_registers
     %endif

     
    @@ -1099 +1101 @@
     ; @param    1   The suffix of the variation.
     ; @param    2   fLoadSaveGuestXcr0 value
    -; @param    3   The HM_WSF_IBPB_ENTRY + HM_WSF_IBPB_EXIT value.
    +; @param    3   The HM_WSF_IBPB_ENTRY + HM_WSF_IBPB_EXIT + HM_WSF_SPEC_CTRL value.
     ; @param    4   The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
     ;               Drivers shouldn't use AVX registers without saving+loading:
     
    @@ -1148 +1150 @@
             SEH64_SET_FRAME_xBP 0
             pushf
    -  %assign cbFrame            30h
    +  %assign cbFrame            40h
     %if %4 != 0
      %assign cbFrame            cbFrame + 16 * 11  ; Reserve space for 10x 128-bit XMM registers and MXCSR (32-bit)
     
    @@ -1161 +1163 @@
     %define frm_pGstCtx         -028h              ; Where we stash guest CPU context for use after the vmrun.
     %define frm_HCPhysVmcbHost  -030h              ; Where we stash HCPhysVmcbHost for the vmload after vmrun.
    +%define frm_uHostSpecCtrl   -040h              ; Saved MSR_IA32_SPEC_CTRL value.
     %if %4 != 0
    -  %define frm_saved_xmm6     -040h
    -  %define frm_saved_xmm7     -050h
    -  %define frm_saved_xmm8     -060h
    -  %define frm_saved_xmm9     -070h
    -  %define frm_saved_xmm10    -080h
    -  %define frm_saved_xmm11    -090h
    -  %define frm_saved_xmm12    -0a0h
    -  %define frm_saved_xmm13    -0b0h
    -  %define frm_saved_xmm14    -0c0h
    -  %define frm_saved_xmm15    -0d0h
    -  %define frm_saved_mxcsr    -0e0h
    +  %define frm_saved_xmm6     -050h
    +  %define frm_saved_xmm7     -060h
    +  %define frm_saved_xmm8     -070h
    +  %define frm_saved_xmm9     -080h
    +  %define frm_saved_xmm10    -090h
    +  %define frm_saved_xmm11    -0a0h
    +  %define frm_saved_xmm12    -0b0h
    +  %define frm_saved_xmm13    -0c0h
    +  %define frm_saved_xmm14    -0d0h
    +  %define frm_saved_xmm15    -0e0h
    +  %define frm_saved_mxcsr    -0f0h
     %endif

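    Note on the frame layout: reserving frm_uHostSpecCtrl at -040h is what pushes cbFrame from 30h to 40h, which in turn shifts every XMM save slot (and MXCSR) down by 10h. Since the save/restore code addresses these slots only through the symbolic frm_saved_* names, moving the defines is the whole change.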
     
    @@ -1208 +1211 @@

             mov     eax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fWorldSwitcher]
    -        and     eax, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT
    -        cmp     eax, %3
    +        and     eax, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT | HM_WSF_SPEC_CTRL
    +        cmp     eax, (%3)
             mov     eax, VERR_SVM_VMRUN_PRECOND_1
             jne     .failure_return
     
    @@ -1291 +1294 @@
     %endif

    + %if (%3) & HM_WSF_SPEC_CTRL
    +        ; Save host MSR_IA32_SPEC_CTRL and load the guest one (trashes rax, rdx, rcx, rbx).
    +        ; HACK ALERT! Boldly ASSUMES that CPUMCTXMSRS follows immediately after GstCtx (CPUMCTX).
    +        mov     rbx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX_size + CPUMCTXMSRS.msr.SpecCtrl] ; rbx = guest IA32_SPEC_CTRL
    +        mov     ecx, MSR_IA32_SPEC_CTRL
    +        rdmsr                               ; edx:eax = host IA32_SPEC_CTRL value
    +        shl     rdx, 32
    +        or      rdx, rax                    ; rdx = host IA32_SPEC_CTRL value
    +        mov     [rbp + frm_uHostSpecCtrl], rdx
    +        cmp     rdx, rbx                    ; avoid wrmsr if we can.
    +        je      .skip_spec_ctrl_load
    +        mov     eax, ebx
    +        mov     rdx, rbx
    +        shr     rdx, 32
    +        wrmsr
    +.skip_spec_ctrl_load:
    + %endif
    +
             ; Save host fs, gs, sysenter msr etc.
             mov     rax, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.svm + HMR0CPUSVM.HCPhysVmcbHost]
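    Two details of this block are worth calling out: the guest value is fetched via the bold layout assumption that CPUMCTXMSRS sits directly after the CPUMCTX part of the guest context, and the wrmsr is skipped when host and guest values already match, since writing IA32_SPEC_CTRL is comparatively expensive. A toy compile-time check of the kind of layout invariant the HACK ALERT relies on, using invented struct names rather than the real VMM types:

        #include <stddef.h>
        #include <stdint.h>

        /* Toy stand-ins for CPUMCTX / CPUMCTXMSRS; only the layout matters here. */
        typedef struct TOYCTX     { uint64_t aGprs[16]; } TOYCTX;
        typedef struct TOYCTXMSRS { struct { uint64_t SpecCtrl; } msr; } TOYCTXMSRS;
        typedef struct TOYGUEST   { TOYCTX Ctx; TOYCTXMSRS Msrs; } TOYGUEST;

        /* The assembly addresses the MSR as base + sizeof(CPUMCTX) + offsetof(...);
           that only works if no padding separates the two members. */
        _Static_assert(offsetof(TOYGUEST, Msrs) == sizeof(TOYCTX),
                       "CPUMCTXMSRS must follow CPUMCTX immediately");
        _Static_assert(offsetof(TOYGUEST, Msrs.msr.SpecCtrl)
                       == sizeof(TOYCTX) + offsetof(TOYCTXMSRS, msr.SpecCtrl),
                       "SpecCtrl offset computation used by the asm");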
     
    @@ -1298 +1319 @@
             vmsave

    - %if %3 & HM_WSF_IBPB_ENTRY
    + %if (%3) & HM_WSF_IBPB_ENTRY
             ; Fight spectre (trashes rax, rdx and rcx).
             mov     ecx, MSR_IA32_PRED_CMD
     
    @@ -1389 +1410 @@
             mov     r15, [rbp + frm_saved_r15]

    - %if %4 != 0
    -        ; Set r8 = &pVCpu->cpum.GstCtx; for use below when saving and restoring SSE state.
    + %if %4 != 0 || ((%3) & HM_WSF_SPEC_CTRL)
    +        ; Set r8 = &pVCpu->cpum.GstCtx; for use below when saving and restoring SSE state as well as for IA32_SPEC_CTRL.
             mov     r8, rcx
      %endif

    - %if %3 & HM_WSF_IBPB_EXIT
    + %if (%3) & HM_WSF_IBPB_EXIT
             ; Fight spectre (trashes rax, rdx and rcx).
             mov     ecx, MSR_IA32_PRED_CMD
     
    @@ -1408 +1429 @@
             mov     rax, [rbp + frm_uHostXcr0]
             xsetbv
    + %endif
    +
    + %if (%3) & HM_WSF_SPEC_CTRL
    +        ; Save guest MSR_IA32_SPEC_CTRL and load the host one (trashes rax, rdx, rcx, r10).
    +        mov     r10, [rbp + frm_uHostSpecCtrl] ; r10 = host IA32_SPEC_CTRL
    +        mov     ecx, MSR_IA32_SPEC_CTRL
    +        rdmsr                               ; edx:eax = guest IA32_SPEC_CTRL value
    +        shl     rdx, 32
    +        or      rdx, rax                    ; rdx = guest IA32_SPEC_CTRL value
    +        ; HACK ALERT! Boldly ASSUMES that CPUMCTXMSRS follows immediately after GstCtx (CPUMCTX).
    +        mov     [r8 + CPUMCTX_size + CPUMCTXMSRS.msr.SpecCtrl], rdx ; saved guest IA32_SPEC_CTRL
    +        cmp     rdx, r10                    ; avoid wrmsr if we can.
    +        je      .skip_spec_ctrl_restore
    +        mov     eax, r10d
    +        mov     rdx, r10
    +        shr     rdx, 32
    +        wrmsr
    +.skip_spec_ctrl_restore:
      %endif

     
    @@ -1486 +1525 @@
     ; Instantiate the hmR0SvmVmRun various variations.
     ;
    -hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit,           0, 0,                                    0
    -hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit,           1, 0,                                    0
    -hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit,           0, HM_WSF_IBPB_ENTRY,                    0
    -hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit,           1, HM_WSF_IBPB_ENTRY,                    0
    -hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit,           0, HM_WSF_IBPB_EXIT,                     0
    -hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit,           1, HM_WSF_IBPB_EXIT,                     0
    -hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit,           0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
    -hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit,           1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
    +hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl,           0, 0,                                                       0
    +hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl,           1, 0,                                                       0
    +hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl,           0, HM_WSF_IBPB_ENTRY,                                       0
    +hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl,           1, HM_WSF_IBPB_ENTRY,                                       0
    +hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl,           0, HM_WSF_IBPB_EXIT,                                        0
    +hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl,           1, HM_WSF_IBPB_EXIT,                                        0
    +hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl,           0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT,                    0
    +hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl,           1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT,                    0
    +hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl,           0, HM_WSF_SPEC_CTRL,                                        0
    +hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl,           1, HM_WSF_SPEC_CTRL,                                        0
    +hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl,           0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY,                    0
    +hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl,           1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY,                    0
    +hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl,           0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_EXIT,                     0
    +hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl,           1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_EXIT,                     0
    +hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl,           0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
    +hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl,           1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 0
    +
     %ifdef VBOX_WITH_KERNEL_USING_XMM
    -hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 0, 0,                                    1
    -hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 1, 0,                                    1
    -hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 0, HM_WSF_IBPB_ENTRY,                    1
    -hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 1, HM_WSF_IBPB_ENTRY,                    1
    -hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 0, HM_WSF_IBPB_EXIT,                     1
    -hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 1, HM_WSF_IBPB_EXIT,                     1
    -hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
    -hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
    -
    -hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseXSave,  0, 0,                                    2
    -hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseXSave,  1, 0,                                    2
    -hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseXSave,  0, HM_WSF_IBPB_ENTRY,                    2
    -hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseXSave,  1, HM_WSF_IBPB_ENTRY,                    2
    -hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseXSave,  0, HM_WSF_IBPB_EXIT,                     2
    -hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseXSave,  1, HM_WSF_IBPB_EXIT,                     2
    -hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseXSave,  0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
    -hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseXSave,  1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
    +hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl_SseManual, 0, 0,                                                       1
    +hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl_SseManual, 1, 0,                                                       1
    +hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl_SseManual, 0, HM_WSF_IBPB_ENTRY,                                       1
    +hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl_SseManual, 1, HM_WSF_IBPB_ENTRY,                                       1
    +hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl_SseManual, 0, HM_WSF_IBPB_EXIT,                                        1
    +hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl_SseManual, 1, HM_WSF_IBPB_EXIT,                                        1
    +hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl_SseManual, 0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT,                    1
    +hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl_SseManual, 1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT,                    1
    +hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl_SseManual, 0, HM_WSF_SPEC_CTRL,                                        1
    +hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl_SseManual, 1, HM_WSF_SPEC_CTRL,                                        1
    +hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl_SseManual, 0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY,                    1
    +hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl_SseManual, 1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY,                    1
    +hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl_SseManual, 0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_EXIT,                     1
    +hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl_SseManual, 1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_EXIT,                     1
    +hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl_SseManual, 0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
    +hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl_SseManual, 1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 1
    +
    +hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl_SseXSave,  0, 0,                                                       2
    +hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl_SseXSave,  1, 0,                                                       2
    +hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl_SseXSave,  0, HM_WSF_IBPB_ENTRY,                                       2
    +hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl_SseXSave,  1, HM_WSF_IBPB_ENTRY,                                       2
    +hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl_SseXSave,  0, HM_WSF_IBPB_EXIT,                                        2
    +hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl_SseXSave,  1, HM_WSF_IBPB_EXIT,                                        2
    +hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl_SseXSave,  0, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT,                    2
    +hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl_SseXSave,  1, HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT,                    2
    +hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl_SseXSave,  0, HM_WSF_SPEC_CTRL,                                        2
    +hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl_SseXSave,  1, HM_WSF_SPEC_CTRL,                                        2
    +hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl_SseXSave,  0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY,                    2
    +hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl_SseXSave,  1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY,                    2
    +hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl_SseXSave,  0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_EXIT,                     2
    +hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl_SseXSave,  1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_EXIT,                     2
    +hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl_SseXSave,  0, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
    +hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl_SseXSave,  1, HM_WSF_SPEC_CTRL | HM_WSF_IBPB_ENTRY | HM_WSF_IBPB_EXIT, 2
     %endif
    -
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r107113 → r107854

    @@ -692 +692 @@
        static const struct CLANGWORKAROUND { PFNHMSVMVMRUN pfn; } s_aHmR0SvmVmRunFunctions[] =
        {
    -        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit },
    -        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit },
    -        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit },
    -        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit },
    -        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit },
    -        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit },
    -        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit },
    -        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit },
    +        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl },
    +        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl },
    +        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl },
    +        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl },
    +        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl },
    +        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl },
    +        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl },
    +        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl },
    +        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl },
    +        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl },
    +        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl },
    +        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl },
    +        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl },
    +        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl },
    +        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl },
    +        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl },
        };
        uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0                 ? 1 : 0)
                            | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_ENTRY ? 2 : 0)
    -                        | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT  ? 4 : 0);
    +                        | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT  ? 4 : 0)
    +                        | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_SPEC_CTRL  ? 8 : 0);
        PFNHMSVMVMRUN const pfnVMRun = s_aHmR0SvmVmRunFunctions[idx].pfn;
        if (pVCpu->hmr0.s.svm.pfnVMRun != pfnVMRun)
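    The table lookup only works because the instantiation order in HMR0A.asm matches the bit encoding of idx: bit 0 = XCR0, bit 1 = IBPB-on-entry, bit 2 = IBPB-on-exit, and now bit 3 = SPEC_CTRL, giving 16 variants. A toy self-check of that naming/ordering invariant (selector values invented for the sketch, not the real HM_WSF_* constants):

        #include <stdio.h>

        /* Toy model: the entry at index i must handle exactly the selectors
           encoded by i's bits, in the same order the templates are emitted. */
        enum { SEL_XCR0 = 1, SEL_IBPB_ENTRY = 2, SEL_IBPB_EXIT = 4, SEL_SPEC_CTRL = 8 };

        int main(void)
        {
            for (unsigned idx = 0; idx < 16; idx++)
                printf("[%2u] _%sXcr0_%sIbpbEntry_%sIbpbExit_%sSpecCtrl\n", idx,
                       idx & SEL_XCR0       ? "With" : "Sans",
                       idx & SEL_IBPB_ENTRY ? "With" : "Sans",
                       idx & SEL_IBPB_EXIT  ? "With" : "Sans",
                       idx & SEL_SPEC_CTRL  ? "With" : "Sans");
            return 0;
        }

    Printing this list reproduces the order of the s_aHmR0SvmVmRunFunctions entries above, which is the property the indexing relies on.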
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r107731 → r107854

    @@ -2529 +2529 @@
          * #GP), but we don't currently do so for performance raisins/laziness.
          */
    -    if (pVM->cpum.ro.GuestFeatures.fIbpb)
    +    if (pVM->cpum.ro.GuestFeatures.fIbpb /*    && g_CpumHostFeatures.s.fIbpb*/)
             hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_PRED_CMD,  VMXMSRPM_ALLOW_RD_WR);
    -    if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
    +    if (pVM->cpum.ro.GuestFeatures.fFlushCmd && g_CpumHostFeatures.s.fFlushCmd)
             hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_FLUSH_CMD, VMXMSRPM_ALLOW_RD_WR);
    -    if (pVM->cpum.ro.GuestFeatures.fIbrs)
    +    if (pVM->cpum.ro.GuestFeatures.fIbrs     && g_CpumHostFeatures.s.fIbrs)
             hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SPEC_CTRL, VMXMSRPM_ALLOW_RD_WR);

    @@ -4117 +4117 @@
             /* Speculation Control (R/W). */
             HMVMX_CPUMCTX_ASSERT(pVCpu, HM_CHANGED_GUEST_OTHER_MSRS);
    -        if (pVM->cpum.ro.GuestFeatures.fIbrs)
    +        if (pVM->cpum.ro.GuestFeatures.fIbrs && g_CpumHostFeatures.s.fIbrs)
             {
                 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu),
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

    r107749 → r107854

    @@ -132 +132 @@
     *                      insert.
     */
    -static int cpumR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCPUMCPUIDLEAF pNewLeaf)
    +static int cpumR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCCPUMCPUIDLEAF pNewLeaf)
     {
         /*
     
    @@ -1040 +1040 @@
         bool            fForceVme;
         bool            fNestedHWVirt;
    +    bool            fSpecCtrl;

         CPUMISAEXTCFG   enmCmpXchg16b;
     
    @@ -2393 +2394 @@
      * currently not doing the apic id assignments in a compatible manner.
      */
    +    bool fAmdGstSupIbpb = false; /* Used below. */
         uSubLeaf = 0;
         while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000008), uSubLeaf)) != NULL)
     
    @@ -2401 +2403 @@
            {
                /* Expose XSaveErPtr aka RstrFpErrPtrs to guest. */
    -            pCurLeaf->uEbx &= X86_CPUID_AMD_EFEID_EBX_XSAVE_ER_PTR;  /* reserved - [12] == IBPB */
    +            pCurLeaf->uEbx &= 0
    +                           //| X86_CPUID_AMD_EFEID_EBX_CLZERO
    +                           //| X86_CPUID_AMD_EFEID_EBX_IRPERF
    +                           //| X86_CPUID_AMD_EFEID_EBX_XSAVE_ER_PTR
    +                           //| X86_CPUID_AMD_EFEID_EBX_INVLPGB
    +                           //| X86_CPUID_AMD_EFEID_EBX_RDPRU
    +                           //| X86_CPUID_AMD_EFEID_EBX_BE
    +                           //| X86_CPUID_AMD_EFEID_EBX_MCOMMIT
    +                           | (pConfig->fSpecCtrl || PASSTHRU_FEATURE(pConfig->enmFlushCmdMsr, pHstFeat->fFlushCmd, true)
    +                              ? X86_CPUID_AMD_EFEID_EBX_IBPB : 0)
    +                           //| X86_CPUID_AMD_EFEID_EBX_INT_WBINVD
    +                           | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBRS : 0)
    +                           | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_STIBP : 0)
    +                           | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBRS_ALWAYS_ON : 0)
    +                           | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_STIBP_ALWAYS_ON : 0)
    +                           | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBRS_PREFERRED : 0)
    +                           | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBRS_SAME_MODE : 0)
    +                           //| X86_CPUID_AMD_EFEID_EBX_NO_EFER_LMSLE
    +                           //| X86_CPUID_AMD_EFEID_EBX_INVLPGB_NESTED_PAGES
    +                           | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_SPEC_CTRL_SSBD : 0)
    +                           /// @todo | X86_CPUID_AMD_EFEID_EBX_VIRT_SPEC_CTRL_SSBD
    +                           | X86_CPUID_AMD_EFEID_EBX_SSBD_NOT_REQUIRED
    +                           //| X86_CPUID_AMD_EFEID_EBX_CPPC
    +                           | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_PSFD : 0)
    +                           | X86_CPUID_AMD_EFEID_EBX_BTC_NO
    +                           | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBPB_RET : 0);
    +
    +            PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEbx, IBPB, X86_CPUID_AMD_EFEID_EBX_IBPB, pConfig->enmFlushCmdMsr);
    +
    +            /* Sharing this forced setting with intel would maybe confuse guests... */
    +            if (pConfig->enmFlushCmdMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
    +                pCurLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_IBPB;
    +
    +            fAmdGstSupIbpb = RT_BOOL(pCurLeaf->uEbx & X86_CPUID_AMD_EFEID_EBX_IBPB);
            }
            else
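    The rewritten mask makes the pass-through policy explicit: bits that are always safe to expose (e.g. BTC_NO, SSBD_NOT_REQUIRED) stay unconditional, while the IA32_SPEC_CTRL-related bits are gated on the user's SpecCtrl setting. A toy condensation of the pattern, with invented FAKE_* bit values rather than the real X86_CPUID_AMD_EFEID_EBX_* constants (and the IBPB/flush-cmd interplay simplified away):

        #include <stdbool.h>
        #include <stdint.h>

        #define FAKE_IBPB              UINT32_C(0x00001000)  /* invented bit positions */
        #define FAKE_IBRS              UINT32_C(0x00004000)
        #define FAKE_STIBP             UINT32_C(0x00008000)
        #define FAKE_SSBD_NOT_REQUIRED UINT32_C(0x04000000)
        #define FAKE_BTC_NO            UINT32_C(0x20000000)

        /* Build the allowed-bits mask from zero, OR in only what we deliberately
           expose, and clamp the host-derived leaf value to it. */
        static uint32_t sanitizeExtLeaf8Ebx(uint32_t uHostEbx, bool fSpecCtrl)
        {
            uint32_t fAllowed = FAKE_SSBD_NOT_REQUIRED | FAKE_BTC_NO;
            if (fSpecCtrl)
                fAllowed |= FAKE_IBPB | FAKE_IBRS | FAKE_STIBP;
            return uHostEbx & fAllowed;
        }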
     
    @@ -2551 +2586 @@
        }

    -    /* Cpuid 0x8000001f...0x8ffffffd: Unknown.
    +    /* Cpuid 0x80000020: Platform Quality of Service (PQOS), may have subleaves.
    +     * For now we just zero it.  */
    +    pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000020), 0);
    +    if (pCurLeaf)
    +    {
    +        pCurLeaf = cpumR3CpuIdMakeSingleLeaf(pCpum, pCurLeaf);
    +        cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x80000020));
    +    }
    +
    +    /* Cpuid 0x80000021: Extended Feature 2 (Zen3+?).
    +     *
    +     */
    +    pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000021), 0);
    +    if (pCurLeaf)
    +    {
    +        /** @todo sanitize these bits! */
    +        pCurLeaf->uEax = 0;
    +        pCurLeaf->uEbx = 0;
    +        pCurLeaf->uEcx = 0;
    +        pCurLeaf->uEdx = 0;
    +    }
    +    /* Linux expects us as a hypervisor to insert this leaf for Zen 1 & 2 CPUs
    +       iff IBPB is available to the guest. This is also documented by AMD in
    +       "TECHNICAL UPDATE REGARDING SPECULATIVE RETURN STACK OVERFLOW" rev 2.0
    +       dated 2024-02-00. */
    +    else if (   fAmdGstSupIbpb
    +             && (   pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
    +                 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
    +             && (pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0)) != NULL
    +             && RTX86GetCpuFamily(pExtFeatureLeaf->uEax) == 0x17)
    +    {
    +        static CPUMCPUIDLEAF const s_NewLeaf =
    +        {
    +            /* .uLeaf =*/           UINT32_C(0x80000021),
    +            /* .uSubLeaf = */       0,
    +            /* .fSubLeafMask = */   0,
    +            /* .uEax = */           X86_CPUID_AMD_21_EAX_IBPB_BRTYPE,
    +            /* .uEbx = */           0,
    +            /* .uEcx = */           0,
    +            /* .uEdx = */           0,
    +            /* .fFlags = */         0,
    +        };
    +        int const rc2 = cpumR3CpuIdInsert(NULL, &pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves, &s_NewLeaf);
    +        AssertRC(rc2);
    +        pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000000), 0);
    +        if (pCurLeaf && pCurLeaf->uEax < UINT32_C(0x80000021))
    +            pCurLeaf->uEax = UINT32_C(0x80000021);
    +    }
    +
    +    /* Cpuid 0x80000022...0x8ffffffd: Unknown.
         * We don't know these and what they mean, so remove them. */
        cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
    -                           UINT32_C(0x8000001f), UINT32_C(0x8ffffffd));
    +                           UINT32_C(0x80000022), UINT32_C(0x8ffffffd));

        /* Cpuid 0x8ffffffe: Mystery AMD K6 leaf.
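    For context, the synthesized leaf is what a Zen 1/2 guest kernel checks when deciding whether plain IBPB also flushes branch-type predictions (the SRSO mitigation). A hedged user-land probe of the same bit, assuming IBPB_BRTYPE is CPUID 0x80000021 EAX bit 28 as in current Linux sources (verify against AMD's documentation before relying on it):

        #include <cpuid.h>
        #include <stdbool.h>
        #include <stdio.h>

        /* Probe CPUID 0x80000021 EAX for IBPB_BRTYPE (assumed bit 28). */
        static bool hasIbpbBrType(void)
        {
            unsigned uEax, uEbx, uEcx, uEdx;
            if (   !__get_cpuid(0x80000000, &uEax, &uEbx, &uEcx, &uEdx)
                || uEax < 0x80000021)       /* leaf not implemented */
                return false;
            __cpuid_count(0x80000021, 0, uEax, uEbx, uEcx, uEdx);
            return (uEax >> 28) & 1;
        }

        int main(void)
        {
            printf("IBPB_BRTYPE: %d\n", hasIbpbBrType());
            return 0;
        }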
     
    @@ -2818 +2902 @@
     * leaf are removed.  The default is set to what we're able to sanitize.
     */
    -    rc = CFGMR3QueryU32Def(pCpumCfg, "MaxExtLeaf", &pConfig->uMaxExtLeaf, UINT32_C(0x8000001e));
    +    rc = CFGMR3QueryU32Def(pCpumCfg, "MaxExtLeaf", &pConfig->uMaxExtLeaf, UINT32_C(0x80000021));
        AssertLogRelRCReturn(rc, rc);

     
    @@ -2828 +2912 @@
        rc = CFGMR3QueryU32Def(pCpumCfg, "MaxCentaurLeaf", &pConfig->uMaxCentaurLeaf, UINT32_C(0xc0000004));
        AssertLogRelRCReturn(rc, rc);
    +
    +    /** @cfgm{/CPUM/SpecCtrl, bool, false}
    +     * Enables passing thru IA32_SPEC_CTRL and associated CPU bugfixes.
    +     */
    +    rc = CFGMR3QueryBoolDef(pCpumCfg, "SpecCtrl", &pConfig->fSpecCtrl, false);
    +    AssertRCReturn(rc, rc);

    #ifdef RT_ARCH_AMD64 /** @todo next VT-x/AMD-V on non-AMD64 hosts */
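    Usage note (from memory, so check the manual): this CFGM key can be set per VM via extradata, e.g. VBoxManage setextradata <vm> VBoxInternal/CPUM/SpecCtrl 1, and recent VBoxManage versions appear to expose the same switch as modifyvm --spec-ctrl on. It defaults to off, presumably because the extra MSR switching has a world-switch cost.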
     
    @@ -3533 +3623 @@
             */
            if (RT_SUCCESS(rc))
    -            rc = cpumR3MsrReconcileWithCpuId(pVM);
    +            rc = cpumR3MsrReconcileWithCpuId(pVM, false, false);
            /*
             * MSR fudging.
     
    @@ -3599 +3689 @@

                    /* Check if speculation control is enabled. */
    -                rc = CFGMR3QueryBoolDef(pCpumCfg, "SpecCtrl", &fEnable, false);
    -                AssertRCReturn(rc, rc);
    -                if (fEnable)
    +                if (Config.fSpecCtrl)
                        CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SPEC_CTRL);
                    else
     
    @@ -3638 +3726 @@
                            if (pLeaf)
                            {
    -                            pLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_NO_SSBD_REQUIRED;
    +                            pLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_SSBD_NOT_REQUIRED;
                                LogRel(("CPUM: Set SSBD not required flag for AMD to work around some buggy Linux kernels!\n"));
                            }
     
    @@ -3956 +4044 @@
             */
            case CPUMCPUIDFEATURE_SPEC_CTRL:
    +        {
    +            AssertReturnVoid(!pVM->cpum.s.GuestFeatures.fSpeculationControl); /* should only be done once! */
    +
    +#ifdef RT_ARCH_AMD64
    +            if (!pVM->cpum.s.HostFeatures.s.fIbpb && !pVM->cpum.s.HostFeatures.s.fIbrs)
    +            {
    +                LogRel(("CPUM: WARNING! Can't turn on Speculation Control when the host doesn't support it!\n"));
    +                return;
    +            }
    +#endif
    +            bool fForceSpecCtrl = false;
    +            bool fForceFlushCmd = false;
    +
    +            /*
    +             * Intel spread feature info around a bit...
    +             */
                if (pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
                {
                    pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 0);
    -#ifdef RT_ARCH_AMD64
    -                if (   !pLeaf
    -                    || !(pVM->cpum.s.HostFeatures.s.fIbpb || pVM->cpum.s.HostFeatures.s.fIbrs))
    +                if (!pLeaf)
                    {
    -                    LogRel(("CPUM: WARNING! Can't turn on Speculation Control when the host doesn't support it!\n"));
    +                    LogRel(("CPUM: WARNING! Can't turn on Speculation Control on Intel CPUs without leaf 0x00000007!\n"));
                        return;
                    }
    -#else
    -                if (!pLeaf)
    -                {
    -                    LogRel(("CPUM: WARNING! Can't turn on Speculation Control without leaf 0x00000007!\n"));
    -                    return;
    -                }
    -#endif
    -
    -                /* The feature can be enabled. Let's see what we can actually do. */
    -                pVM->cpum.s.GuestFeatures.fSpeculationControl = 1;
    +
    +                /* Okay, the feature can be enabled. Let's see what we can actually do. */

    #ifdef RT_ARCH_AMD64
     
    @@ -3985 +4079 @@
                        pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB;
                        pVM->cpum.s.GuestFeatures.fIbrs = 1;
    +                    pVM->cpum.s.GuestFeatures.fIbpb = 1;
    #ifdef RT_ARCH_AMD64
                        if (pVM->cpum.s.HostFeatures.s.fStibp)
     
    @@ -4043 +4138 @@
                                pVM->cpum.s.GuestFeatures.fBhiCtrl = 1;
                            }
    +                        fForceSpecCtrl = true;
                        }
    -
    -                    /* Make sure we have the speculation control MSR... */
    -                    pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_SPEC_CTRL);
    -                    if (!pMsrRange)
    -                    {
    -                        static CPUMMSRRANGE const s_SpecCtrl =
    -                        {
    -                            /*.uFirst =*/ MSR_IA32_SPEC_CTRL, /*.uLast =*/ MSR_IA32_SPEC_CTRL,
    -                            /*.enmRdFn =*/ kCpumMsrRdFn_Ia32SpecCtrl, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32SpecCtrl,
    -                            /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
    -                            /*.szName = */ "IA32_SPEC_CTRL"
    -                        };
    -                        int rc = CPUMR3MsrRangesInsert(pVM, &s_SpecCtrl);
    -                        AssertLogRelRC(rc);
    -                    }
    -
    -                    /* ... and the predictor command MSR. */
    -                    pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_PRED_CMD);
    -                    if (!pMsrRange)
    -                    {
    -                        /** @todo incorrect fWrGpMask. */
    -                        static CPUMMSRRANGE const s_SpecCtrl =
    -                        {
    -                            /*.uFirst =*/ MSR_IA32_PRED_CMD, /*.uLast =*/ MSR_IA32_PRED_CMD,
    -                            /*.enmRdFn =*/ kCpumMsrRdFn_WriteOnly, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32PredCmd,
    -                            /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
    -                            /*.szName = */ "IA32_PRED_CMD"
    -                        };
    -                        int rc = CPUMR3MsrRangesInsert(pVM, &s_SpecCtrl);
    -                        AssertLogRelRC(rc);
    -                    }
    -
    +                }
    +
    +#ifdef RT_ARCH_AMD64
    +                if (pVM->cpum.s.HostFeatures.s.fFlushCmd)
    +#endif
    +                {
    +                    pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD;
    +                    pVM->cpum.s.GuestFeatures.fFlushCmd = 1;
    +                    fForceFlushCmd = true;
                    }

     
    @@ -4082 +4155 @@
    #endif
                    {
    -                    /* Install the architectural capabilities MSR. */
    -                    pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_ARCH_CAPABILITIES);
    -                    if (!pMsrRange)
    -                    {
    -                        static CPUMMSRRANGE const s_ArchCaps =
    -                        {
    -                            /*.uFirst =*/ MSR_IA32_ARCH_CAPABILITIES, /*.uLast =*/ MSR_IA32_ARCH_CAPABILITIES,
    -                            /*.enmRdFn =*/ kCpumMsrRdFn_Ia32ArchCapabilities, /*.enmWrFn =*/ kCpumMsrWrFn_ReadOnly,
    -                            /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ UINT64_MAX,
    -                            /*.szName = */ "IA32_ARCH_CAPABILITIES"
    -                        };
    -                        int rc = CPUMR3MsrRangesInsert(pVM, &s_ArchCaps);
    -                        AssertLogRelRC(rc);
    -                    }
    +                    pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP;
    +                    pVM->cpum.s.GuestFeatures.fArchCap = 1;

                        /* Advertise IBRS_ALL if present at this point... */
     
    @@ -4103 +4164 @@
                            VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps |= MSR_IA32_ARCH_CAP_F_IBRS_ALL);
                    }
    -
    -                LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Speculation Control.\n"));
    +                cpumCpuIdExplodeFeaturesX86SetSummaryBits(&pVM->cpum.s.GuestFeatures);
                }
    +            /*
    +             * AMD does things in a different (better) way.  No MSR with info,
    +             * it's all in various CPUID leaves.
    +             */
                else if (   pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
                         || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
                {
                    /* The precise details of AMD's implementation are not yet clear. */
    +                pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x80000008), 0);
    +                if (!pLeaf)
    +                {
    +                    LogRel(("CPUM: WARNING! Can't turn on Speculation Control on AMD CPUs without leaf 0x80000008!\n"));
    +                    return;
    +                }
    +
    +                /* We passthru all the host cpuid bits on AMD, see cpumR3CpuIdSanitize,
    +                   and there is no code to clear/unset the feature.  So, little to do.
    +                   The only thing we could consider here, is to re-enable stuff
    +                   suppressed for portability reasons. */
                }
    +            else
    +                break;
    +
    +            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Speculation Control.\n"));
    +            pVM->cpum.s.GuestFeatures.fSpeculationControl = 1;
    +            cpumR3MsrReconcileWithCpuId(pVM, fForceFlushCmd, fForceSpecCtrl);
                break;
    +        }

            default:
     
    @@ -5614 +5696 @@
        DBGFREGSUBFIELD_RO("EferLmsleUnsupported\0" "EFER.LMSLE is unsupported",            20, 1, 0),
        DBGFREGSUBFIELD_RO("INVLPGBnestedPages\0"   "INVLPGB for nested translation",       21, 1, 0),
    +    DBGFREGSUBFIELD_RO("PPIN\0"         "Protected processor inventory number",         23, 1, 0),
        DBGFREGSUBFIELD_RO("SSBD\0"         "Speculative Store Bypass Disable",             24, 1, 0),
        DBGFREGSUBFIELD_RO("SsbdVirtSpecCtrl\0"     "Use VIRT_SPEC_CTL for SSBD",           25, 1, 0),
     
    @@ -5708 +5791 @@

    static void cpumR3CpuIdInfoVerboseCompareListU32(PCDBGFINFOHLP pHlp, uint32_t uVal1, uint32_t uVal2, PCDBGFREGSUBFIELD pDesc,
    -                                                 uint32_t cchWidth)
    +                                                 const char *pszLeadIn, uint32_t cchWidth)
    {
    +    if (pszLeadIn)
    +        pHlp->pfnPrintf(pHlp,
    +                        "%s\n"
    +                        "  %-*s= guest (host)\n",
    +                        pszLeadIn,
    +                        cchWidth, "Mnemonic - Description");
    +
        uint32_t uCombined = uVal1 | uVal2;
        for (uint32_t iBit = 0; iBit < 32; iBit++)
     
    @@ -5787 +5877 @@
            ASMCpuIdExSlow(1, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
    #endif
    -        pHlp->pfnPrintf(pHlp, "Features\n");
    -        pHlp->pfnPrintf(pHlp, "  Mnemonic - Description                                  = guest (host)\n");
    -        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf1EdxSubFields, 56);
    -        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf1EcxSubFields, 56);
    +        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf1EdxSubFields, "Features", 56);
    +        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf1EcxSubFields, NULL, 56);
        }
        else
     
    @@ -5826 +5914 @@
                    if (fVerbose)
                    {
    -                    pHlp->pfnPrintf(pHlp, " Sub-leaf 0\n");
    -                    pHlp->pfnPrintf(pHlp, "  Mnemonic - Description                                  = guest (host)\n");
    -                    cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub0EbxSubFields, 56);
    -                    cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub0EcxSubFields, 56);
    +                    cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub0EbxSubFields, "Sub-leaf 0", 56);
    +                    cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub0EcxSubFields, NULL, 56);
                        if (pCurLeaf->uEdx || Host.uEdx)
    -                        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub0EdxSubFields, 56);
    +                        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub0EdxSubFields, NULL, 56);
                    }
                    else
     
    @@ -5850 +5936 @@
                        pHlp->pfnPrintf(pHlp, "  Mnemonic - Description                                  = guest (host)\n");
                        if (pCurLeaf->uEbx || Host.uEbx)
    -                        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub2EbxSubFields, 56);
    +                        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub2EbxSubFields, NULL, 56);
                        if (pCurLeaf->uEcx || Host.uEcx)
    -                        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub2EcxSubFields, 56);
    -                    cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub2EdxSubFields, 56);
    +                        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub2EcxSubFields, NULL, 56);
    +                    cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub2EdxSubFields, NULL, 56);
                    }
                    else
     
    62406326                ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
    62416327#endif
    6242                 pHlp->pfnPrintf(pHlp, "Ext Features\n");
    6243                 pHlp->pfnPrintf(pHlp, "  Mnemonic - Description                                  = guest (host)\n");
    6244                 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf1EdxSubFields, 56);
    6245                 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aExtLeaf1EcxSubFields, 56);
     6328                cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf1EdxSubFields, "Ext Features", 56);
     6329                cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aExtLeaf1EcxSubFields, NULL, 56);
    62466330                if (Host.uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
    62476331                {
    6248                     pHlp->pfnPrintf(pHlp, "SVM Feature Identification (leaf A):\n");
    62496332#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    62506333                    ASMCpuIdExSlow(0x8000000a, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
     
    62526335                    pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x8000000a), 0);
    62536336                    uint32_t const uGstEdx = pCurLeaf ? pCurLeaf->uEdx : 0;
    6254                     cpumR3CpuIdInfoVerboseCompareListU32(pHlp, uGstEdx, Host.uEdx, g_aExtLeafAEdxSubFields, 56);
     6337                    cpumR3CpuIdInfoVerboseCompareListU32(pHlp, uGstEdx, Host.uEdx, g_aExtLeafAEdxSubFields,
     6338                                                         "SVM Feature Identification (leaf A)", 56);
    62556339                }
    62566340            }
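For reference, the SVM identification leaf compared here can be queried directly on the host; a hedged sketch, assuming an AMD/Hygon host with leaf 0x8000000A present (field layout per the AMD APM):

    /* Illustrative only: raw query of the SVM feature leaf. */
    uint32_t uEax, uEbx, uEcx, uEdx;
    ASMCpuIdExSlow(0x8000000a, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
    uint32_t const uSvmRev = uEax & 0xff; /* SVM revision number. */
    uint32_t const cAsids  = uEbx;        /* Number of ASIDs supported. */
    /* uEdx carries the feature flags that g_aExtLeafAEdxSubFields decodes. */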
     
    63696453                    cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aExtLeaf7EdxSubFields, "APM Features EDX:", 34);
    63706454                else
    6371                     cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf7EdxSubFields, 56);
     6455                    cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf7EdxSubFields,
     6456                                                         "APM Features EDX", 56);
    63726457            }
    63736458        }
     
    63846469                    cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aExtLeaf8EbxSubFields, "Ext Features ext IDs EBX:", 34);
    63856470                else
    6386                     cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aExtLeaf8EbxSubFields, 56);
     6471                    cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aExtLeaf8EbxSubFields,
     6472                                                         "Ext Features ext IDs EBX", 56);
    63876473            }
    63886474
    63896475            if (iVerbosity)
    63906476            {
    6391                 uint32_t uEAX = pCurLeaf->uEax;
    6392                 uint32_t uECX = pCurLeaf->uEcx;
    6393 
    6394                 /** @todo 0x80000008:EAX[23:16] is only defined for AMD. We'll get 0 on Intel. On
    6395                  *        AMD if we get 0, the guest physical address width should be taken from
    6396                  *        0x80000008:EAX[7:0] instead. Guest Physical address width is relevant
    6397                  *        for guests using nested paging. */
     6477                uint32_t const uEAX = pCurLeaf->uEax;
    63986478                pHlp->pfnPrintf(pHlp,
    63996479                                "Physical Address Width:          %d bits\n"
    6400                                 "Virtual Address Width:           %d bits\n"
    6401                                 "Guest Physical Address Width:    %d bits\n",
     6480                                "Virtual Address Width:           %d bits\n",
    64026481                                (uEAX >> 0) & 0xff,
    6403                                 (uEAX >> 8) & 0xff,
    6404                                 (uEAX >> 16) & 0xff);
    6405 
    6406                 /** @todo 0x80000008:ECX is reserved on Intel (we'll get incorrect physical core
    6407                  *        count here). */
    6408                 pHlp->pfnPrintf(pHlp,
    6409                                 "Physical Core Count:             %d\n",
    6410                                 ((uECX >> 0) & 0xff) + 1);
     6482                                (uEAX >> 8) & 0xff);
     6483                if (   ((uEAX >> 16) & 0xff) != 0
     6484                    || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
     6485                    || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
     6486                    pHlp->pfnPrintf(pHlp, "Guest Physical Address Width:    %d bits%s\n",
     6487                                    (uEAX >> 16) & 0xff ? (uEAX >> 16) & 0xff : (uEAX >> 0) & 0xff,
     6488                                    (uEAX >> 16) & 0xff ? "" : " (0)");
     6489
     6490                uint32_t const uECX = pCurLeaf->uEcx;
     6491                if (   ((uECX >> 0) & 0xff) != 0
     6492                    || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
     6493                    || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
     6494                {
     6495                    uint32_t const cPhysCoreCount = ((uECX >> 0) & 0xff) + 1;
     6496                    uint32_t const cApicIdSize    = (uECX >> 12) & 0xf ? RT_BIT_32((uECX >> 12) & 0xf) : cPhysCoreCount;
     6497                    pHlp->pfnPrintf(pHlp,
     6498                                    "Physical Core Count:             %d\n"
     6499                                    "APIC ID size:                    %u (%#x)\n"
     6500                                    "Performance TSC size:            %u bits\n",
     6501                                    cPhysCoreCount,
     6502                                    cApicIdSize, cApicIdSize,
     6503                                    (((uECX >> 16) & 0x3) << 3) + 40);
     6504                }
     6505                uint32_t const uEDX = pCurLeaf->uEdx;
     6506                if (uEDX)
     6507                    pHlp->pfnPrintf(pHlp,
     6508                                    "Max page count for INVLPGB:      %#x\n"
     6509                                    "Max ECX for RDPRU:               %#x\n",
     6510                                    (uEDX & 0xffff), uEDX >> 16);
    64116511            }
    64126512        }
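A compact restatement of the leaf 0x80000008 decoding introduced above (field layout per the AMD APM; the vendor checks are omitted and the variable names are illustrative):

    uint32_t const cPhysBits    = (uEAX >>  0) & 0xff;   /* physical address width */
    uint32_t const cVirtBits    = (uEAX >>  8) & 0xff;   /* virtual address width */
    uint32_t const cGstPhysBits = (uEAX >> 16) & 0xff    /* 0 = use the physical width */
                                ? (uEAX >> 16) & 0xff : cPhysBits;
    uint32_t const cPhysCores   = ((uECX >>  0) & 0xff) + 1;
    uint32_t const cApicIdBits  = (uECX >> 12) & 0xf;    /* 0 = derive from core count */
    uint32_t const cApicIds     = cApicIdBits ? RT_BIT_32(cApicIdBits) : cPhysCores;
    uint32_t const cPerfTscBits = (((uECX >> 16) & 0x3) << 3) + 40; /* 40/48/56/64 bits */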
  • trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp

    r107778 r107854  
    587587 * @returns VBox status code.
    588588 * @param   pVM                 The cross context VM structure.
    589  */
    590 int cpumR3MsrReconcileWithCpuId(PVM pVM)
    591 {
    592     PCCPUMMSRRANGE papToAdd[10];
     589 * @param   fForceFlushCmd      Make sure MSR_IA32_FLUSH_CMD is present.
     590 * @param   fForceSpecCtrl      Make sure MSR_IA32_SPEC_CTRL is present.
     591 */
     592DECLHIDDEN(int) cpumR3MsrReconcileWithCpuId(PVM pVM, bool fForceFlushCmd, bool fForceSpecCtrl)
     593{
     594    PCCPUMMSRRANGE apToAdd[10];
    593595    uint32_t       cToAdd = 0;
    594596
     
    596598     * The IA32_FLUSH_CMD MSR was introduced in MCUs for CVE-2018-3646 and associates.
    597599     */
    598     if (pVM->cpum.s.GuestFeatures.fFlushCmd && !cpumLookupMsrRange(pVM, MSR_IA32_FLUSH_CMD))
     600    if (   pVM->cpum.s.GuestFeatures.fFlushCmd
     601        || fForceFlushCmd)
    599602    {
    600603        static CPUMMSRRANGE const s_FlushCmd =
     
    611614            /*.szName = */      "IA32_FLUSH_CMD"
    612615        };
    613         papToAdd[cToAdd++] = &s_FlushCmd;
     616        apToAdd[cToAdd++] = &s_FlushCmd;
     617    }
     618
     619    /*
      620     * The IA32_PRED_CMD MSR was introduced in MCUs for CVE-2018-3646 and associates.
     621     */
     622    if (   pVM->cpum.s.GuestFeatures.fIbpb
     623        /** @todo || pVM->cpum.s.GuestFeatures.fSbpb*/)
     624    {
     625        static CPUMMSRRANGE const s_PredCmd =
     626        {
     627            /*.uFirst =*/       MSR_IA32_PRED_CMD,
     628            /*.uLast =*/        MSR_IA32_PRED_CMD,
     629            /*.enmRdFn =*/      kCpumMsrRdFn_WriteOnly,
     630            /*.enmWrFn =*/      kCpumMsrWrFn_Ia32PredCmd,
     631            /*.offCpumCpu =*/   UINT16_MAX,
     632            /*.fReserved =*/    0,
     633            /*.uValue =*/       0,
     634            /*.fWrIgnMask =*/   0,
     635            /*.fWrGpMask =*/    ~MSR_IA32_PRED_CMD_F_IBPB,
     636            /*.szName = */      "IA32_PRED_CMD"
     637        };
     638        apToAdd[cToAdd++] = &s_PredCmd;
     639    }
     640
     641    /*
      642     * The IA32_SPEC_CTRL MSR was introduced in MCUs for CVE-2018-3646 and associates.
     643     */
     644    if (   pVM->cpum.s.GuestFeatures.fSpecCtrlMsr
     645        || fForceSpecCtrl)
     646    {
     647        static CPUMMSRRANGE const s_SpecCtrl =
     648        {
     649            /*.uFirst =*/       MSR_IA32_SPEC_CTRL,
     650            /*.uLast =*/        MSR_IA32_SPEC_CTRL,
     651            /*.enmRdFn =*/      kCpumMsrRdFn_Ia32SpecCtrl,
     652            /*.enmWrFn =*/      kCpumMsrWrFn_Ia32SpecCtrl,
     653            /*.offCpumCpu =*/   UINT16_MAX,
     654            /*.fReserved =*/    0,
     655            /*.uValue =*/       0,
     656            /*.fWrIgnMask =*/   0,
     657            /*.fWrGpMask =*/    0,
     658            /*.szName = */      "IA32_SPEC_CTRL"
     659        };
     660        apToAdd[cToAdd++] = &s_SpecCtrl;
    614661    }
    615662
     
    618665     * documented in relation to such.
    619666     */
    620     if (pVM->cpum.s.GuestFeatures.fArchCap && !cpumLookupMsrRange(pVM, MSR_IA32_ARCH_CAPABILITIES))
     667    if (pVM->cpum.s.GuestFeatures.fArchCap)
    621668    {
    622669        static CPUMMSRRANGE const s_ArchCaps =
     
    633680            /*.szName = */      "IA32_ARCH_CAPABILITIES"
    634681        };
    635         papToAdd[cToAdd++] = &s_ArchCaps;
     682        apToAdd[cToAdd++] = &s_ArchCaps;
    636683    }
    637684
     
    639686     * Do the adding.
    640687     */
     688    Assert(cToAdd <= RT_ELEMENTS(apToAdd));
    641689    for (uint32_t i = 0; i < cToAdd; i++)
    642690    {
    643         PCCPUMMSRRANGE pRange = papToAdd[i];
    644         LogRel(("CPUM: MSR/CPUID reconciliation insert: %#010x %s\n", pRange->uFirst, pRange->szName));
    645         int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
    646                                        pRange);
    647         if (RT_FAILURE(rc))
    648             return rc;
     691        PCCPUMMSRRANGE pRange = apToAdd[i];
     692        Assert(pRange->uFirst == pRange->uLast);
     693        if (!cpumLookupMsrRange(pVM, pRange->uFirst))
     694        {
     695            LogRel(("CPUM: MSR/CPUID reconciliation insert: %#010x %s\n", pRange->uFirst, pRange->szName));
     696            int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3,
     697                                           &pVM->cpum.s.GuestInfo.cMsrRanges, pRange);
     698            AssertRCReturn(rc, rc);
     699        }
    649700    }
    650701    return VINF_SUCCESS;
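An assumed call site for the widened signature (not part of this changeset): most callers would pass false for both force flags and rely on the exploded CPUID feature bits, while code that must expose the MSRs unconditionally sets them:

    int rc = cpumR3MsrReconcileWithCpuId(pVM, false /*fForceFlushCmd*/, false /*fForceSpecCtrl*/);
    AssertRCReturn(rc, rc);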
  • trunk/src/VBox/VMM/include/CPUMInternal.h

    r107700 r107854  
    543543# ifdef RT_ARCH_AMD64
    544544AssertCompileMemberAlignment(CPUMCPU, Host, 64);
     545AssertCompileAdjacentMembers(CPUMCPU, Guest, GuestMsrs); /* HACK ALERT! HMR0A.asm makes this ASSUMPTION in the SVM RUN code! */
    545546# endif
    546547#endif
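Roughly what the new compile-time check expands to (per iprt/assertcompile.h): Guest must be immediately followed by GuestMsrs, because the assembly SVM run code addresses GuestMsrs relative to Guest:

    /* Sketch of the macro's expansion; fails the build if padding sneaks in. */
    AssertCompile(   RT_UOFFSETOF(CPUMCPU, Guest) + RT_SIZEOFMEMB(CPUMCPU, Guest)
                  == RT_UOFFSETOF(CPUMCPU, GuestMsrs));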
     
    563564int                 cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs,
    564565                                                CPUMFEATURESX86 *pFeatures);
     566void                cpumCpuIdExplodeFeaturesX86SetSummaryBits(CPUMFEATURESX86 *pFeatures);
     567void                cpumCpuIdExplodeArchCapabilities(CPUMFEATURESX86 *pFeatures, bool fHasArchCap, uint64_t fArchVal);
    565568# endif /* defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */
    566569# if defined(RT_ARCH_ARM64) || defined(VBOX_VMM_TARGET_ARMV8)
     
    591594#  ifdef VBOX_VMM_TARGET_X86
    592595int                 cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange);
    593 int                 cpumR3MsrReconcileWithCpuId(PVM pVM);
     596DECLHIDDEN(int)     cpumR3MsrReconcileWithCpuId(PVM pVM, bool fForceFlushCmd, bool fForceSpecCtrl);
    594597int                 cpumR3MsrApplyFudge(PVM pVM);
    595598int                 cpumR3MsrRegStats(PVM pVM);
  • trunk/src/VBox/VMM/include/HMInternal.h

    r106061 r107854  
    592592/** @addtogroup grp_hm_int_svm  SVM Internal
    593593 * @{ */
    594 /** SVM VMRun function, see SVMR0VMRun(). */
     594/** SVM VMRun function, see SVMR0VMRun().  */
    595595typedef DECLCALLBACKTYPE(int, FNHMSVMVMRUN,(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB));
    596596/** Pointer to a SVM VMRun function. */
     
    11791179/** Flush MDS buffers on VM entry. */
    11801180#define HM_WSF_MDS_ENTRY            RT_BIT_32(3)
     1181/** MSR_IA32_SPEC_CTRL needs to be replaced upon entry and exit.
     1182 * Save host value on entry, load guest value, run guest, save guest value on
     1183 * exit and restore the host value.
      1184 * @todo may not be reliable for VT-x/Intel.  */
     1185#define HM_WSF_SPEC_CTRL            RT_BIT_32(4)
    11811186
    11821187/** Touch IA32_FLUSH_CMD.L1D on VM scheduling. */
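A minimal C sketch of the HM_WSF_SPEC_CTRL sequence described in the comment above; the real work is done in the assembly world switchers, and the variable and function names here are illustrative:

    uint64_t const uHostSpecCtrl = ASMRdMsr(MSR_IA32_SPEC_CTRL); /* save host value on entry */
    ASMWrMsr(MSR_IA32_SPEC_CTRL, uGuestSpecCtrl);                /* load guest value */
    int rc = pfnVMRun(pVM, pVCpu, HCPhysVmcb);                   /* run the guest */
    uGuestSpecCtrl = ASMRdMsr(MSR_IA32_SPEC_CTRL);               /* save guest value on exit */
    ASMWrMsr(MSR_IA32_SPEC_CTRL, uHostSpecCtrl);                 /* restore host value */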
     
    12441249 * @{
    12451250 */
    1246 DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
    1247 DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
    1248 DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
    1249 DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
    1250 DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
    1251 DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
    1252 DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
    1253 DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1251DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1252DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1253DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1254DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1255DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1256DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1257DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1258DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit_SansSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1259DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1260DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1261DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1262DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1263DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1264DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1265DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
     1266DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit_WithSpecCtrl(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
    12541267/** @} */
    12551268
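The sixteen variants cover every combination of the four independent concerns encoded in the names. A hypothetical selector (g_apfnVMRun and the flag tests are assumptions for illustration, not code from this changeset) might index a function table with one bit per concern:

    uintptr_t idx = 0;
    if (fXcr0Needed)                        idx |= 1; /* With/SansXcr0 */
    if (fWorldSwitcher & HM_WSF_IBPB_ENTRY) idx |= 2; /* With/SansIbpbEntry */
    if (fWorldSwitcher & HM_WSF_IBPB_EXIT)  idx |= 4; /* With/SansIbpbExit */
    if (fWorldSwitcher & HM_WSF_SPEC_CTRL)  idx |= 8; /* With/SansSpecCtrl */
    pVCpu->hmr0.s.svm.pfnVMRun = g_apfnVMRun[idx];    /* 2*2*2*2 = 16 entries */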
  • trunk/src/VBox/VMM/include/HMInternal.mac

    r106061 r107854  
    276276%define HM_WSF_L1D_ENTRY            RT_BIT_32(2)
    277277%define HM_WSF_MDS_ENTRY            RT_BIT_32(3)
    278 
     278%define HM_WSF_SPEC_CTRL            RT_BIT_32(4)
     279