VirtualBox

Changeset 105020 in vbox


Timestamp:
Jun 25, 2024 12:39:37 PM
Author:
vboxsync
Message:

VMM/CPUM: We need to push the ARCH_CAP MSR on ARM hosts, setting all the bits that indicate the 'CPU' isn't suffering from problems. bugref:10687
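
The guest sees these bits through MSR_IA32_ARCH_CAPABILITIES (index 0x10A). As a quick illustration (not part of the changeset), here is a minimal C program a Linux guest could run to dump the MSR via the kernel's msr driver and check a few of the "not affected" bits this commit sets; it assumes the msr module is loaded (modprobe msr) and root privileges:

    /* Minimal sketch: read IA32_ARCH_CAPABILITIES (MSR 0x10A) in a Linux
     * guest and report some of the "not affected" bits. The bit positions
     * (RDCL_NO=0, IBRS_ALL=1, SSB_NO=4, MDS_NO=5) are architectural. */
    #include <fcntl.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_IA32_ARCH_CAPABILITIES 0x10a

    int main(void)
    {
        int fd = open("/dev/cpu/0/msr", O_RDONLY);
        if (fd < 0) { perror("open /dev/cpu/0/msr"); return 1; }

        uint64_t val;
        if (pread(fd, &val, sizeof(val), MSR_IA32_ARCH_CAPABILITIES) != sizeof(val))
        { perror("pread"); close(fd); return 1; }
        close(fd);

        printf("IA32_ARCH_CAPABILITIES = %#" PRIx64 "\n", val);
        printf("RDCL_NO  (no Meltdown):           %u\n", (unsigned)(val >> 0) & 1);
        printf("IBRS_ALL (enhanced IBRS):         %u\n", (unsigned)(val >> 1) & 1);
        printf("SSB_NO   (no spec. store bypass): %u\n", (unsigned)(val >> 4) & 1);
        printf("MDS_NO   (no MDS):                %u\n", (unsigned)(val >> 5) & 1);
        return 0;
    }

Build with cc and run as root; in a VM where this MSR is exposed, the bits set by the changeset below should read 1.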

Location:
trunk/src/VBox/VMM/VMMR3
Files:
2 edited

Legend:

    (no prefix)  Unmodified
    +            Added
    -            Removed
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r103752 → r105020

 #include "CPUMInternal.h"
 #include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmcc.h>
 
 #include <VBox/param.h>

     pVM->cpum.s.HostFeatures.fAvx2              = false;
 # endif
+
+    /* We must strongly discourage the guest from doing unnecessary stuff with the
+       page tables to avoid exploits, as that's expensive and doesn't apply to us. */
+    pVM->cpum.s.HostFeatures.fArchRdclNo             = true;
+    pVM->cpum.s.HostFeatures.fArchIbrsAll            = true;
+    //pVM->cpum.s.HostFeatures.fArchRsbOverride        = true;
+    pVM->cpum.s.HostFeatures.fArchVmmNeedNotFlushL1d = true;
+    pVM->cpum.s.HostFeatures.fArchMdsNo              = true;
+    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps = MSR_IA32_ARCH_CAP_F_RDCL_NO
+                                                                       | MSR_IA32_ARCH_CAP_F_IBRS_ALL
+                                                                       //| MSR_IA32_ARCH_CAP_F_RSBO
+                                                                       | MSR_IA32_ARCH_CAP_F_VMM_NEED_NOT_FLUSH_L1D
+                                                                       | MSR_IA32_ARCH_CAP_F_SSB_NO
+                                                                       | MSR_IA32_ARCH_CAP_F_MDS_NO
+                                                                       | MSR_IA32_ARCH_CAP_F_IF_PSCHANGE_MC_NO
+                                                                       //| MSR_IA32_ARCH_CAP_F_TSX_CTRL
+                                                                       //| MSR_IA32_ARCH_CAP_F_TAA_NO
+                                                                       //| MSR_IA32_ARCH_CAP_F_MISC_PACKAGE_CTRLS
+                                                                       //| MSR_IA32_ARCH_CAP_F_ENERGY_FILTERING_CTL
+                                                                       //| MSR_IA32_ARCH_CAP_F_DOITM
+                                                                       | MSR_IA32_ARCH_CAP_F_SBDR_SSDP_NO
+                                                                       | MSR_IA32_ARCH_CAP_F_FBSDP_NO
+                                                                       | MSR_IA32_ARCH_CAP_F_PSDP_NO
+                                                                       //| MSR_IA32_ARCH_CAP_F_FB_CLEAR
+                                                                       //| MSR_IA32_ARCH_CAP_F_FB_CLEAR_CTRL
+                                                                       //| MSR_IA32_ARCH_CAP_F_RRSBA
+                                                                       | MSR_IA32_ARCH_CAP_F_BHI_NO
+                                                                       //| MSR_IA32_ARCH_CAP_F_XAPIC_DISABLE_STATUS
+                                                                       //| MSR_IA32_ARCH_CAP_F_OVERCLOCKING_STATUS
+                                                                       | MSR_IA32_ARCH_CAP_F_PBRSB_NO
+                                                                       //| MSR_IA32_ARCH_CAP_F_GDS_CTRL
+                                                                       | MSR_IA32_ARCH_CAP_F_GDS_NO
+                                                                       | MSR_IA32_ARCH_CAP_F_RFDS_NO
+                                                                       //| MSR_IA32_ARCH_CAP_F_RFDS_CLEAR
+                             );
 #endif
 
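
The VMCC_FOR_EACH_VMCPU_STMT() statement above broadcasts the same ArchCaps value to every virtual CPU. As a rough, self-contained sketch (the VM/VCPU structures and the macro below are simplified stand-ins, not VirtualBox's real definitions), the pattern amounts to a plain per-vCPU loop like the explicit one this changeset also uses in CPUMR3CpuId.cpp:

    /* Illustrative only: stamp the same ArchCaps value into each vCPU. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct VCPU { uint64_t uArchCaps; } VCPU;
    typedef struct VM   { unsigned cCpus; VCPU aCpus[4]; } VM;

    /* Hypothetical stand-in for the real VMCC_FOR_EACH_VMCPU_STMT macro. */
    #define FOR_EACH_VMCPU_STMT(pVM, a_Stmt) \
        do { \
            for (unsigned idCpu = 0; idCpu < (pVM)->cCpus; idCpu++) \
            { \
                VCPU *pVCpu = &(pVM)->aCpus[idCpu]; \
                a_Stmt; \
            } \
        } while (0)

    #define ARCH_CAP_F_RDCL_NO   UINT64_C(0x1)  /* illustrative subset of the flags */
    #define ARCH_CAP_F_IBRS_ALL  UINT64_C(0x2)

    int main(void)
    {
        VM Vm = { .cCpus = 4 };
        FOR_EACH_VMCPU_STMT(&Vm, pVCpu->uArchCaps = ARCH_CAP_F_RDCL_NO
                                                  | ARCH_CAP_F_IBRS_ALL);
        printf("vCPU0 ArchCaps = %#llx\n", (unsigned long long)Vm.aCpus[0].uArchCaps);
        return 0;
    }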
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

    r103752 → r105020

 /** Enable the extension if it's supported by the host CPU. */
 #define CPUMISAEXTCFG_ENABLED_SUPPORTED     true
+/** Enable the extension if it's supported by the host CPU or when on ARM64. */
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# define CPUMISAEXTCFG_ENABLED_SUPPORTED_OR_NOT_AMD64   CPUMISAEXTCFG_ENABLED_SUPPORTED
+#else
+# define CPUMISAEXTCFG_ENABLED_SUPPORTED_OR_NOT_AMD64   CPUMISAEXTCFG_ENABLED_ALWAYS
+#endif
 /** Enable the extension if it's supported by the host CPU, but don't let
  * the portable CPUID feature disable it. */

      * Whether to expose the MSR_IA32_ARCH_CAPABILITIES MSR to the guest.
      */
-    rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ArchCapMsr", &pConfig->enmArchCapMsr, CPUMISAEXTCFG_ENABLED_SUPPORTED);
+    rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ArchCapMsr", &pConfig->enmArchCapMsr, CPUMISAEXTCFG_ENABLED_SUPPORTED_OR_NOT_AMD64);
     AssertLogRelRCReturn(rc, rc);
 
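
Together, the two hunks above change the default for the /CPUM/IsaExts/ArchCapMsr setting: on x86/AMD64 builds it stays "enabled when the host supports it", while on other architectures (e.g. ARM64) it becomes "always enabled", since there is no x86 host CPU to query. A compilable toy version of the preprocessor selection (the integer values are illustrative stand-ins for the real CPUMISAEXTCFG_* constants):

    #include <stdio.h>

    #define CPUMISAEXTCFG_ENABLED_SUPPORTED  1   /* "on if the host supports it" */
    #define CPUMISAEXTCFG_ENABLED_ALWAYS     2   /* "on unconditionally"         */

    #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    # define CPUMISAEXTCFG_ENABLED_SUPPORTED_OR_NOT_AMD64 CPUMISAEXTCFG_ENABLED_SUPPORTED
    #else
    # define CPUMISAEXTCFG_ENABLED_SUPPORTED_OR_NOT_AMD64 CPUMISAEXTCFG_ENABLED_ALWAYS
    #endif

    int main(void)
    {
        /* Compile with and without -DRT_ARCH_AMD64 to see the two outcomes. */
        printf("ArchCapMsr default = %d\n", CPUMISAEXTCFG_ENABLED_SUPPORTED_OR_NOT_AMD64);
        return 0;
    }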
     
 
     /*
-     * Setup MSRs introduced in microcode updates or that are otherwise not in
-     * the CPU profile, but are advertised in the CPUID info we just sanitized.
+     * Move the CPUID array over to the static VM structure allocation
+     * and explode guest CPU features again.  We must do this *before*
+     * reconciling MSRs with CPUIDs and applying any fudging (esp on ARM64).
      */
     if (RT_SUCCESS(rc))
-        rc = cpumR3MsrReconcileWithCpuId(pVM);
-    /*
-     * MSR fudging.
-     */
-    if (RT_SUCCESS(rc))
-    {
-        /** @cfgm{/CPUM/FudgeMSRs, boolean, true}
-         * Fudges some common MSRs if not present in the selected CPU database entry.
-         * This is for trying to keep VMs running when moved between different hosts
-         * and different CPU vendors. */
-        bool fEnable;
-        rc = CFGMR3QueryBoolDef(pCpumCfg, "FudgeMSRs", &fEnable, true); AssertRC(rc);
-        if (RT_SUCCESS(rc) && fEnable)
-        {
-            rc = cpumR3MsrApplyFudge(pVM);
-            AssertLogRelRC(rc);
-        }
-    }
-    if (RT_SUCCESS(rc))
-    {
-        /*
-         * Move the MSR and CPUID arrays over to the static VM structure allocations
-         * and explode guest CPU features again.
-         */
-        void *pvFree = pCpum->GuestInfo.paCpuIdLeavesR3;
+    {
+        void * const pvFree = pCpum->GuestInfo.paCpuIdLeavesR3;
         rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCpum, pCpum->GuestInfo.paCpuIdLeavesR3,
                                                 pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs);
+        AssertLogRelRC(rc);
         RTMemFree(pvFree);
-
-        AssertFatalMsg(pCpum->GuestInfo.cMsrRanges <= RT_ELEMENTS(pCpum->GuestInfo.aMsrRanges),
-                       ("%u\n", pCpum->GuestInfo.cMsrRanges));
-        memcpy(pCpum->GuestInfo.aMsrRanges, pCpum->GuestInfo.paMsrRangesR3,
-               sizeof(pCpum->GuestInfo.paMsrRangesR3[0]) * pCpum->GuestInfo.cMsrRanges);
-        RTMemFree(pCpum->GuestInfo.paMsrRangesR3);
-        pCpum->GuestInfo.paMsrRangesR3 = pCpum->GuestInfo.aMsrRanges;
-
-        AssertLogRelRCReturn(rc, rc);
-
-        /*
-         * Some more configuration that we're applying at the end of everything
-         * via the CPUMR3SetGuestCpuIdFeature API.
-         */
-
-        /* Check if 64-bit guest supported was enabled. */
-        bool fEnable64bit;
-        rc = CFGMR3QueryBoolDef(pCpumCfg, "Enable64bit", &fEnable64bit, false);
-        AssertRCReturn(rc, rc);
-        if (fEnable64bit)
-        {
-            /* In case of a CPU upgrade: */
-            CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
-            CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);      /* (Long mode only on Intel CPUs.) */
-            CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
-            CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
-            CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
-
-            /* The actual feature: */
-            CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
-        }
-
-        /* Check if PAE was explicitely enabled by the user. */
-        bool fEnable;
-        rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, fEnable64bit);
-        AssertRCReturn(rc, rc);
-        if (fEnable && !pVM->cpum.s.GuestFeatures.fPae)
-            CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
-
-        /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */
-        rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, fEnable64bit);
-        AssertRCReturn(rc, rc);
-        if (fEnable && !pVM->cpum.s.GuestFeatures.fNoExecute)
-            CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
-
-        /* Check if speculation control is enabled. */
-        rc = CFGMR3QueryBoolDef(pCpumCfg, "SpecCtrl", &fEnable, false);
-        AssertRCReturn(rc, rc);
-        if (fEnable)
-            CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SPEC_CTRL);
-        else
+        if (RT_SUCCESS(rc))
         {
             /*
-             * Set the "SSBD-not-needed" flag to work around a bug in some Linux kernels when the VIRT_SPEC_CTL
-             * feature is not exposed on AMD CPUs and there is only 1 vCPU configured.
-             * This was observed with kernel "4.15.0-29-generic #31~16.04.1-Ubuntu" but more versions are likely affected.
-             *
-             * The kernel doesn't initialize a lock and causes a NULL pointer exception later on when configuring SSBD:
-             *    EIP: _raw_spin_lock+0x14/0x30
-             *    EFLAGS: 00010046 CPU: 0
-             *    EAX: 00000000 EBX: 00000001 ECX: 00000004 EDX: 00000000
-             *    ESI: 00000000 EDI: 00000000 EBP: ee023f1c ESP: ee023f18
-             *    DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
-             *    CR0: 80050033 CR2: 00000004 CR3: 3671c180 CR4: 000006f0
-             *    Call Trace:
-             *     speculative_store_bypass_update+0x8e/0x180
-             *     ssb_prctl_set+0xc0/0xe0
-             *     arch_seccomp_spec_mitigate+0x1d/0x20
-             *     do_seccomp+0x3cb/0x610
-             *     SyS_seccomp+0x16/0x20
-             *     do_fast_syscall_32+0x7f/0x1d0
-             *     entry_SYSENTER_32+0x4e/0x7c
-             *
-             * The lock would've been initialized in process.c:speculative_store_bypass_ht_init() called from two places in smpboot.c.
-             * First when a secondary CPU is started and second in native_smp_prepare_cpus() which is not called in a single vCPU environment.
-             *
-             * As spectre control features are completely disabled anyway when we arrived here there is no harm done in informing the
-             * guest to not even try.
+             * Setup MSRs introduced in microcode updates or that are otherwise not in
+             * the CPU profile, but are advertised in the CPUID info we just sanitized.
              */
-            if (   pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
-                || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
+            if (RT_SUCCESS(rc))
+                rc = cpumR3MsrReconcileWithCpuId(pVM);
+            /*
+             * MSR fudging.
+             */
+            if (RT_SUCCESS(rc))
             {
-                PCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x80000008), 0);
-                if (pLeaf)
+                /** @cfgm{/CPUM/FudgeMSRs, boolean, true}
+                 * Fudges some common MSRs if not present in the selected CPU database entry.
+                 * This is for trying to keep VMs running when moved between different hosts
+                 * and different CPU vendors. */
+                bool fEnable;
+                rc = CFGMR3QueryBoolDef(pCpumCfg, "FudgeMSRs", &fEnable, true); AssertRC(rc);
+                if (RT_SUCCESS(rc) && fEnable)
                 {
-                    pLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_NO_SSBD_REQUIRED;
-                    LogRel(("CPUM: Set SSBD not required flag for AMD to work around some buggy Linux kernels!\n"));
+                    rc = cpumR3MsrApplyFudge(pVM);
+                    AssertLogRelRC(rc);
                 }
             }
-        }
-
-        /*
-         * MTRR support.
-         * We've always reported the MTRR feature bit in CPUID.
-         * Here we allow exposing MTRRs with reasonable default values (especially required
-         * by Windows 10 guests with Hyper-V enabled). The MTRR support isn't feature
-         * complete, see @bugref{10318} and bugref{10498}.
-         */
-        if (pVM->cpum.s.GuestFeatures.fMtrr)
-        {
-            /** @cfgm{/CPUM/MtrrWrite, boolean, true}
-             * Whether to enable MTRR read-write support. This overrides the MTRR read-only CFGM
-             * setting. */
-            bool fEnableMtrrReadWrite;
-            rc = CFGMR3QueryBoolDef(pCpumCfg, "MtrrReadWrite", &fEnableMtrrReadWrite, true);
-            AssertRCReturn(rc, rc);
-            if (fEnableMtrrReadWrite)
+            if (RT_SUCCESS(rc))
             {
-                pVM->cpum.s.fMtrrRead  = true;
-                pVM->cpum.s.fMtrrWrite = true;
-                LogRel(("CPUM: Enabled MTRR read-write support\n"));
+                /*
+                 * Move the MSR arrays over to the static VM structure allocation.
+                 */
+                AssertFatalMsg(pCpum->GuestInfo.cMsrRanges <= RT_ELEMENTS(pCpum->GuestInfo.aMsrRanges),
+                               ("%u\n", pCpum->GuestInfo.cMsrRanges));
+                memcpy(pCpum->GuestInfo.aMsrRanges, pCpum->GuestInfo.paMsrRangesR3,
+                       sizeof(pCpum->GuestInfo.paMsrRangesR3[0]) * pCpum->GuestInfo.cMsrRanges);
+                RTMemFree(pCpum->GuestInfo.paMsrRangesR3);
+                pCpum->GuestInfo.paMsrRangesR3 = pCpum->GuestInfo.aMsrRanges;
+
+                /*
+                 * Some more configuration that we're applying at the end of everything
+                 * via the CPUMR3SetGuestCpuIdFeature API.
+                 */
+
+                /* Check if 64-bit guest supported was enabled. */
+                bool fEnable64bit;
+                rc = CFGMR3QueryBoolDef(pCpumCfg, "Enable64bit", &fEnable64bit, false);
+                AssertRCReturn(rc, rc);
+                if (fEnable64bit)
+                {
+                    /* In case of a CPU upgrade: */
+                    CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
+                    CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);      /* (Long mode only on Intel CPUs.) */
+                    CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
+                    CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
+                    CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
+
+                    /* The actual feature: */
+                    CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
+                }
+
+                /* Check if PAE was explicitely enabled by the user. */
+                bool fEnable;
+                rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, fEnable64bit);
+                AssertRCReturn(rc, rc);
+                if (fEnable && !pVM->cpum.s.GuestFeatures.fPae)
+                    CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
+
+                /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */
+                rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, fEnable64bit);
+                AssertRCReturn(rc, rc);
+                if (fEnable && !pVM->cpum.s.GuestFeatures.fNoExecute)
+                    CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
+
+                /* Check if speculation control is enabled. */
+                rc = CFGMR3QueryBoolDef(pCpumCfg, "SpecCtrl", &fEnable, false);
+                AssertRCReturn(rc, rc);
+                if (fEnable)
+                    CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SPEC_CTRL);
+                else
+                {
+                    /*
+                     * Set the "SSBD-not-needed" flag to work around a bug in some Linux kernels when the VIRT_SPEC_CTL
+                     * feature is not exposed on AMD CPUs and there is only 1 vCPU configured.
+                     * This was observed with kernel "4.15.0-29-generic #31~16.04.1-Ubuntu" but more versions are likely affected.
+                     *
+                     * The kernel doesn't initialize a lock and causes a NULL pointer exception later on when configuring SSBD:
+                     *    EIP: _raw_spin_lock+0x14/0x30
+                     *    EFLAGS: 00010046 CPU: 0
+                     *    EAX: 00000000 EBX: 00000001 ECX: 00000004 EDX: 00000000
+                     *    ESI: 00000000 EDI: 00000000 EBP: ee023f1c ESP: ee023f18
+                     *    DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
+                     *    CR0: 80050033 CR2: 00000004 CR3: 3671c180 CR4: 000006f0
+                     *    Call Trace:
+                     *     speculative_store_bypass_update+0x8e/0x180
+                     *     ssb_prctl_set+0xc0/0xe0
+                     *     arch_seccomp_spec_mitigate+0x1d/0x20
+                     *     do_seccomp+0x3cb/0x610
+                     *     SyS_seccomp+0x16/0x20
+                     *     do_fast_syscall_32+0x7f/0x1d0
+                     *     entry_SYSENTER_32+0x4e/0x7c
+                     *
+                     * The lock would've been initialized in process.c:speculative_store_bypass_ht_init() called from two places in smpboot.c.
+                     * First when a secondary CPU is started and second in native_smp_prepare_cpus() which is not called in a single vCPU environment.
+                     *
+                     * As spectre control features are completely disabled anyway when we arrived here there is no harm done in informing the
+                     * guest to not even try.
+                     */
+                    if (   pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+                        || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
+                    {
+                        PCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x80000008), 0);
+                        if (pLeaf)
+                        {
+                            pLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_NO_SSBD_REQUIRED;
+                            LogRel(("CPUM: Set SSBD not required flag for AMD to work around some buggy Linux kernels!\n"));
+                        }
+                    }
+                }
+
+                /*
+                 * MTRR support.
+                 * We've always reported the MTRR feature bit in CPUID.
+                 * Here we allow exposing MTRRs with reasonable default values (especially required
+                 * by Windows 10 guests with Hyper-V enabled). The MTRR support isn't feature
+                 * complete, see @bugref{10318} and bugref{10498}.
+                 */
+                if (pVM->cpum.s.GuestFeatures.fMtrr)
+                {
+                    /** @cfgm{/CPUM/MtrrWrite, boolean, true}
+                     * Whether to enable MTRR read-write support. This overrides the MTRR read-only CFGM
+                     * setting. */
+                    bool fEnableMtrrReadWrite;
+                    rc = CFGMR3QueryBoolDef(pCpumCfg, "MtrrReadWrite", &fEnableMtrrReadWrite, true);
+                    AssertRCReturn(rc, rc);
+                    if (fEnableMtrrReadWrite)
+                    {
+                        pVM->cpum.s.fMtrrRead  = true;
+                        pVM->cpum.s.fMtrrWrite = true;
+                        LogRel(("CPUM: Enabled MTRR read-write support\n"));
+                    }
+                    else
+                    {
+                        /** @cfgm{/CPUM/MtrrReadOnly, boolean, false}
+                         * Whether to enable MTRR read-only support and to initialize mapping of guest
+                         * memory via MTRRs. When disabled, MTRRs are left blank, returns 0 on reads and
+                         * ignores writes. Some guests like GNU/Linux recognize a virtual system when MTRRs
+                         * are left blank but some guests may expect their RAM to be mapped via MTRRs
+                         * similar to real hardware. */
+                        rc = CFGMR3QueryBoolDef(pCpumCfg, "MtrrReadOnly", &pVM->cpum.s.fMtrrRead, false);
+                        AssertRCReturn(rc, rc);
+                        LogRel(("CPUM: Enabled MTRR read-only support\n"));
+                    }
+
+                    /* Setup MTRR capability based on what the guest CPU profile (typically host) supports. */
+                    Assert(!pVM->cpum.s.fMtrrWrite || pVM->cpum.s.fMtrrRead);
+                    if (pVM->cpum.s.fMtrrRead)
+                    {
+                        /** @cfgm{/CPUM/MtrrVarCountIsVirtual, boolean, true}
+                         * When enabled, the number of variable-range MTRRs are virtualized. When disabled,
+                         * the number of variable-range MTRRs are derived from the CPU profile. Unless
+                         * guests have problems with a virtualized number of variable-range MTRRs, it is
+                         * recommended to keep this enabled so that there are sufficient MTRRs to fully
+                         * describe all regions of the guest RAM. */
+                        bool fMtrrVarCountIsVirt;
+                        rc = CFGMR3QueryBoolDef(pCpumCfg, "MtrrVarCountIsVirtual", &fMtrrVarCountIsVirt, true);
+                        AssertRCReturn(rc, rc);
+
+                        rc = cpumR3InitMtrrCap(pVM, fMtrrVarCountIsVirt);
+                        if (RT_SUCCESS(rc))
+                        { /* likely */ }
+                        else
+                            return rc;
+                    }
+                }
+
+                /*
+                 * Finally, initialize guest VMX MSRs.
+                 *
+                 * This needs to be done -after- exploding guest features and sanitizing CPUID leaves
+                 * as constructing VMX capabilities MSRs rely on CPU feature bits like long mode,
+                 * unrestricted-guest execution, CR4 feature bits and possibly more in the future.
+                 */
+                /** @todo r=bird: given that long mode never used to be enabled before the
+                 *        VMINITCOMPLETED_RING0 state, and we're a lot earlier here in ring-3
+                 *        init, the above comment cannot be entirely accurate. */
+                if (pVM->cpum.s.GuestFeatures.fVmx)
+                {
+                    Assert(Config.fNestedHWVirt);
+                    cpumR3InitVmxGuestFeaturesAndMsrs(pVM, pCpumCfg, &pHostMsrs->hwvirt.vmx, &GuestMsrs.hwvirt.vmx);
+
+                    /* Copy MSRs to all VCPUs */
+                    PCVMXMSRS pVmxMsrs = &GuestMsrs.hwvirt.vmx;
+                    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+                    {
+                        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+                        memcpy(&pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs, pVmxMsrs, sizeof(*pVmxMsrs));
+                    }
+                }
+
+                return VINF_SUCCESS;
             }
-            else
-            {
-                /** @cfgm{/CPUM/MtrrReadOnly, boolean, false}
-                 * Whether to enable MTRR read-only support and to initialize mapping of guest
-                 * memory via MTRRs. When disabled, MTRRs are left blank, returns 0 on reads and
-                 * ignores writes. Some guests like GNU/Linux recognize a virtual system when MTRRs
-                 * are left blank but some guests may expect their RAM to be mapped via MTRRs
-                 * similar to real hardware. */
-                rc = CFGMR3QueryBoolDef(pCpumCfg, "MtrrReadOnly", &pVM->cpum.s.fMtrrRead, false);
-                AssertRCReturn(rc, rc);
-                LogRel(("CPUM: Enabled MTRR read-only support\n"));
-            }
-
-            /* Setup MTRR capability based on what the guest CPU profile (typically host) supports. */
-            Assert(!pVM->cpum.s.fMtrrWrite || pVM->cpum.s.fMtrrRead);
-            if (pVM->cpum.s.fMtrrRead)
-            {
-                /** @cfgm{/CPUM/MtrrVarCountIsVirtual, boolean, true}
-                 * When enabled, the number of variable-range MTRRs are virtualized. When disabled,
-                 * the number of variable-range MTRRs are derived from the CPU profile. Unless
-                 * guests have problems with a virtualized number of variable-range MTRRs, it is
-                 * recommended to keep this enabled so that there are sufficient MTRRs to fully
-                 * describe all regions of the guest RAM. */
-                bool fMtrrVarCountIsVirt;
-                rc = CFGMR3QueryBoolDef(pCpumCfg, "MtrrVarCountIsVirtual", &fMtrrVarCountIsVirt, true);
-                AssertRCReturn(rc, rc);
-
-                rc = cpumR3InitMtrrCap(pVM, fMtrrVarCountIsVirt);
-                if (RT_SUCCESS(rc))
-                { /* likely */ }
-                else
-                    return rc;
-            }
-        }
-
-        /*
-         * Finally, initialize guest VMX MSRs.
-         *
-         * This needs to be done -after- exploding guest features and sanitizing CPUID leaves
-         * as constructing VMX capabilities MSRs rely on CPU feature bits like long mode,
-         * unrestricted-guest execution, CR4 feature bits and possibly more in the future.
-         */
-        /** @todo r=bird: given that long mode never used to be enabled before the
-         *        VMINITCOMPLETED_RING0 state, and we're a lot earlier here in ring-3
-         *        init, the above comment cannot be entirely accurate. */
-        if (pVM->cpum.s.GuestFeatures.fVmx)
-        {
-            Assert(Config.fNestedHWVirt);
-            cpumR3InitVmxGuestFeaturesAndMsrs(pVM, pCpumCfg, &pHostMsrs->hwvirt.vmx, &GuestMsrs.hwvirt.vmx);
-
-            /* Copy MSRs to all VCPUs */
-            PCVMXMSRS pVmxMsrs = &GuestMsrs.hwvirt.vmx;
-            for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
-            {
-                PVMCPU pVCpu = pVM->apCpusR3[idCpu];
-                memcpy(&pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs, pVmxMsrs, sizeof(*pVmxMsrs));
-            }
-        }
-
-        return VINF_SUCCESS;
-    }
-
-    /*
-     * Failed before switching to hyper heap.
-     */
-    RTMemFree(pCpum->GuestInfo.paCpuIdLeavesR3);
-    pCpum->GuestInfo.paCpuIdLeavesR3 = NULL;
+
+            /*
+             * Failed before/while switching to internal VM structure storage.
+             */
+            RTMemFree(pCpum->GuestInfo.paCpuIdLeavesR3);
+            pCpum->GuestInfo.paCpuIdLeavesR3 = NULL;
+        }
+    }
     RTMemFree(pCpum->GuestInfo.paMsrRangesR3);
     pCpum->GuestInfo.paMsrRangesR3 = NULL;
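
The bulk of this hunk is re-indentation: the same configuration code now sits inside nested RT_SUCCESS(rc) blocks, and the substantive change is the ordering, with cpumR3CpuIdInstallAndExplodeLeaves() moved ahead of cpumR3MsrReconcileWithCpuId() and cpumR3MsrApplyFudge() so the MSR steps operate on the final exploded feature set. A compilable skeleton of the new flow (the stub functions are stand-ins for the real cpumR3* routines, and this RT_SUCCESS is a simplified stand-in as well):

    #include <stdio.h>

    #define VINF_SUCCESS 0
    #define RT_SUCCESS(rc) ((rc) >= 0)

    /* Stubs standing in for the real CPUM routines referenced in the diff. */
    static int installAndExplodeLeaves(void) { puts("install CPUID leaves + explode features"); return VINF_SUCCESS; }
    static int reconcileMsrsWithCpuId(void)  { puts("reconcile MSRs with CPUID");               return VINF_SUCCESS; }
    static int applyMsrFudge(void)           { puts("apply MSR fudging");                       return VINF_SUCCESS; }

    int main(void)
    {
        int rc = installAndExplodeLeaves();   /* moved to the front by r105020 */
        if (RT_SUCCESS(rc))
            rc = reconcileMsrsWithCpuId();    /* now sees the exploded feature set */
        if (RT_SUCCESS(rc))
            rc = applyMsrFudge();             /* gated by /CPUM/FudgeMSRs (default true) */
        printf("rc=%d\n", rc);
        return rc;
    }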
     
             {
                 pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 0);
+#ifdef RT_ARCH_AMD64
                 if (   !pLeaf
                     || !(pVM->cpum.s.HostFeatures.fIbpb || pVM->cpum.s.HostFeatures.fIbrs))

                     return;
                 }
+#else
+                if (!pLeaf)
+                {
+                    LogRel(("CPUM: WARNING! Can't turn on Speculation Control without leaf 0x00000007!\n"));
+                    return;
+                }
+#endif
 
                 /* The feature can be enabled. Let's see what we can actually do. */
                 pVM->cpum.s.GuestFeatures.fSpeculationControl = 1;
 
+#ifdef RT_ARCH_AMD64
                 /* We will only expose STIBP if IBRS is present to keep things simpler (simple is not an option). */
                 if (pVM->cpum.s.HostFeatures.fIbrs)
+#endif
                 {
                     pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB;
                     pVM->cpum.s.GuestFeatures.fIbrs = 1;
+#ifdef RT_ARCH_AMD64
                     if (pVM->cpum.s.HostFeatures.fStibp)
+#endif
                     {
                         pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_STIBP;

                 }
 
+#ifdef RT_ARCH_AMD64
                 if (pVM->cpum.s.HostFeatures.fArchCap)
+#endif
                 {
                     /* Install the architectural capabilities MSR. */
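
The #ifdef RT_ARCH_AMD64 guards added in this hunk all follow one pattern: the HostFeatures checks are only meaningful when the host really is an x86/AMD64 CPU, so on other hosts (such as ARM) the preprocessor drops the condition and the guarded block runs unconditionally. A self-contained sketch of the pattern (hostHasArchCap() is a hypothetical stand-in for pVM->cpum.s.HostFeatures.fArchCap):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the host-feature flag; real code reads it from HostFeatures. */
    bool hostHasArchCap(void) { return false; }

    int main(void)
    {
    #ifdef RT_ARCH_AMD64
        if (hostHasArchCap())   /* x86 host: expose only what the host has  */
    #endif
        {                       /* non-x86 host: always install the MSR     */
            puts("install architectural capabilities MSR");
        }
        return 0;
    }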