VirtualBox

Ignore:
Timestamp:
Jan 21, 2021 7:56:26 PM (4 years ago)
Author:
vboxsync
Message:

VMM/HMSVM: Straighten out the svm vmrun assembly code by having a different version for each of the 3 branch conditions. The C code changes which one it calls when the conditions change (only one of which does so at runtime). Should be better for spectre fighting and general mojo.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r87332 r87359  
    705705
    706706/**
     707 * Sets pfnVMRun to the best suited variant.
     708 *
     709 * This must be called whenever anything changes relative to the SVMR0VMRun
     710 * variant selection:
     711 *      - pVCpu->hm.s.fLoadSaveGuestXcr0
     712 *      - CPUMCTX_WSF_IBPB_ENTRY in pVCpu->cpum.GstCtx.fWorldSwitcher
     713 *      - CPUMCTX_WSF_IBPB_EXIT  in pVCpu->cpum.GstCtx.fWorldSwitcher
     714 *      - CPUMIsGuestFPUStateActive() (windows only)
     715 *      - CPUMCTX.fXStateMask (windows only)
     716 *
     717 * We currently ASSUME that neither CPUMCTX_WSF_IBPB_ENTRY nor
     718 * CPUMCTX_WSF_IBPB_EXIT cannot be changed at runtime.
     719 */
     720static void hmR0SvmUpdateRunFunction(PVMCPUCC pVCpu)
     721{
     722    static const PFNHMSVMVMRUN s_apfnHmR0SvmVmRunFunctions[] =
     723    {
     724        hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit,
     725        hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit,
     726        hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit,
     727        hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit,
     728        hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit,
     729        hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit,
     730        hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit,
     731        hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit,
     732    };
     733    uintptr_t const idx = (pVCpu->hm.s.fLoadSaveGuestXcr0                             ? 1 : 0)
     734                        | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_IBPB_ENTRY ? 2 : 0)
     735                        | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_IBPB_EXIT  ? 4 : 0);
     736    PFNHMSVMVMRUN const pfnVMRun = s_apfnHmR0SvmVmRunFunctions[idx];
     737    if (pVCpu->hm.s.svm.pfnVMRun != pfnVMRun)
     738        pVCpu->hm.s.svm.pfnVMRun = pfnVMRun;
     739}
     740
     741
     742/**
     743 * Selector FNHMSVMVMRUN implementation.
     744 */
     745static DECLCALLBACK(int) hmR0SvmVMRunSelector(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB)
     746{
     747    hmR0SvmUpdateRunFunction(pVCpu);
     748    return pVCpu->hm.s.svm.pfnVMRun(pVM, pVCpu, HCPhysVMCB);
     749}
     750
     751
     752/**
    707753 * Does per-VM AMD-V initialization.
    708754 *
     
    745791         * We now use a single handler for both 32-bit and 64-bit guests, see @bugref{6208#c73}.
    746792         */
    747         pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
     793        pVCpu->hm.s.svm.pfnVMRun = hmR0SvmVMRunSelector;
    748794
    749795        /*
     
    16221668
    16231669    /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
    1624     pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
     1670    bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
     1671    if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0)
     1672    {
     1673        pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
     1674        hmR0SvmUpdateRunFunction(pVCpu);
     1675    }
    16251676
    16261677    /* Avoid intercepting CR4 reads if the guest and shadow CR4 values are identical. */
     
    65136564    {
    65146565        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    6515         pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
    6516         Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], pVCpu->hm.s.fLoadSaveGuestXcr0,
    6517                   pCtx->cr4));
     6566        bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
     6567        Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], fLoadSaveGuestXcr0, pCtx->cr4));
     6568        if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0)
     6569        {
     6570            pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
     6571            hmR0SvmUpdateRunFunction(pVCpu);
     6572        }
    65186573    }
    65196574    else if (rcStrict == VINF_IEM_RAISED_XCPT)
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette