VirtualBox

Changeset 68406 in vbox


Ignore:
Timestamp:
Aug 14, 2017 10:22:55 AM (8 years ago)
Author:
vboxsync
Message:

VMM/HMSVMR0: Nested Hw.virt: bits.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r68364 r68406  
    912912    }
    913913
    914 #ifdef VBOX_WITH_NESTED_HWVIRT
    915     /*
    916      * Only if the nested hypervisor says it does not need to flush anything in the TLB,
    917      * can we possibly apply it on the host. Otherwise, the nested-guest TLB flush setting
    918      * should be used and then the host settings be added on top.
    919      */
    920     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    921     {
    922         PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    923         if (pVmcbNstGstCache->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
    924             pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
    925         else
    926             pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = pVmcbNstGstCache->TLBCtrl.n.u8TLBFlush;
    927     }
    928 #else
    929     RT_NOREF(pCtx);
    930     pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
    931 #endif
    932 
     914    /*
     915     * If the CPU has AMD erratum 170, we need to flush the entire TLB for each world switch. Sad.
     916     * This host CPU requirement takes precedence.
     917     */
    933918    if (pVM->hm.s.svm.fAlwaysFlushTLB)
    934919    {
    935         /*
    936          * This is the AMD erratum 170. We need to flush the entire TLB for each world switch. Sad.
    937          */
    938920        pCpu->uCurrentAsid               = 1;
    939921        pVCpu->hm.s.uCurrentAsid         = 1;
     
    948930            pVCpu->hm.s.idLastCpu = pCpu->idCpu;
    949931    }
    950     else if (pVCpu->hm.s.fForceTLBFlush)
    951     {
    952         /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
    953         pVmcb->ctrl.u64VmcbCleanBits    &= ~HMSVM_VMCB_CLEAN_NP;
    954 
    955         if (fNewAsid)
    956         {
    957             ++pCpu->uCurrentAsid;
    958             bool fHitASIDLimit = false;
    959             if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
     932    else
     933    {
     934#ifdef VBOX_WITH_NESTED_HWVIRT
     935        /*
     936         * Only if the nested hypervisor says it does not need to flush anything in the TLB,
     937         * can we possibly apply it on the host. Otherwise, the nested-guest TLB flush setting
     938         * should be used and then the host settings be added on top.
     939         */
     940        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     941        {
     942            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     943            if (pVmcbNstGstCache->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
     944                pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
     945            else
     946                pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = pVmcbNstGstCache->TLBCtrl.n.u8TLBFlush;
     947        }
     948#else
     949        RT_NOREF(pCtx);
     950        pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
     951#endif
     952        if (pVCpu->hm.s.fForceTLBFlush)
     953        {
     954            /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
     955            pVmcb->ctrl.u64VmcbCleanBits    &= ~HMSVM_VMCB_CLEAN_NP;
     956
     957            if (fNewAsid)
    960958            {
    961                 pCpu->uCurrentAsid = 1;      /* Wraparound at 1; host uses 0 */
    962                 pCpu->cTlbFlushes++;         /* All VCPUs that run on this host CPU must use a new ASID. */
    963                 fHitASIDLimit      = true;
    964 
    965                 if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     959                ++pCpu->uCurrentAsid;
     960                bool fHitASIDLimit = false;
     961                if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
    966962                {
    967                     pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    968                     pCpu->fFlushAsidBeforeUse = true;
     963                    pCpu->uCurrentAsid = 1;      /* Wraparound at 1; host uses 0 */
     964                    pCpu->cTlbFlushes++;         /* All VCPUs that run on this host CPU must use a new ASID. */
     965                    fHitASIDLimit      = true;
     966
     967                    if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     968                    {
     969                        pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
     970                        pCpu->fFlushAsidBeforeUse = true;
     971                    }
     972                    else
     973                    {
     974                        pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
     975                        pCpu->fFlushAsidBeforeUse = false;
     976                    }
    969977                }
    970                 else
     978
     979                if (   !fHitASIDLimit
     980                    && pCpu->fFlushAsidBeforeUse)
    971981                {
    972                     pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
    973                     pCpu->fFlushAsidBeforeUse = false;
     982                    if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     983                        pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
     984                    else
     985                    {
     986                        pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
     987                        pCpu->fFlushAsidBeforeUse = false;
     988                    }
    974989                }
     990
     991                pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
     992                pVCpu->hm.s.idLastCpu    = pCpu->idCpu;
     993                pVCpu->hm.s.cTlbFlushes  = pCpu->cTlbFlushes;
    975994            }
    976 
    977             if (   !fHitASIDLimit
    978                 && pCpu->fFlushAsidBeforeUse)
     995            else
    979996            {
    980997                if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
    981998                    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    982999                else
    983                 {
    9841000                    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
    985                     pCpu->fFlushAsidBeforeUse = false;
    986                 }
    9871001            }
    9881002
    989             pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
    990             pVCpu->hm.s.idLastCpu    = pCpu->idCpu;
    991             pVCpu->hm.s.cTlbFlushes  = pCpu->cTlbFlushes;
    992         }
    993         else
    994         {
    995             if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
    996                 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    997             else
    998                 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
    999         }
    1000 
    1001         pVCpu->hm.s.fForceTLBFlush = false;
     1003            pVCpu->hm.s.fForceTLBFlush = false;
     1004        }
    10021005    }
    10031006
     
    10101013
    10111014#ifdef VBOX_WITH_NESTED_HWVIRT
    1012     Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_NOTHING);
     1015    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx) || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_NOTHING);
    10131016#endif
    10141017
     
    13421345static void hmR0SvmLoadGuestControlRegsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
    13431346{
     1347    /*
     1348     * Guest CR0.
     1349     */
     1350    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
     1351    {
     1352        pVmcbNstGst->guest.u64CR0 = pCtx->cr0;
     1353        pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
     1354        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
     1355    }
     1356
    13441357    /*
    13451358     * Guest CR2.
     
    20062019               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
    20072020
    2008     Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss.Sel, pCtx->rsp));
     2021    Log4(("hmR0SvmLoadGuestState: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 CR4=%#RX32\n", pCtx->cs.Sel, pCtx->rip,
     2022          pCtx->eflags.u, pCtx->cr0, pCtx->cr3, pCtx->cr4));
    20092023    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
    20102024    return rc;
     
    20572071        hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
    20582072
     2073        PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     2074        PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
     2075
    20592076        /*
    20602077         * The IOPM of the nested-guest can be ignored because the guest always
     
    20622079         * into the nested-guest one and swap it back on the #VMEXIT.
    20632080         */
    2064         PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    2065         PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
    2066         pVmcbNstGstCtrl->u64IOPMPhysAddr  = g_HCPhysIOBitmap;
     2081        pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
    20672082
    20682083        /*
     
    21382153               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
    21392154
    2140     Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss.Sel, pCtx->rsp));
     2155    Log4(("hmR0SvmLoadGuestStateNested: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 CR4=%#RX32\n", pCtx->cs.Sel, pCtx->rip,
     2156          pCtx->eflags.u, pCtx->cr0, pCtx->cr3, pCtx->cr4));
    21412157    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
    21422158    return rc;
     
    21662182        if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    21672183            hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx);
     2184        else
     2185            Assert(pVmcb->guest.u64CR0 == pCtx->cr0);
    21682186#else
    21692187        hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx);
     
    22272245    if (CPUMIsGuestInNestedHwVirtMode(pMixedCtx))
    22282246    {
    2229         pMixedCtx->cr3        = pVmcb->guest.u64CR3;
    2230         pMixedCtx->cr4        = pVmcb->guest.u64CR4;
    2231         pMixedCtx->cr0        = pVmcb->guest.u64CR0;
     2247        pMixedCtx->cr3    = pVmcb->guest.u64CR3;
     2248        pMixedCtx->cr4    = pVmcb->guest.u64CR4;
     2249        pMixedCtx->cr0    = pVmcb->guest.u64CR0;
    22322250    }
    22332251#endif
     
    40274045#ifdef VBOX_WITH_NESTED_HWVIRT
    40284046/**
     4047 * Wrapper for running the nested-guest code in AMD-V.
     4048 *
     4049 * @returns VBox strict status code.
     4050 * @param   pVM         The cross context VM structure.
     4051 * @param   pVCpu       The cross context virtual CPU structure.
     4052 * @param   pCtx        Pointer to the guest-CPU context.
     4053 *
     4054 * @remarks No-long-jump zone!!!
     4055 */
     4056DECLINLINE(int) hmR0SvmRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     4057{
     4058    /*
     4059     * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
     4060     * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
     4061     * Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
     4062     */
     4063#ifdef VBOX_WITH_KERNEL_USING_XMM
     4064    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
     4065                             pVCpu->hm.s.svm.pfnVMRun);
     4066#else
     4067    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
     4068#endif
     4069}
     4070
     4071
     4072/**
    40294073 * Performs some essential restoration of state after running nested-guest code in
    40304074 * AMD-V.
     
    43934437         */
    43944438        hmR0SvmPreRunGuestCommittedNested(pVM, pVCpu, pCtx, &SvmTransient);
    4395         rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
     4439
     4440        rc = hmR0SvmRunGuestNested(pVM, pVCpu, pCtx);
    43964441
    43974442        /* Restore any residual host-state and save any bits shared between host
     
    43994444        hmR0SvmPostRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient, rc);
    44004445
     4446        /** @todo This needs some work... we probably should cause a \#VMEXIT on
     4447         *        SVM_EXIT_INVALID and handle rc != VINF_SUCCESS differently. */
    44014448        if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for VMRUN errors. */
    44024449                        || SvmTransient.u64ExitCode == SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
     
    46994746        case SVM_EXIT_INTR:
    47004747        {
    4701             if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_INTR)
    4702                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4748            /* We shouldn't direct physical interrupts to the nested-guest. */
    47034749            return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
    47044750        }
     
    57545800{
    57555801    /*
    5756      * Disable the global interrupt flag to not cause any interrupts or NMIs
    5757      * in the guest.
    5758      */
    5759     pCtx->hwvirt.svm.fGif = 0;
    5760 
    5761     /*
    5762      * Restore the guest's "host" state.
    5763      */
    5764     CPUMSvmVmExitRestoreHostState(pCtx);
    5765 
    5766     /*
    5767      * Restore the guest's force-flags.
    5768      */
    5769     if (pCtx->hwvirt.fLocalForcedActions)
    5770     {
    5771         VMCPU_FF_SET(pVCpu, pCtx->hwvirt.fLocalForcedActions);
    5772         pCtx->hwvirt.fLocalForcedActions = 0;
    5773     }
    5774 
    5775     /*
    57765802     * Restore the modifications we did to the nested-guest VMCB in order
    57775803     * to execute the nested-guest in SVM R0.
     
    57845810
    57855811    /*
    5786      * Write the nested-guest VMCB back to nested-guest memory.
     5812     * Write the nested-guest VMCB back to guest memory.
    57875813     */
    57885814    RTGCPHYS const GCPhysVmcb = pCtx->hwvirt.svm.GCPhysVmcb;
     
    57955821    memset(pVmcbNstGstCtrl, 0, sizeof(*pVmcbNstGstCtrl));
    57965822    Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
     5823
     5824    /*
     5825     * Disable the global interrupt flag to not cause any interrupts or NMIs
     5826     * in the guest.
     5827     */
     5828    pCtx->hwvirt.svm.fGif = 0;
     5829
     5830    /*
     5831     * Restore the guest's "host" state.
     5832     */
     5833    CPUMSvmVmExitRestoreHostState(pCtx);
     5834
     5835    /*
     5836     * Restore the guest's force-flags.
     5837     */
     5838    if (pCtx->hwvirt.fLocalForcedActions)
     5839    {
     5840        VMCPU_FF_SET(pVCpu, pCtx->hwvirt.fLocalForcedActions);
     5841        pCtx->hwvirt.fLocalForcedActions = 0;
     5842    }
    57975843
    57985844    /*
     
    59145960         * IO permission bitmap (IOPM).
    59155961         */
    5916         RTHCPHYS HCPhysNstGstMsrpm;
    5917         rc = PGMPhysGCPhys2HCPhys(pVM, pVmcbNstGstCtrl->u64MSRPMPhysAddr, &HCPhysNstGstMsrpm);
     5962        RTGCPHYS const GCPhysIOBitmap = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
     5963        rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap), GCPhysIOBitmap,
     5964                                     SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);
    59185965        if (RT_FAILURE(rc))
    59195966        {
    5920             Log(("hmR0SvmExecVmrun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
     5967            Log(("hmR0SvmExecVmrun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));
    59215968            pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
    59225969            return hmR0SvmExecVmexit(pVCpu, pCtx);
     
    60346081        pCtx->hwvirt.svm.fGif = 1;
    60356082
     6083        Log4(("hmR0SvmExecVmrun: CR0=%#RX32 CR3=%#RX64 CR4=%#RX32\n", pCtx->cr0, pCtx->cr3, pCtx->cr4));
    60366084        return hmR0SvmNstGstWorldSwitch(pVCpu, pCtx);
    60376085    }
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette