VirtualBox

Changeset 46394 in vbox


Timestamp: Jun 5, 2013 11:29:51 AM
Author: vboxsync
Message:

VMM/HMSVMR0: AMD-V bits.

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

Diff r46365 → r46394, hunk 1 (around line 249, in SVMR0InitVM; Log changed to Log4):

     if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
     {
-        Log(("SVMR0InitVM: AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
+        Log4(("SVMR0InitVM: AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
         pVM->hm.s.svm.fAlwaysFlushTLB = true;
     }
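For context: HMAmdIsSubjectToErratum170() reports family/model/stepping, which are conventionally decoded from CPUID leaf 1 EAX. A minimal standalone sketch of that decoding follows; the helper name is hypothetical and not part of VBox, but the bit layout is the standard AMD one (extended fields only apply when the base family is 0xf).

    #include <stdint.h>

    /* Hypothetical helper: decode family/model/stepping from CPUID.1:EAX. */
    static void decodeCpuidFms(uint32_t uEax, uint32_t *puFamily, uint32_t *puModel, uint32_t *puStepping)
    {
        *puStepping = uEax & 0xf;
        *puModel    = (uEax >> 4) & 0xf;
        *puFamily   = (uEax >> 8) & 0xf;
        if (*puFamily == 0xf)                        /* Extended fields are only valid for family 0xf. */
        {
            *puFamily += (uEax >> 20) & 0xff;        /* Add the extended family. */
            *puModel  |= ((uEax >> 16) & 0xf) << 4;  /* Extended model goes in the high nibble. */
        }
    }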
     
Hunk 2 (around line 506; everything after the two unchanged context lines is new code):

 }

+
+/**
+ * Flushes the appropriate tagged-TLB entries.
+ *
+ * @param    pVCpu      Pointer to the VMCPU.
+ */
+static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu)
+{
+    PVM pVM              = pVCpu->CTX_SUFF(pVM);
+    PSVMVMCB pVmcb       = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
+
+    /*
+     * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
+     * This can happen both for start & resume due to long jumps back to ring-3.
+     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
+     * so we cannot reuse the ASIDs without flushing.
+     */
+    bool fNewAsid = false;
+    if (   pVCpu->hm.s.idLastCpu   != pCpu->idCpu
+        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
+    {
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
+        pVCpu->hm.s.fForceTLBFlush = true;
+        fNewAsid = true;
+    }
+
+    /* Set TLB flush state as checked until we return from the world switch. */
+    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
+
+    /* Check for explicit TLB shootdowns. */
+    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
+    {
+        pVCpu->hm.s.fForceTLBFlush = true;
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
+    }
+
+    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
+    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
+
+    if (pVM->hm.s.svm.fAlwaysFlushTLB)
+    {
+        /*
+         * This is the AMD erratum 170. We need to flush the entire TLB for each world switch. Sad.
+         */
+        pCpu->uCurrentAsid               = 1;
+        pVCpu->hm.s.uCurrentAsid         = 1;
+        pVCpu->hm.s.cTlbFlushes          = pCpu->cTlbFlushes;
+        pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
+    }
+    else if (pVCpu->hm.s.fForceTLBFlush)
+    {
+        if (fNewAsid)
+        {
+            ++pCpu->uCurrentAsid;
+            bool fHitASIDLimit = false;
+            if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
+            {
+                pCpu->uCurrentAsid        = 1;      /* Wraparound at 1; host uses 0. */
+                pCpu->cTlbFlushes++;                /* All VCPUs that run on this host CPU must use a new ASID. */
+                fHitASIDLimit             = true;
+
+                if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
+                {
+                    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
+                    pCpu->fFlushAsidBeforeUse = true;
+                }
+                else
+                {
+                    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
+                    pCpu->fFlushAsidBeforeUse = false;
+                }
+            }
+
+            if (   !fHitASIDLimit
+                && pCpu->fFlushAsidBeforeUse)
+            {
+                if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
+                    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
+                else
+                {
+                    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
+                    pCpu->fFlushAsidBeforeUse = false;
+                }
+            }
+
+            pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
+            pVCpu->hm.s.cTlbFlushes  = pCpu->cTlbFlushes;
+        }
+        else
+        {
+            if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
+                pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
+            else
+                pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
+        }
+
+        pVCpu->hm.s.fForceTLBFlush = false;
+    }
+    else
+    {
+        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
+         *        not be executed. See hmQueueInvlPage() where it is commented
+         *        out. Support individual entry flushing someday. */
+        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
+        {
+            /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
+            for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
+                SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pVmcb->ctrl.TLBCtrl.n.u32ASID);
+        }
+    }
+
+    pVCpu->hm.s.TlbShootdown.cPages = 0;
+    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
+
+    /* Update VMCB with the ASID. */
+    pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
+
+    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
+              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
+    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
+              ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
+    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
+              ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
+
+#ifdef VBOX_WITH_STATISTICS
+    if (pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
+    else if (   pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
+             || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
+    {
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
+    }
+    else
+        Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
+#endif
+}
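The per-host-CPU ASID bookkeeping above is the interesting part: the flush-generation counter (cTlbFlushes) lets an ASID wraparound on one host CPU invalidate every VCPU's cached ASID lazily, without any broadcast. A minimal standalone model of the scheme, with hypothetical type and function names; a VCPU compares its recorded generation against the host CPU's on the next world switch, exactly as the fNewAsid test at the top of the function does.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical, stripped-down model of the ASID scheme used above. */
    typedef struct { uint32_t uCurrentAsid; uint32_t cTlbFlushes; } HOSTCPU;
    typedef struct { uint32_t uAsid;        uint32_t cTlbFlushes; } GUESTCPU;

    /* Returns true when the caller must flush the TLB before using the new ASID. */
    static bool asidAlloc(HOSTCPU *pHost, GUESTCPU *pVCpu, uint32_t uMaxAsid)
    {
        bool fFlush = false;
        if (++pHost->uCurrentAsid >= uMaxAsid)
        {
            pHost->uCurrentAsid = 1;   /* ASID 0 belongs to the host; guests wrap to 1. */
            pHost->cTlbFlushes++;      /* Bump the generation: stale VCPUs must reflush. */
            fFlush = true;
        }
        pVCpu->uAsid       = pHost->uCurrentAsid;
        pVCpu->cTlbFlushes = pHost->cTlbFlushes;  /* Record the generation we allocated in. */
        return fFlush;
    }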
+
+
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+/**
+ * Prepares for and executes VMRUN (64-bit guests on a 32-bit host).
+ *
+ * @returns VBox status code.
+ * @param   HCPhysVmcbHost  Physical address of host VMCB.
+ * @param   HCPhysVmcb      Physical address of the VMCB.
+ * @param   pCtx            Pointer to the guest-CPU context.
+ * @param   pVM             Pointer to the VM.
+ * @param   pVCpu           Pointer to the VMCPU.
+ */
+DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
+{
+    uint32_t aParam[4];
+    aParam[0] = (uint32_t)(HCPhysVmcbHost);             /* Param 1: HCPhysVmcbHost - Lo. */
+    aParam[1] = (uint32_t)(HCPhysVmcbHost >> 32);       /* Param 1: HCPhysVmcbHost - Hi. */
+    aParam[2] = (uint32_t)(HCPhysVmcb);                 /* Param 2: HCPhysVmcb - Lo. */
+    aParam[3] = (uint32_t)(HCPhysVmcb >> 32);           /* Param 2: HCPhysVmcb - Hi. */
+
+    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, 4, &aParam[0]);
+}
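The lo/hi splitting above is the usual way to pass 64-bit physical addresses through a 32-bit parameter array. A trivial standalone check (illustrative only) that the split round-trips:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t const uPhys = UINT64_C(0x123456789abcd000);  /* Arbitrary test value. */
        uint32_t const uLo   = (uint32_t)uPhys;               /* Low dword, as in aParam[0]/[2]. */
        uint32_t const uHi   = (uint32_t)(uPhys >> 32);       /* High dword, as in aParam[1]/[3]. */
        assert((((uint64_t)uHi << 32) | uLo) == uPhys);       /* Reassembly recovers the address. */
        return 0;
    }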
+
+
+/**
+ * Executes the specified VMRUN handler in 64-bit mode.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   enmOp       The operation to perform.
+ * @param   cbParam     Number of parameters.
+ * @param   paParam     Array of 32-bit parameters.
+ */
+VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
+                                         uint32_t *paParam)
+{
+    AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
+    Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
+
+    /* Disable interrupts. */
+    RTHCUINTREG uOldEFlags = ASMIntDisableFlags();
+
+#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+    RTCPUID idHostCpu = RTMpCpuId();
+    CPUMR0SetLApic(pVM, idHostCpu);
+#endif
+
+    CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
+    CPUMSetHyperEIP(pVCpu, enmOp);
+    for (int i = (int)cbParam - 1; i >= 0; i--)
+        CPUMPushHyper(pVCpu, paParam[i]);
+
+    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
+    /* Call the switcher. */
+    int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
+
+    /* Restore interrupts. */
+    ASMSetFlags(uOldEFlags);
+    return rc;
+}
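The save/disable/restore pattern around the switcher call is worth noting: ASMIntDisableFlags() returns the previous flags register while clearing IF, and ASMSetFlags() puts it back, so interrupts are only re-enabled if they were enabled on entry. A minimal sketch of the same pattern (IPRT's asm.h provides both calls; the wrapper itself is hypothetical):

    #include <iprt/asm.h>

    /* Run a worker with interrupts disabled, preserving the caller's IF state. */
    static void callWithIntsDisabled(void (*pfnWorker)(void *pvUser), void *pvUser)
    {
        RTCCUINTREG const fSaved = ASMIntDisableFlags(); /* Save flags, then cli. */
        pfnWorker(pvUser);
        ASMSetFlags(fSaved);                             /* Restore EFLAGS/RFLAGS as they were. */
    }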
+
+#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
+
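For reference, the VMCB fields this changeset programs (ctrl.TLBCtrl.n.u32ASID and n.u8TLBFlush) correspond to the guest-ASID and TLB_CONTROL fields of the AMD-V VMCB control area. A rough illustrative layout; the type name is made up here and the encodings follow the AMD APM vol. 2, not VBox's headers (the single-context forms require the flush-by-ASID feature the code tests via AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID):

    #include <stdint.h>

    /* Illustrative sketch of the VMCB TLB-control quadword at VMCB offset 0x058. */
    typedef union
    {
        struct
        {
            uint32_t u32ASID;       /* +0x058: guest ASID (0 is reserved for the host). */
            uint8_t  u8TLBFlush;    /* +0x05c: TLB_CONTROL encoding, see below. */
            uint8_t  abReserved[3];
        } n;
        uint64_t u;
    } TLBCTRLSKETCH;

    /* TLB_CONTROL encodings per the AMD APM vol. 2:
     *   0x00  flush nothing on VMRUN
     *   0x01  flush the entire TLB
     *   0x03  flush this guest's entries
     *   0x07  flush this guest's non-global entries
     */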