VirtualBox

Changeset 20516 in vbox for trunk/src/VBox


Timestamp:
Jun 12, 2009 1:46:08 PM
Author:
vboxsync
Message:

Moved VBOX_WITH_VMMR0_DISABLE_PREEMPTION blocks down
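
The moved blocks pair ASMIntDisableFlags() with RTThreadPreemptIsPending(): interrupt delivery is cut off first, and only then is the pending-preemption flag tested, so no poke (IPI, timer tick) can slip in between the test and the world switch. Pushing the pair further down the entry path lets the calls that may jump back to ring-3 (SVMR0CheckPendingInterrupt, PDMApicGetTPR, SVMR0LoadGuestState) keep running with interrupts enabled; the comment removed by this changeset had noted it "would've been nice to do this further down". Below is a minimal userspace analogue of that ordering, assuming nothing beyond the C++ standard library; the atomic flag, the fake interrupt switch, and the names IntDisable/IntRestore/RunGuestOnce are illustrative stand-ins, not VMM code.

    #include <atomic>
    #include <cstdio>

    // Userspace stand-ins, not VMM code: the atomic flag models the per-thread
    // "preemption pending" state an IPI or timer tick would set, and the bool
    // models the CPU interrupt flag that ASMIntDisableFlags() clears.
    static std::atomic<bool> g_fPreemptPending{false};
    static bool              g_fIntEnabled = true;

    static bool IntDisable()             // analogue of ASMIntDisableFlags()
    {
        bool fOld = g_fIntEnabled;
        g_fIntEnabled = false;
        return fOld;
    }

    static void IntRestore(bool fOld)    // analogue of ASMSetFlags(uOldEFlags)
    {
        g_fIntEnabled = fOld;
    }

    // Disable delivery first, then test: a poke arriving after the test would
    // still be pending when delivery is re-enabled, so nothing is lost.
    static int RunGuestOnce()
    {
        bool fOld = IntDisable();
        if (g_fPreemptPending.load())
        {
            IntRestore(fOld);            // bail out, as VINF_EM_RAW_INTERRUPT does
            return 1;
        }
        // ... the world switch would happen here, interrupts still off ...
        IntRestore(fOld);
        return 0;
    }

    int main()
    {
        g_fPreemptPending = true;
        std::printf("exit taken: %d\n", RunGuestOnce()); // prints "exit taken: 1"
        return 0;
    }

In the real functions the saved uOldEFlags is restored on the way out through the shared end label, which lies outside the quoted hunks.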

Location:
trunk/src/VBox/VMM/VMMR0
Files:
2 edited

Legend:

    unprefixed  unmodified context
    +           line added in r20516
    -           line removed from r20491
    …           unchanged lines elided by the changeset viewer
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r20491 → r20516

     }

+    /* When external interrupts are pending, we should exit the VM when IF is set. */
+    /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */
+    rc = SVMR0CheckPendingInterrupt(pVM, pVCpu, pVMCB, pCtx);
+    if (RT_FAILURE(rc))
+    {
+        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
+        goto end;
+    }
+
+    /* TPR caching using CR8 is only available in 64 bits mode or with 32 bits guests when X86_CPUID_AMD_FEATURE_ECX_CR8L is supported. */
+    /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!!!!! */
+    if (pVM->hwaccm.s.fHasIoApic)
+    {
+        bool fPending;
+
+        /* TPR caching in CR8 */
+        int rc = PDMApicGetTPR(pVCpu, &u8LastVTPR, &fPending);
+        AssertRC(rc);
+        pVMCB->ctrl.IntCtrl.n.u8VTPR = u8LastVTPR;
+
+        if (fPending)
+        {
+            /* A TPR change could activate a pending interrupt, so catch cr8 writes. */
+            pVMCB->ctrl.u16InterceptWrCRx |= RT_BIT(8);
+        }
+        else
+            /* No interrupts are pending, so we don't need to be explicitely notified.
+             * There are enough world switches for detecting pending interrupts.
+             */
+            pVMCB->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
+
+        fSyncTPR = !fPending;
+    }
+
+    /* All done! Let's start VM execution. */
+    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatInGC, x);
+
+    /* Enable nested paging if necessary (disabled each time after #VMEXIT). */
+    pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hwaccm.s.fNestedPaging;
+
+#ifdef LOG_ENABLED
+    pCpu = HWACCMR0GetCurrentCpu();
+    if (    pVCpu->hwaccm.s.idLastCpu   != pCpu->idCpu
+        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+    {
+        if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
+            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu, pCpu->idCpu));
+        else
+            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+    }
+    if (pCpu->fFlushTLB)
+        Log(("Force TLB flush: first time cpu %d is used -> flush\n", pCpu->idCpu));
+#endif
+
+    /*
+     * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING 3!
+     *       (until the actual world switch)
+     */
+#ifdef VBOX_STRICT
+    idCpuCheck = RTMpCpuId();
+#endif
+#ifdef LOG_ENABLED
+    VMMR0LogFlushDisable(pVCpu);
+#endif
+
+    /* Load the guest state; *must* be here as it sets up the shadow cr0 for lazy fpu syncing! */
+    rc = SVMR0LoadGuestState(pVM, pVCpu, pCtx);
+    if (rc != VINF_SUCCESS)
+    {
+        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
+        goto end;
+    }
+
 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     /*
      * Exit to ring-3 preemption/work is pending.
      *
      * Interrupts are disabled before the call to make sure we don't miss any interrupt
-     * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
-     * further down, but SVMR0CheckPendingInterrupt makes that hard.)
+     * that would flag preemption (IPI, timer tick, ++).
      *
      * Note! Interrupts must be disabled done *before* we check for TLB
…
     }
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
-#endif
-
-    /* When external interrupts are pending, we should exit the VM when IF is set. */
-    /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */
-    rc = SVMR0CheckPendingInterrupt(pVM, pVCpu, pVMCB, pCtx);
-    if (RT_FAILURE(rc))
-    {
-        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
-        goto end;
-    }
-
-    /* TPR caching using CR8 is only available in 64 bits mode or with 32 bits guests when X86_CPUID_AMD_FEATURE_ECX_CR8L is supported. */
-    /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!!!!! */
-    if (pVM->hwaccm.s.fHasIoApic)
-    {
-        bool fPending;
-
-        /* TPR caching in CR8 */
-        int rc = PDMApicGetTPR(pVCpu, &u8LastVTPR, &fPending);
-        AssertRC(rc);
-        pVMCB->ctrl.IntCtrl.n.u8VTPR = u8LastVTPR;
-
-        if (fPending)
-        {
-            /* A TPR change could activate a pending interrupt, so catch cr8 writes. */
-            pVMCB->ctrl.u16InterceptWrCRx |= RT_BIT(8);
-        }
-        else
-            /* No interrupts are pending, so we don't need to be explicitely notified.
-             * There are enough world switches for detecting pending interrupts.
-             */
-            pVMCB->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
-
-        fSyncTPR = !fPending;
-    }
-
-    /* All done! Let's start VM execution. */
-    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatInGC, x);
-
-    /* Enable nested paging if necessary (disabled each time after #VMEXIT). */
-    pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hwaccm.s.fNestedPaging;
-
-#ifdef LOG_ENABLED
-    pCpu = HWACCMR0GetCurrentCpu();
-    if (    pVCpu->hwaccm.s.idLastCpu   != pCpu->idCpu
-        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
-    {
-        if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
-            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu, pCpu->idCpu));
-        else
-            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
-    }
-    if (pCpu->fFlushTLB)
-        Log(("Force TLB flush: first time cpu %d is used -> flush\n", pCpu->idCpu));
-#endif
-
-    /*
-     * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING 3!
-     *       (until the actual world switch)
-     */
-#ifdef VBOX_STRICT
-    idCpuCheck = RTMpCpuId();
-#endif
-#ifdef LOG_ENABLED
-    VMMR0LogFlushDisable(pVCpu);
-#endif
-
-    /* Load the guest state; *must* be here as it sets up the shadow cr0 for lazy fpu syncing! */
-    rc = SVMR0LoadGuestState(pVM, pVCpu, pCtx);
-    if (rc != VINF_SUCCESS)
-    {
-        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
-        goto end;
-    }
-
-#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+#else
     /* Disable interrupts to make sure a poke will interrupt execution.
      * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
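
A side effect of the move is visible in the SVM hunk above: the TPR-caching code now runs before interrupts are disabled, which it requires because PDMApicGetTPR can take a lock and drop back to ring-3. Its decision logic reduces to toggling one write-intercept bit in the VMCB. Here is a self-contained sketch of just that bit manipulation; VmcbCtrlStub and UpdateCr8Intercept are hypothetical stand-ins, and only RT_BIT(8) and the field name come from the diff.

    #include <cstdint>
    #include <cstdio>

    // Reduced stand-in for the SVM VMCB control area: only the CR-write
    // intercept mask used by the moved hunk is modeled here.
    struct VmcbCtrlStub
    {
        uint16_t u16InterceptWrCRx; // CR0..CR15 write-intercept mask
    };

    // Intercept guest CR8 (TPR) writes only while an interrupt is pending:
    // lowering the TPR could unmask that interrupt, so the host must see the
    // write. With nothing pending, ordinary world switches are frequent
    // enough to pick up TPR changes, so the intercept is dropped.
    static bool UpdateCr8Intercept(VmcbCtrlStub &ctrl, bool fPending)
    {
        if (fPending)
            ctrl.u16InterceptWrCRx |=  (uint16_t)(1u << 8);  // RT_BIT(8)
        else
            ctrl.u16InterceptWrCRx &= ~(uint16_t)(1u << 8);
        return !fPending; // fSyncTPR: TPR can be synced back lazily on exit
    }

    int main()
    {
        VmcbCtrlStub ctrl = {0};
        bool fSyncTPR = UpdateCr8Intercept(ctrl, /*fPending=*/true);
        std::printf("intercept mask=%#x fSyncTPR=%d\n",
                    (unsigned)ctrl.u16InterceptWrCRx, fSyncTPR); // 0x100, 0
        return 0;
    }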
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r20491 → r20516

     }

-#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-    /*
-     * Exit to ring-3 preemption/work is pending.
-     *
-     * Interrupts are disabled before the call to make sure we don't miss any interrupt
-     * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
-     * further down, but VMXR0CheckPendingInterrupt makes that hard.)
-     *
-     * Note! Interrupts must be disabled done *before* we check for TLB
-     *       shootdowns rely on this.
-     */
-    uOldEFlags = ASMIntDisableFlags();
-    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
-    {
-        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
-        rc = VINF_EM_RAW_INTERRUPT;
-        goto end;
-    }
-    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
-#endif
-
     /* When external interrupts are pending, we should exit the VM when IF is set. */
     /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */
…
         goto end;

-#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    /*
+     * Exit to ring-3 preemption/work is pending.
+     *
+     * Interrupts are disabled before the call to make sure we don't miss any interrupt
+     * that would flag preemption (IPI, timer tick, ++).
+     *
+     * Note! Interrupts must be disabled done *before* we check for TLB
+     *       shootdowns rely on this.
+     */
+    uOldEFlags = ASMIntDisableFlags();
+    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
+    {
+        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
+        rc = VINF_EM_RAW_INTERRUPT;
+        goto end;
+    }
+    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
+#else
     /* Disable interrupts to make sure a poke will interrupt execution.
      * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
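
After the move, both files end in the same shape: the #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION arm disables interrupts and exits with VINF_EM_RAW_INTERRUPT if preemption was requested, while the #else arm merely disables interrupts so a poke can kick the thread out of guest execution. A compile-time sketch of that structure with the bodies reduced to prints; only the macro name is taken from the diff, and EnterGuest is a hypothetical stand-in.

    #include <cstdio>

    // Toggle to see the other arm; in the real build system this comes from
    // the VBox makefiles, not a source-level define.
    //#define VBOX_WITH_VMMR0_DISABLE_PREEMPTION

    static void EnterGuest()
    {
    #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
        // Preemption is already disabled for the whole ring-0 stretch; just
        // cut off interrupts and bail out if a preemption request is pending.
        std::printf("disable IF, check RTThreadPreemptIsPending, maybe exit\n");
    #else
        // Preemptible variant: disable interrupts so a poke (IPI) can
        // interrupt execution; TLB shootdowns rely on this window too.
        std::printf("disable IF so a poke interrupts execution\n");
    #endif
        std::printf("world switch\n");
    }

    int main()
    {
        EnterGuest();
        return 0;
    }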