VirtualBox

Changeset 48130 in vbox for trunk/src


Timestamp: Aug 28, 2013 5:14:38 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 88510
Message:

VMM: Enable thread-context hooks. Reimplemented event injection logic for VT-x and AMD-V.

Location: trunk/src/VBox/VMM
Files: 5 edited

Legend:

  (space)  unmodified line
  +        added line
  -        removed line
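
The heart of this changeset is splitting what used to be a single evaluate-and-inject pass into two phases: an evaluation phase that merely records the highest-priority deliverable event as pending (hmR0SvmEvaluatePendingEvent / hmR0VmxEvaluatePendingEvent), and an injection phase that commits the recorded event to the VMCB/VMCS once no further returns to ring-3 are possible (hmR0SvmInjectPendingEvent / hmR0VmxInjectPendingEvent). A minimal self-contained sketch of that pattern, using hypothetical stand-in types rather than the real VMM definitions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for the per-VCPU event state (mirrors pVCpu->hm.s.Event.*). */
    typedef struct VCPUSKETCH
    {
        bool     fEventPending;   /* like pVCpu->hm.s.Event.fPending       */
        uint64_t u64IntrInfo;     /* like pVCpu->hm.s.Event.u64IntrInfo    */
        bool     fNmiForceFlag;   /* stands in for VMCPU_FF_INTERRUPT_NMI  */
    } VCPUSKETCH;

    /* Phase 1: runs while returns to ring-3 are still possible; it only
       records the event to deliver, it does not touch the hardware. */
    static void evaluatePendingEvent(VCPUSKETCH *pVCpu)
    {
        if (!pVCpu->fEventPending && pVCpu->fNmiForceFlag)
        {
            pVCpu->u64IntrInfo   = 2;    /* placeholder NMI encoding       */
            pVCpu->fEventPending = true;
            pVCpu->fNmiForceFlag = false;
        }
        /* ...then external interrupts (PIC/APIC), and so on. */
    }

    /* Phase 2: runs with host interrupts disabled, past the last ring-3
       bail-out point; commits the recorded event to the VMCB/VMCS. */
    static void injectPendingEvent(VCPUSKETCH *pVCpu)
    {
        if (pVCpu->fEventPending)
        {
            /* writeEventToHardware(pVCpu->u64IntrInfo); hardware-specific */
            pVCpu->fEventPending = false;
        }
    }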
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

Diff r48044 → r48130:

@@ -196 +196 @@
 {
     /** The host's rflags/eflags. */
-    RTCCUINTREG     uEFlags;
+    RTCCUINTREG     uEflags;
 #if HC_ARCH_BITS == 32
     uint32_t        u32Alignment0;

@@ -2365 +2365 @@
 
 /**
- * Injects any pending events into the guest if the guest is in a state to
- * receive them.
+ * Evaluates the event to be delivered to the guest and sets it as the pending
+ * event.
  *
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pCtx        Pointer to the guest-CPU context.
  */
-static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    Assert(!TRPMHasTrap(pVCpu));
+static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(!pVCpu->hm.s.Event.fPending);
     Log4Func(("\n"));
 

@@ -2382 +2382 @@
     SVMEVENT Event;
     Event.u = 0;
-    if (pVCpu->hm.s.Event.fPending)                                /* First, inject any pending HM events. */
-    {
-        Event.u = pVCpu->hm.s.Event.u64IntrInfo;
-        Assert(Event.n.u1Valid);
-        bool fInject = true;
-        if (   Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ
-            && (   fBlockInt
-                || fIntShadow))
-        {
-            fInject = false;
-        }
-        else if (   Event.n.u3Type == SVM_EVENT_NMI
-                 && fIntShadow)
-        {
-            fInject = false;
-        }
-
-        if (fInject)
-        {
-            Log4(("Injecting pending HM event.\n"));
-
-            hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
-            pVCpu->hm.s.Event.fPending = false;
-
-#ifdef VBOX_WITH_STATISTICS
-            if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
-            else
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
-#endif
-        }
-        else
-            hmR0SvmSetVirtIntrIntercept(pVmcb);
-    }                                                              /** @todo SMI. SMIs take priority over NMIs. */
-    else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))   /* NMI. NMIs take priority over regular interrupts . */
+                                                              /** @todo SMI. SMIs take priority over NMIs. */
+    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))   /* NMI. NMIs take priority over regular interrupts . */
     {
         if (!fIntShadow)
         {
-            Log4(("Injecting NMI\n"));
+            Log4(("Pending NMI\n"));
 
             Event.n.u1Valid  = 1;

@@ -2426 +2393 @@
             Event.n.u3Type   = SVM_EVENT_NMI;
 
-            hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
+            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
-
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
         }
         else

@@ -2436 +2401 @@
     else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
     {
-        /* Check if there are guest external interrupts (PIC/APIC) pending and inject them, if the guest can receive them. */
+        /*
+         * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
+         * the interrupt ASAP. We must not execute any guest code until we inject the interrupt which is why it is
+         * evaluated here and not set as pending, solely based on the force-flags.
+         */
         if (   !fBlockInt
             && !fIntShadow)

@@ -2450 +2419 @@
                 Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
 
-                hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
+                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
             }
             else

@@ -2462 +2430 @@
         else
             hmR0SvmSetVirtIntrIntercept(pVmcb);
+    }
+}
+
+
+/**
+ * Injects any pending events into the guest if the guest is in a state to
+ * receive them.
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(!TRPMHasTrap(pVCpu));
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+    Log4Func(("\n"));
+
+    const bool fIntShadow = !!hmR0SvmGetGuestIntrShadow(pVCpu, pCtx);
+    const bool fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
+    PSVMVMCB pVmcb        = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+
+    SVMEVENT Event;
+    Event.u = 0;
+    if (pVCpu->hm.s.Event.fPending)                                /* First, inject any pending HM events. */
+    {
+#if defined(VBOX_STRICT) || defined(VBOX_WITH_STATISTICS)
+        Event.u = pVCpu->hm.s.Event.u64IntrInfo;
+        Assert(Event.n.u1Valid);
+        bool fInject = true;
+        if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
+        {
+            Assert(!fBlockInt);
+            Assert(!fIntShadow);
+        }
+        else if (Event.n.u3Type == SVM_EVENT_NMI)
+            Assert(!fIntShadow);
+#endif
+
+        Log4(("Injecting pending HM event.\n"));
+        hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
+        pVCpu->hm.s.Event.fPending = false;
+
+#ifdef VBOX_WITH_STATISTICS
+        if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
+        else
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
+#endif
     }
 

@@ -2719 +2735 @@
         return rc;
 
+    if (TRPMHasTrap(pVCpu))
+        hmR0SvmTrpmTrapToPendingEvent(pVCpu);
+    else if (!pVCpu->hm.s.Event.fPending)
+        hmR0SvmEvaluatePendingEvent(pVCpu, pCtx);
+
 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-    /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
-    pSvmTransient->uEFlags = ASMIntDisableFlags();
-    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
-    {
-        ASMSetFlags(pSvmTransient->uEFlags);
+    /*
+     * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
+     * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
+     *
+     * We need to check for force-flags that could've possible been altered since we last checked them (e.g.
+     * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
+     *
+     * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
+     * executing guest code.
+     */
+    pSvmTransient->uEflags = ASMIntDisableFlags();
+    if (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
+        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
+    {
+        ASMSetFlags(pSvmTransient->uEflags);
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
+        return VINF_EM_RAW_TO_R3;
+    }
+    else if (RTThreadPreemptIsPending(NIL_RTTHREAD))
+    {
+        ASMSetFlags(pSvmTransient->uEflags);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
-        /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
         return VINF_EM_RAW_INTERRUPT;
     }
+
+    /* Indicate the start of guest execution. No more longjmps or returns to ring-3 from this point!!! */
     VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
 #endif
-
-    /* Convert any pending TRPM traps to HM events for injection. */
-    /** @todo Optimization: move this before disabling interrupts, restore state
-     *        using pVmcb->ctrl.EventInject.u. */
-    if (TRPMHasTrap(pVCpu))
-        hmR0SvmTrpmTrapToPendingEvent(pVCpu);
-
-    hmR0SvmInjectPendingEvent(pVCpu, pCtx);
 
     return VINF_SUCCESS;

@@ -2765 +2795 @@
 #ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
-    pSvmTransient->uEFlags = ASMIntDisableFlags();
+    /** @todo get rid of this. */
+    pSvmTransient->uEflags = ASMIntDisableFlags();
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
 #endif
+
+    hmR0SvmInjectPendingEvent(pVCpu, pCtx);
 
     /*

@@ -2913 +2946 @@
 
     Assert(!(ASMGetFlags() & X86_EFL_IF));
-    ASMSetFlags(pSvmTransient->uEFlags);                        /* Enable interrupts. */
+    ASMSetFlags(pSvmTransient->uEflags);                        /* Enable interrupts. */
 
     VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pMixedCtx);
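
Taken together, these hunks reorder the pre-run path: convert a TRPM trap or evaluate force-flags into a pending HM event while a return to ring-3 is still cheap, then disable host interrupts and re-check the force-flags one last time, and only then inject into the VMCB. A self-contained sketch of that last-chance check (stand-in names, not the real VMM APIs):

    #include <stdint.h>

    static volatile uint32_t g_fForceFlags;   /* set by other threads/IRQs  */

    /* Stand-ins for ASMIntDisableFlags()/ASMSetFlags(). */
    static uint64_t disableHostInterrupts(void)          { return 0; }
    static void     restoreHostInterrupts(uint64_t fEfl) { (void)fEfl; }

    static int preRunCommit(void)
    {
        uint64_t fEfl = disableHostInterrupts();

        /* Re-check the force-flags with interrupts off: a flag raised in
           the window since the last check can no longer be missed. */
        if (g_fForceFlags != 0)
        {
            restoreHostInterrupts(fEfl);
            return -1;               /* plays the role of VINF_EM_RAW_TO_R3 */
        }

        /* Point of no return: inject the pending event, enter the guest. */
        return 0;
    }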
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

Diff r48082 → r48130:

@@ -6345 +6345 @@
 
 /**
- * Injects any pending events into the guest if the guest is in a state to
- * receive them.
- *
- * @returns VBox status code (informational status codes included).
+ * Evaluates the event to be delivered to the guest and sets it as the pending
+ * event.
+ *
  * @param   pVCpu           Pointer to the VMCPU.
  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be

@@ -6354 +6353 @@
  *                          before using them.
  */
-static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
-{
+static void hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    Assert(!pVCpu->hm.s.Event.fPending);
+
     /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
     uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);

@@ -6364 +6365 @@
     Assert(   !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)      /* We don't support block-by-NMI and SMI yet.*/
            && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
-    Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF);       /* Cannot set block-by-STI when interrupts are disabled. */
+    Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF);     /* Cannot set block-by-STI when interrupts are disabled. */
     Assert(!TRPMHasTrap(pVCpu));
 
-    int rc = VINF_SUCCESS;
-    if (pVCpu->hm.s.Event.fPending)     /* First, inject any pending HM events. */
-    {
-        uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
-        bool fInject = true;
-        if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
-        {
-            rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
-            AssertRCReturn(rc, rc);
-            const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
-            if (   fBlockInt
-                || fBlockSti
-                || fBlockMovSS)
-            {
-                fInject = false;
-            }
-        }
-        else if (   uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
-                 && (   fBlockMovSS
-                     || fBlockSti))
-        {
-            /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
-            fInject = false;
-        }
-
-        if (fInject)
-        {
-            Log4(("Injecting pending event vcpu[%RU32]\n", pVCpu->idCpu));
-            rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
-                                        pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
-            AssertRCReturn(rc, rc);
-            pVCpu->hm.s.Event.fPending = false;
-
-#ifdef VBOX_WITH_STATISTICS
-            if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
-            else
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
-#endif
-        }
-        else
-            hmR0VmxSetIntWindowExitVmcs(pVCpu);
-    }                                                           /** @todo SMI. SMIs take priority over NMIs. */
-    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))    /* NMI. NMIs take priority over regular interrupts . */
+                                                           /** @todo SMI. SMIs take priority over NMIs. */
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))    /* NMI. NMIs take priority over regular interrupts . */
     {
         /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */

@@ -6416 +6375 @@
             && !fBlockSti)
         {
-            Log4(("Injecting NMI\n"));
+            /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
+            Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
             uint32_t u32IntrInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
             u32IntrInfo         |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
-            rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
-                                        0 /* GCPtrFaultAddress */, &uIntrState);
-            AssertRCReturn(rc, rc);
+
+            hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddres */);
             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
-
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
         }
         else

@@ -6432 +6389 @@
              && !pVCpu->hm.s.fSingleInstruction)
     {
-        /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
-        rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
-        AssertRCReturn(rc, rc);
+        /*
+         * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
+         * the interrupt ASAP. We must not execute any guest code until we inject the interrupt which is why it is
+         * evaluated here and not set as pending, solely based on the force-flags.
+         */
+        int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
+        AssertRC(rc);
         const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
         if (   !fBlockInt

@@ -6444 +6405 @@
             if (RT_SUCCESS(rc))
             {
-                Log4(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
+                Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt));
                 uint32_t u32IntrInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
                 u32IntrInfo         |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
-                rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */,  0 /* u32ErrCode */,
-                                            0 /* GCPtrFaultAddress */, &uIntrState);
-
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
+
+                hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrfaultAddress */);
             }
             else

@@ -6457 +6416 @@
                 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
-                rc = VINF_SUCCESS;
             }
         }

@@ -6463 +6421 @@
             hmR0VmxSetIntWindowExitVmcs(pVCpu);
     }
-
-    /*
-     * Delivery pending debug exception if the guest is single-stepping. The interruptibility-state could have been changed by
-     * hmR0VmxInjectEventVmcs() (e.g. real-on-v86 injecting software interrupts), re-evaluate it and set the BS bit.
-     */
-    fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
-    fBlockSti   = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
+}
+
+
+/**
+ * Injects any pending events into the guest if the guest is in a state to
+ * receive them.
+ *
+ * @returns VBox status code (informational status codes included).
+ * @param   pVCpu           Pointer to the VMCPU.
+ * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
+ *                          out-of-sync. Make sure to update the required fields
+ *                          before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
+    uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
+    bool fBlockMovSS    = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
+    bool fBlockSti      = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
+
+    Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
+    Assert(   !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)      /* We don't support block-by-NMI and SMI yet.*/
+           && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
+    Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF);       /* Cannot set block-by-STI when interrupts are disabled. */
+    Assert(!TRPMHasTrap(pVCpu));
+
+    int rc = VINF_SUCCESS;
+    if (pVCpu->hm.s.Event.fPending)
+    {
+#if defined(VBOX_STRICT) || defined(VBOX_WITH_STATISTICS)
+        uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
+        if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
+        {
+            rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
+            AssertRCReturn(rc, rc);
+            const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
+            Assert(!fBlockInt);
+            Assert(!fBlockSti);
+            Assert(!fBlockMovSS);
+        }
+        else if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
+        {
+            Assert(!fBlockSti);
+            Assert(!fBlockMovSS);
+        }
+#endif
+        Log4(("Injecting pending event vcpu[%RU32] u64IntrInfo=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntrInfo));
+        rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
+                                    pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
+        AssertRCReturn(rc, rc);
+
+        pVCpu->hm.s.Event.fPending = false;
+
+        /* Update the interruptibility-state as it could have been changed by
+           hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
+        fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
+        fBlockSti   = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
+
+#ifdef VBOX_WITH_STATISTICS
+        if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
+        else
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
+#endif
+    }
+
+    /* Delivery pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
     int rc2 = VINF_SUCCESS;
     if (   fBlockSti

@@ -7323 +7343 @@
 
     /*
-     * When thread-context hooks are used, load the required guest-state bits
-     * here before we go ahead and disable interrupts. We can handle getting preempted
+     * When thread-context hooks are used, load the required guest-state bits here
+     * before we go ahead and disable interrupts. We can handle getting preempted
      * while loading the guest state.
      */

@@ -7330 +7350 @@
         hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
 
+    /*
+     * Evaluate events as pending-for-injection into the guest. Toggling of force-flags here is safe as long as
+     * we update TRPM on premature exits to ring-3 before executing guest code. We must NOT restore the force-flags.
+     */
+    if (TRPMHasTrap(pVCpu))
+        hmR0VmxTrpmTrapToPendingEvent(pVCpu);
+    else if (!pVCpu->hm.s.Event.fPending)
+        hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
+
 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-    /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
+    /*
+     * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
+     * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
+     *
+     * We need to check for force-flags that could've possible been altered since we last checked them (e.g.
+     * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
+     *
+     * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
+     * executing guest code.
+     */
     pVmxTransient->uEflags = ASMIntDisableFlags();
-    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
+    if (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
+        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
+    {
+        ASMSetFlags(pVmxTransient->uEflags);
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
+        return VINF_EM_RAW_TO_R3;
+    }
+    else if (RTThreadPreemptIsPending(NIL_RTTHREAD))
     {
         ASMSetFlags(pVmxTransient->uEflags);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
-        /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
         return VINF_EM_RAW_INTERRUPT;
     }
+
+    /* Indicate the start of guest execution. No more longjmps or returns to ring-3 from this point!!! */
     VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

@@ -7345 +7391 @@
 
     /*
-     * Evaluates and injects any pending events, toggling force-flags and updating the guest-interruptibility
-     * state (interrupt shadow) in the VMCS. This -can- potentially be reworked to be done before disabling
-     * interrupts and handle returning to ring-3 afterwards, but requires very careful state restoration.
+     * Event injection might result in triple-faulting the VM (real-on-v86 case), which is why it's
+     * done here and not in hmR0VmxPreRunGuestCommitted() which doesn't expect failures.
      */
-    /** @todo Rework event evaluation and injection to be completely separate.
-     *  Update: Tried it, problem with handling halts. Control never returns to VT-x
-     *        if we exit VT-x with external interrupt pending in a TRPM event.
-     *        The EM loop probably needs to check for interrupts while halting. */
-    if (TRPMHasTrap(pVCpu))
-        hmR0VmxTrpmTrapToPendingEvent(pVCpu);
-
     rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
-    AssertRCReturn(rc, rc);
     return rc;
 }

@@ -7383 +7420 @@
 #ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
+    /** @todo get rid of this. */
     pVmxTransient->uEflags = ASMIntDisableFlags();
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

@@ -7389 +7427 @@
 
     /*
-     * Load the host state bits as we may've been preempted
-     * (only happens when thread-context hooks are used).
+     * Load the host state bits as we may've been preempted (only happens when
+     * thread-context hooks are used).
      */
-    int rc = VINF_SUCCESS;
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT)
     {
         Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
-        rc = hmR0VmxSaveHostState(pVM, pVCpu);
+        int rc = hmR0VmxSaveHostState(pVM, pVCpu);
         AssertRC(rc);
     }

@@ -7402 +7439 @@
 
     /*
-     * When thread-context hooks are not used we need to load the required
-     * guest state bits here i.e. when we can no longer be preempted.
+     * When thread-context hooks are -not- used we need to load the required
+     * guest state bits here i.e. when we can no longer be rescheduled.
      */
     if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))

@@ -7411 +7448 @@
         /*
          * If we are injecting events to a real-on-v86 mode guest, we may have to update
-         * RIP and soem other registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
+         * RIP and some other registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
          * Reload only the necessary state, the assertion will catch if other parts of the code
          * change.
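
The u32IntrInfo words assembled above follow the architectural VM-entry interruption-information layout from the Intel SDM: vector in bits 0-7, event type in bits 8-10, valid bit in bit 31. A small stand-alone sketch; the constants are stand-ins mirroring what the VMX_EXIT_INTERRUPTION_INFO_* macros encode:

    #include <stdint.h>

    #define INTR_INFO_TYPE_SHIFT    8                     /* bits 8..10: type   */
    #define INTR_INFO_TYPE_EXT_INT  UINT32_C(0)           /* external interrupt */
    #define INTR_INFO_TYPE_NMI      UINT32_C(2)           /* NMI                */
    #define INTR_INFO_VALID         UINT32_C(0x80000000)  /* bit 31: valid      */

    /* Builds the word for external interrupt vector u8Interrupt, matching
       the "u8Interrupt | VALID | (EXT_INT << SHIFT)" lines in the diff. */
    static uint32_t makeExtIntInfo(uint8_t u8Interrupt)
    {
        return (uint32_t)u8Interrupt
             | (INTR_INFO_TYPE_EXT_INT << INTR_INFO_TYPE_SHIFT)
             | INTR_INFO_VALID;
    }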
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

Diff r48083 → r48130:

@@ -441 +441 @@
     VMCPU_ASSERT_EMT(pVCpu);
     Assert(pVCpu->vmm.s.hR0ThreadCtx == NIL_RTTHREADCTX);
-#if 0 /* Not stable yet. */
+#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
     int rc = RTThreadCtxHooksCreate(&pVCpu->vmm.s.hR0ThreadCtx);
     if (   RT_FAILURE(rc)

@@ -546 +546 @@
              * infinitum). Let's just disable preemption for now...
              */
-            bool fPreemptDisabled = false;
-            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
-            if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
-            {
-                RTThreadPreemptDisable(&PreemptState);
-                fPreemptDisabled = true;
-            }
+            HM_DISABLE_PREEMPT_IF_NEEDED();
 
             /* We need to update the VCPU <-> host CPU mapping. */

@@ -562 +556 @@
 
             /* Restore preemption. */
-            if (fPreemptDisabled)
-                RTThreadPreemptRestore(&PreemptState);
+            HM_RESTORE_PREEMPT_IF_NEEDED();
             break;
         }
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

Diff r47808 → r48130:

@@ -2527 +2527 @@
                         }
                     }
+                    else if (TRPMHasTrap(pVCpu))
+                        rc = VINF_EM_RESCHEDULE;
                     else
                         rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
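
This two-line change is the EM-side counterpart of the rework: an event may now be left pending in TRPM when the EMT returns to ring-3 (see the @todo about halts removed from HMVMXR0.cpp above), so a halted VCPU must reschedule instead of blocking in VMR3WaitHalted(). A minimal sketch of the decision (stand-in names for the real EM loop):

    #include <stdbool.h>

    enum { HALT_WAIT, HALT_RESCHEDULE };

    /* fTrpmTrapPending plays the role of TRPMHasTrap(pVCpu). */
    static int emHaltedAction(bool fTrpmTrapPending)
    {
        if (fTrpmTrapPending)        /* new: a pending TRPM event must be  */
            return HALT_RESCHEDULE;  /* delivered, so re-enter execution   */
        return HALT_WAIT;            /* otherwise wait for an interrupt    */
    }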
  • trunk/src/VBox/VMM/include/HMInternal.h

Diff r48044 → r48130:

@@ -136 +136 @@
                                                   | HM_CHANGED_GUEST_DEBUG)
 /** @} */
-
-/** Disables preemption if required. */
-# define HM_DISABLE_PREEMPT_IF_NEEDED() \
-   RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
-   bool fPreemptDisabledInternal = false; \
-   if (RTThreadPreemptIsEnabled(NIL_RTTHREAD)) \
-   { \
-       Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu)); \
-       RTThreadPreemptDisable(&PreemptStateInternal); \
-       fPreemptDisabledInternal = true; \
-   }
-
-/** Restores preemption if previously disabled by HM_DISABLE_PREEMPT(). */
-# define HM_RESTORE_PREEMPT_IF_NEEDED() \
-   do \
-   { \
-        if (fPreemptDisabledInternal) \
-            RTThreadPreemptRestore(&PreemptStateInternal); \
-   } while (0)
 
 /** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
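
The two macros removed from this spot wrap a common ring-0 pattern, now used by the VMMR0.cpp hunk above around the VCPU <-> host-CPU mapping update: disable preemption only if it is currently enabled, remember whether we did, and restore it symmetrically. A self-contained sketch of the pattern, with stand-in functions in place of RTThreadPreemptIsEnabled/RTThreadPreemptDisable/RTThreadPreemptRestore:

    #include <stdbool.h>

    typedef struct PREEMPTSTATE { int iDummy; } PREEMPTSTATE;

    static bool preemptIsEnabled(void)               { return true; }
    static void preemptDisable(PREEMPTSTATE *pState) { (void)pState; }
    static void preemptRestore(PREEMPTSTATE *pState) { (void)pState; }

    static void updatePerCpuMapping(void)
    {
        /* Like HM_DISABLE_PREEMPT_IF_NEEDED(): only disable (and remember
           to restore) if preemption was enabled on entry; a caller running
           from a thread-context hook already has preemption disabled. */
        PREEMPTSTATE State;
        bool fDisabled = false;
        if (preemptIsEnabled())
        {
            preemptDisable(&State);
            fDisabled = true;
        }

        /* ...update the VCPU <-> host CPU mapping here... */

        /* Like HM_RESTORE_PREEMPT_IF_NEEDED(). */
        if (fDisabled)
            preemptRestore(&State);
    }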