
Changeset 48250 in vbox for trunk/src


Timestamp: Sep 3, 2013 3:00:18 PM
Author: vboxsync
Message: VMM/HMVMXR0: Undo injected event on premature returns to ring-3. Should fix "longjmp disallowed" ring-0 assertions on real-on-v86 event injection.
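
In outline, the fix works like this: once an event has been written into the VMCS for injection, any path that bails back to ring-3 before the guest actually runs must scrub the event from the VMCS again while keeping it flagged as pending, so that it is re-injected on the next run attempt; the pending flag is only cleared at the point of no return (or once a real-mode event has truly been dispatched). The following is a minimal, self-contained sketch of that pattern only; the types and helpers (FAKEVMCS, PENDINGEVENT, injectPendingEvent, clearEventVmcs, commitEvent) are made-up stand-ins for illustration, not the real VirtualBox/VMX API shown in the diff below.

/* Illustrative sketch of the "undo injected event on premature ring-3 return"
 * pattern; everything here is a simplified stand-in, not VirtualBox code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the VMCS entry-interruption field. */
typedef struct FAKEVMCS
{
    uint32_t u32EntryIntInfo;   /* bit 31 is the "valid" bit, as in VT-x */
} FAKEVMCS;

/* Hypothetical stand-in for the per-VCPU pending-event bookkeeping. */
typedef struct PENDINGEVENT
{
    bool     fPending;
    uint32_t u32IntrInfo;
} PENDINGEVENT;

/* Write the pending event into the (fake) VMCS; it stays flagged as pending
 * until the guest has actually executed with it. */
static void injectPendingEvent(FAKEVMCS *pVmcs, PENDINGEVENT *pEvent)
{
    if (pEvent->fPending)
        pVmcs->u32EntryIntInfo = pEvent->u32IntrInfo | UINT32_C(0x80000000);
}

/* Premature return to ring-3: undo the VMCS write but keep fPending set so
 * the event is re-injected on the next run attempt -- the core of the fix. */
static void clearEventVmcs(FAKEVMCS *pVmcs, PENDINGEVENT *pEvent)
{
    if (pEvent->fPending)
        pVmcs->u32EntryIntInfo = 0;
}

/* Point of no return: the event really reaches the guest now. */
static void commitEvent(PENDINGEVENT *pEvent)
{
    pEvent->fPending = false;
}

int main(void)
{
    FAKEVMCS     Vmcs  = { 0 };
    PENDINGEVENT Event = { true, UINT32_C(0x0000030d) /* example vector/type, made up */ };

    injectPendingEvent(&Vmcs, &Event);
    clearEventVmcs(&Vmcs, &Event);          /* bail to ring-3: VMCS cleaned up */
    printf("after bail-out: entry-info=%#x pending=%d\n",
           (unsigned)Vmcs.u32EntryIntInfo, (int)Event.fPending);

    injectPendingEvent(&Vmcs, &Event);      /* next attempt re-injects it */
    commitEvent(&Event);                    /* guest is about to run: commit */
    printf("after commit:   entry-info=%#x pending=%d\n",
           (unsigned)Vmcs.u32EntryIntInfo, (int)Event.fPending);
    return 0;
}

The actual changeset realises the same pattern with the new hmR0VmxClearEventVmcs(), which zeroes VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO and the pending-debug-exceptions field, while pVCpu->hm.s.Event.fPending is only cleared once the event has truly been dispatched or at the point of no return.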

File (1 edited):
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (revision 48248)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (revision 48250)
@@ -316,4 +316,5 @@
 static void               hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
 static void               hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
+static void               hmR0VmxClearEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx);
 static int                hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
                                                  uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState);
@@ -6475,9 +6476,10 @@
  *                          out-of-sync. Make sure to update the required fields
  *                          before using them.
- *
- * @remarks No-long-jump zone!!!
  */
 static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
+    HMVMX_ASSERT_PREEMPT_SAFE();
+    Assert(VMMRZCallRing3IsEnabled(pVCpu));
+
     /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
     uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
@@ -6515,6 +6517,4 @@
                                     pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
         AssertRCReturn(rc, rc);
-
-        pVCpu->hm.s.Event.fPending = false;
 
         /* Update the interruptibility-state as it could have been changed by
@@ -6733,6 +6733,6 @@
  *                              necessary. This cannot not be NULL.
  *
+ * @remarks Requires CR0!
  * @remarks No-long-jump zone!!!
- * @remarks Requires CR0!
  */
 static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
@@ -6872,4 +6872,8 @@
                 }
                 Log4(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
+
+                /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
+                   it, if we are returning to ring-3 before executing guest code. */
+                pVCpu->hm.s.Event.fPending = false;
             }
             Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
@@ -6908,4 +6912,36 @@
     AssertRCReturn(rc, rc);
     return rc;
+}
+
+
+/**
+ * Clears the current event in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu         Pointer to the VMCPU.
+ *
+ * @remarks Use this function only to clear events that have not yet been
+ *          delivered to the guest but are injected in the VMCS!
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0VmxClearEventVmcs(PVMCPU pVCpu)
+{
+    if (!pVCpu->hm.s.Event.fPending)
+        return;
+
+#ifdef VBOX_STRICT
+    uint32_t u32EntryInfo;
+    int rc2 = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
+    AssertRC(rc2);
+    Assert(VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo));
+#endif
+
+    /* Clear the entry-interruption field (including the valid bit). */
+    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
+    AssertRC(rc);
+
+    /* Clear the pending debug exception field. */
+    rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
+    AssertRC(rc);
 }
 
@@ -7350,4 +7386,15 @@
     else if (!pVCpu->hm.s.Event.fPending)
         hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
+
+    /*
+     * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus needs to be done with
+     * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
+     */
+    rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
+    if (RT_UNLIKELY(rc != VINF_SUCCESS))
+    {
+        Assert(rc == VINF_EM_RESET);
+        return rc;
+    }
 
     /*
@@ -7372,4 +7419,5 @@
         || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
     {
+        hmR0VmxClearEventVmcs(pVCpu);
         ASMSetFlags(pVmxTransient->uEflags);
         VMMRZCallRing3Enable(pVCpu);
@@ -7379,4 +7427,5 @@
     if (RTThreadPreemptIsPending(NIL_RTTHREAD))
     {
+        hmR0VmxClearEventVmcs(pVCpu);
        ASMSetFlags(pVmxTransient->uEflags);
         VMMRZCallRing3Enable(pVCpu);
@@ -7385,15 +7434,6 @@
     }
 
-    /*
-     * Event injection might result in triple-faulting the VM (real-on-v86 case), which is why it's
-     * done here and not in hmR0VmxPreRunGuestCommitted() which doesn't expect failures.
-     */
-    rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
-    if (RT_UNLIKELY(rc != VINF_SUCCESS))
-    {
-        ASMSetFlags(pVmxTransient->uEflags);
-        VMMRZCallRing3Enable(pVCpu);
-        return rc;
-    }
+    /* We've injected any pending events. This is really the point of no return (to ring-3). */
+    pVCpu->hm.s.Event.fPending = false;
 
     return VINF_SUCCESS;