VirtualBox

Changeset 58914 in vbox for trunk/src/VBox/VMM/VMMR0


Ignore:
Timestamp:
Nov 29, 2015 11:35:41 PM (9 years ago)
Author:
vboxsync
Message:

HM: More VBOXSTRICTRC and related stuff.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r58913 r58914  
    338338static void               hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
    339339static void               hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu);
    340 static int                hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
     340static VBOXSTRICTRC       hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
    341341                                                 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress,
    342342                                                 bool fStepping, uint32_t *puIntState);
     
    34153415    Assert((uIntrState & 0x3) != 0x3);                              /* Block-by-STI and MOV SS cannot be simultaneously set. */
    34163416    int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
    3417     AssertRCReturn(rc, rc);
     3417    AssertRC(rc);
    34183418    return rc;
    34193419}
     
    66546654 * ring-3 for one reason or another.
    66556655 *
    6656  * @returns VBox status code (information status code included).
     6656 * @returns Strict VBox status code (information status code included).
    66576657 * @retval VINF_SUCCESS if we don't have any actions that require going back to
    66586658 *         ring-3.
     
    66716671 *                          out-of-sync. Make sure to update the required fields
    66726672 *                          before using them.
    6673  */
    6674 static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     6673 * @param   fStepping       Running in hmR0VmxRunGuestCodeStep().
     6674 */
     6675static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
    66756676{
    66766677    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    66776678
    6678     if (   VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
    6679                             ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
    6680         || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
    6681                                ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
    6682     {
    6683         /* We need the control registers now, make sure the guest-CPU context is updated. */
    6684         int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    6685         AssertRCReturn(rc3, rc3);
    6686 
    6687         /* Pending HM CR3 sync. */
    6688         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
    6689         {
    6690             int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
    6691             AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
    6692                             ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
    6693             Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    6694         }
    6695 
    6696         /* Pending HM PAE PDPEs. */
    6697         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
    6698         {
    6699             PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
    6700             Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
    6701         }
    6702 
    6703         /* Pending PGM CR3 sync. */
    6704         if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    6705         {
    6706             int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
    6707                                  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    6708             if (rc2 != VINF_SUCCESS)
    6709             {
    6710                 AssertRC(rc2);
    6711                 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", rc2));
    6712                 return rc2;
    6713             }
    6714         }
    6715 
    6716         /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
    6717         if (   VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
    6718             || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    6719         {
    6720             STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
    6721             int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
    6722             Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
    6723             return rc2;
    6724         }
    6725 
    6726         /* Pending VM request packets, such as hardware interrupts. */
    6727         if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
    6728             || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
    6729         {
    6730             Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
    6731             return VINF_EM_PENDING_REQUEST;
    6732         }
    6733 
    6734         /* Pending PGM pool flushes. */
    6735         if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
    6736         {
    6737             Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
    6738             return VINF_PGM_POOL_FLUSH_PENDING;
    6739         }
    6740 
    6741         /* Pending DMA requests. */
    6742         if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
    6743         {
    6744             Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
    6745             return VINF_EM_RAW_TO_R3;
    6746         }
     6679    /*
     6680     * Anything pending?  Should be more likely than not if we're doing a good job.
     6681     */
     6682    if (  !fStepping
     6683        ?    !VM_FF_IS_PENDING(pVM, VM_FF_HP_R0_PRE_HM_MASK)
     6684          && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
     6685        :    !VM_FF_IS_PENDING(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
     6686          && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
     6687        return VINF_SUCCESS;
     6688
     6689    /* We need the control registers now, make sure the guest-CPU context is updated. */
     6690    int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     6691    AssertRCReturn(rc3, rc3);
     6692
     6693    /* Pending HM CR3 sync. */
     6694    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
     6695    {
     6696        int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
     6697        AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
     6698                        ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
     6699        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
     6700    }
     6701
     6702    /* Pending HM PAE PDPEs. */
     6703    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
     6704    {
     6705        PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
     6706        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
     6707    }
     6708
     6709    /* Pending PGM CR3 sync. */
     6710    if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
     6711    {
     6712        VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
     6713                                            VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
     6714        if (rcStrict2 != VINF_SUCCESS)
     6715        {
     6716            AssertRC(rcStrict2);
     6717            Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));
     6718            return rcStrict2;
     6719        }
     6720    }
     6721
     6722    /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
     6723    if (   VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
     6724        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
     6725    {
     6726        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
     6727        int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
     6728        Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
     6729        return rc2;
     6730    }
     6731
     6732    /* Pending VM request packets, such as hardware interrupts. */
     6733    if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
     6734        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
     6735    {
     6736        Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
     6737        return VINF_EM_PENDING_REQUEST;
     6738    }
     6739
     6740    /* Pending PGM pool flushes. */
     6741    if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
     6742    {
     6743        Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
     6744        return VINF_PGM_POOL_FLUSH_PENDING;
     6745    }
     6746
     6747    /* Pending DMA requests. */
     6748    if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
     6749    {
     6750        Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
     6751        return VINF_EM_RAW_TO_R3;
    67476752    }
    67486753
     
    74207425 * receive them.
    74217426 *
    7422  * @returns VBox status code (informational status codes included).
     7427 * @returns Strict VBox status code (informational status codes included).
    74237428 * @param   pVCpu           The cross context virtual CPU structure.
    74247429 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     
    74297434 *                          dispatched directly.
    74307435 */
    7431 static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
     7436static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
    74327437{
    74337438    HMVMX_ASSERT_PREEMPT_SAFE();
     
    74447449    Assert(!TRPMHasTrap(pVCpu));
    74457450
    7446     int rc = VINF_SUCCESS;
     7451    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    74477452    if (pVCpu->hm.s.Event.fPending)
    74487453    {
     
    74737478        Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
    74747479              (uint8_t)uIntType));
    7475         rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
    7476                                     pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping, &uIntrState);
    7477         AssertRCReturn(rc, rc);
     7480        rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
     7481                                          pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress,
     7482                                          fStepping, &uIntrState);
     7483        AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
    74787484
    74797485        /* Update the interruptibility-state as it could have been changed by
     
    75237529    AssertRC(rc2);
    75247530
    7525     Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
     7531    Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
    75267532    NOREF(fBlockMovSS); NOREF(fBlockSti);
    7527     return rc;
     7533    return rcStrict;
    75287534}
    75297535
     
    75617567 *                          necessary. This cannot be NULL.
    75627568 */
    7563 DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState)
     7569DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState)
    75647570{
    75657571    uint32_t u32IntInfo  = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
     
    76277633 *                              necessary. This cannot be NULL.
    76287634 */
    7629 DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
    7630                                     bool fStepping, uint32_t *puIntrState)
     7635DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
     7636                                             bool fStepping, uint32_t *puIntrState)
    76317637{
    76327638    uint32_t u32IntInfo  = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
     
    76877693 * stack.
    76887694 *
    7689  * @returns VBox status code (information status code included).
    7690  * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
     7695 * @returns Strict VBox status code (information status code included).
     7696 * @retval  VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
    76917697 * @param   pVM         The cross context VM structure.
    76927698 * @param   pMixedCtx   Pointer to the guest-CPU context.
    76937699 * @param   uValue      The value to push to the guest stack.
    76947700 */
    7695 DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
     7701DECLINLINE(VBOXSTRICTRC) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
    76967702{
    76977703    /*
     
    77047710    pMixedCtx->sp -= sizeof(uint16_t);       /* May wrap around which is expected behaviour. */
    77057711    int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
    7706     AssertRCReturn(rc, rc);
     7712    AssertRC(rc);
    77077713    return rc;
    77087714}
     
    77137719 * in the VM-entry area in the VMCS.
    77147720 *
    7715  * @returns VBox status code (informational error codes included).
    7716  * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
    7717  * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
     7721 * @returns Strict VBox status code (informational error codes included).
     7722 * @retval  VINF_SUCCESS if the event is successfully injected into the VMCS.
     7723 * @retval  VINF_EM_RESET if event injection resulted in a triple-fault.
    77187724 *
    77197725 * @param   pVCpu               The cross context virtual CPU structure.
     
    77397745 * @remarks No-long-jump zone!!!
    77407746 */
    7741 static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
    7742                                   uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *puIntrState)
     7747static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
     7748                                           uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping,
     7749                                           uint32_t *puIntrState)
    77437750{
    77447751    /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
     
    78397846
    78407847            /* Construct the stack frame for the interrupt/exception handler. */
    7841             rc  = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
    7842             rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
    7843             rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
    7844             AssertRCReturn(rc, rc);
     7848            VBOXSTRICTRC rcStrict;
     7849            rcStrict  = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
     7850            if (rcStrict == VINF_SUCCESS)
     7851                rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
     7852            if (rcStrict == VINF_SUCCESS)
     7853                rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
    78457854
    78467855            /* Clear the required eflag bits and jump to the interrupt/exception handler. */
    7847             if (rc == VINF_SUCCESS)
     7856            if (rcStrict == VINF_SUCCESS)
    78487857            {
    78497858                pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
     
    78807889                /* Make hmR0VmxPreRunGuest return if we're stepping since we've changed cs:rip. */
    78817890                if (fStepping)
    7882                     rc = VINF_EM_DBG_STEPPED;
     7891                    rcStrict = VINF_EM_DBG_STEPPED;
    78837892            }
    7884             Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
    7885             return rc;
     7893            AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
     7894                      ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     7895            return rcStrict;
    78867896        }
    78877897
     
    79127922
    79137923    AssertRCReturn(rc, rc);
    7914     return rc;
     7924    return VINF_SUCCESS;
    79157925}
    79167926
     
    83518361 *                          dispatching took place.
    83528362 */
    8353 static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
     8363static VBOXSTRICTRC hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
    83548364{
    83558365    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     
    83608370
    83618371    /* Check force flag actions that might require us to go back to ring-3. */
    8362     int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
    8363     if (rc != VINF_SUCCESS)
    8364         return rc;
     8372    VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx, fStepping);
     8373    if (rcStrict == VINF_SUCCESS)
     8374    { /* FFs doesn't get set all the time. */ }
     8375    else
     8376        return rcStrict;
    83658377
    83668378#ifndef IEM_VERIFICATION_MODE_FULL
     
    83758387
    83768388        /* Unalias any existing mapping. */
    8377         rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
     8389        int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
    83788390        AssertRCReturn(rc, rc);
    83798391
     
    83958407     * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
    83968408     */
    8397     rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fStepping);
    8398     if (RT_UNLIKELY(rc != VINF_SUCCESS))
    8399     {
    8400         Assert(rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
    8401         return rc;
     8409    rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fStepping);
     8410    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
     8411    { /* likely */ }
     8412    else
     8413    {
     8414        AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
     8415                  ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     8416        return rcStrict;
    84028417    }
    84038418
     
    84298444     */
    84308445    pVmxTransient->fEFlags = ASMIntDisableFlags();
    8431     if (  (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
    8432            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    8433         && (   !fStepping /* Optimized for the non-stepping case, of course. */
    8434             || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
    8435     {
    8436         ASMSetFlags(pVmxTransient->fEFlags);
    8437         VMMRZCallRing3Enable(pVCpu);
     8446
     8447    if (   (   !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
     8448            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
     8449        || (   fStepping /* Optimized for the non-stepping case, so a bit of unnecessary work when stepping. */
     8450            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
     8451    {
     8452        if (!RTThreadPreemptIsPending(NIL_RTTHREAD))
     8453        {
     8454            /* We've injected any pending events. This is really the point of no return (to ring-3). */
     8455            pVCpu->hm.s.Event.fPending = false;
     8456
     8457            return VINF_SUCCESS;
     8458        }
     8459
     8460        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
     8461        rcStrict = VINF_EM_RAW_INTERRUPT;
     8462    }
     8463    else
     8464    {
    84388465        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
    8439         return VINF_EM_RAW_TO_R3;
    8440     }
    8441 
    8442     if (RTThreadPreemptIsPending(NIL_RTTHREAD))
    8443     {
    8444         ASMSetFlags(pVmxTransient->fEFlags);
    8445         VMMRZCallRing3Enable(pVCpu);
    8446         STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
    8447         return VINF_EM_RAW_INTERRUPT;
    8448     }
    8449 
    8450     /* We've injected any pending events. This is really the point of no return (to ring-3). */
    8451     pVCpu->hm.s.Event.fPending = false;
    8452 
    8453     return VINF_SUCCESS;
     8466        rcStrict = VINF_EM_RAW_TO_R3;
     8467    }
     8468
     8469    ASMSetFlags(pVmxTransient->fEFlags);
     8470    VMMRZCallRing3Enable(pVCpu);
     8471
     8472    return rcStrict;
    84548473}
    84558474
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette