Changeset 58914 in vbox for trunk/src/VBox/VMM/VMMR0
Timestamp: Nov 29, 2015 11:35:41 PM
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r58913)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r58914)
@@ -338,5 +338,5 @@
 static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
 static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu);
-static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
+static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
                                    uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress,
                                    bool fStepping, uint32_t *puIntState);
@@ -3415,5 +3415,5 @@
     Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
     int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
-    AssertRCReturn(rc, rc);
+    AssertRC(rc);
     return rc;
 }
@@ -6654,5 +6654,5 @@
  * ring-3 for one reason or another.
  *
- * @returns VBox status code (information status code included).
+ * @returns Strict VBox status code (information status code included).
  * @retval VINF_SUCCESS if we don't have any actions that require going back to
  *         ring-3.
@@ -6671,78 +6671,83 @@
  *                          out-of-sync. Make sure to update the required fields
  *                          before using them.
- */
-static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+ * @param   fStepping       Running in hmR0VmxRunGuestCodeStep().
+ */
+static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
 
-    if (   VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
-                            ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
-        || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
-                               ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
-    {
-        /* We need the control registers now, make sure the guest-CPU context is updated. */
-        int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
-        AssertRCReturn(rc3, rc3);
-
-        /* Pending HM CR3 sync. */
-        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
-        {
-            int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
-            AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
-                            ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
-            Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
-        }
-
-        /* Pending HM PAE PDPEs. */
-        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
-        {
-            PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
-            Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
-        }
-
-        /* Pending PGM C3 sync. */
-        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
-        {
-            int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
-                                 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
-            if (rc2 != VINF_SUCCESS)
-            {
-                AssertRC(rc2);
-                Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", rc2));
-                return rc2;
-            }
-        }
-
-        /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
-        if (   VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
-            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
-        {
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
-            int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
-            Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
-            return rc2;
-        }
-
-        /* Pending VM request packets, such as hardware interrupts. */
-        if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
-            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
-        {
-            Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
-            return VINF_EM_PENDING_REQUEST;
-        }
-
-        /* Pending PGM pool flushes. */
-        if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
-        {
-            Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
-            return VINF_PGM_POOL_FLUSH_PENDING;
-        }
-
-        /* Pending DMA requests. */
-        if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
-        {
-            Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
-            return VINF_EM_RAW_TO_R3;
-        }
+    /*
+     * Anything pending?  Should be more likely than not if we're doing a good job.
+     */
+    if (  !fStepping
+        ?    !VM_FF_IS_PENDING(pVM, VM_FF_HP_R0_PRE_HM_MASK)
+          && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
+        :    !VM_FF_IS_PENDING(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
+          && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
+        return VINF_SUCCESS;
+
+    /* We need the control registers now, make sure the guest-CPU context is updated. */
+    int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
+    AssertRCReturn(rc3, rc3);
+
+    /* Pending HM CR3 sync. */
+    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
+    {
+        int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
+        AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
+                        ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
+        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
+    }
+
+    /* Pending HM PAE PDPEs. */
+    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
+    {
+        PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
+        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
+    }
+
+    /* Pending PGM C3 sync. */
+    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
+    {
+        VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
+                                            VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+        if (rcStrict2 != VINF_SUCCESS)
+        {
+            AssertRC(rcStrict2);
+            Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));
+            return rcStrict2;
+        }
+    }
+
+    /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
+    if (   VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
+        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
+    {
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
+        int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
+        Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
+        return rc2;
+    }
+
+    /* Pending VM request packets, such as hardware interrupts. */
+    if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
+        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
+    {
+        Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
+        return VINF_EM_PENDING_REQUEST;
+    }
+
+    /* Pending PGM pool flushes. */
+    if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
+    {
+        Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
+        return VINF_PGM_POOL_FLUSH_PENDING;
+    }
+
+    /* Pending DMA requests. */
+    if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
+    {
+        Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
+        return VINF_EM_RAW_TO_R3;
     }
 
@@ -7420,5 +7425,5 @@
  * receive them.
  *
- * @returns VBox status code (informational status codes included).
+ * @returns Strict VBox status code (informational status codes included).
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
@@ -7429,5 +7434,5 @@
  *                          dispatched directly.
  */
-static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
+static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
 {
     HMVMX_ASSERT_PREEMPT_SAFE();
@@ -7444,5 +7449,5 @@
     Assert(!TRPMHasTrap(pVCpu));
 
-    int rc = VINF_SUCCESS;
+    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
     if (pVCpu->hm.s.Event.fPending)
     {
@@ -7473,7 +7478,8 @@
         Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
               (uint8_t)uIntType));
-        rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
-                                    pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping, &uIntrState);
-        AssertRCReturn(rc, rc);
+        rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
+                                          pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress,
+                                          fStepping, &uIntrState);
+        AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
 
         /* Update the interruptibility-state as it could have been changed by
@@ -7523,6 +7529,6 @@
     AssertRC(rc2);
 
-    Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
+    Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
     NOREF(fBlockMovSS); NOREF(fBlockSti);
-    return rc;
+    return rcStrict;
 }
@@ -7561,5 +7567,5 @@
  *                          necessary. This cannot not be NULL.
  */
-DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState)
+DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState)
 {
     uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
@@ -7627,6 +7633,6 @@
  *                          necessary. This cannot not be NULL.
  */
-DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
-                                    bool fStepping, uint32_t *puIntrState)
+DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
+                                             bool fStepping, uint32_t *puIntrState)
 {
     uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
@@ -7687,11 +7693,11 @@
  * stack.
 *
- * @returns VBox status code (information status code included).
- * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
+ * @returns Strict VBox status code (information status code included).
+ * @retval  VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
  * @param   pVM         The cross context VM structure.
  * @param   pMixedCtx   Pointer to the guest-CPU context.
  * @param   uValue      The value to push to the guest stack.
  */
-DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
+DECLINLINE(VBOXSTRICTRC) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
 {
     /*
@@ -7704,5 +7710,5 @@
     pMixedCtx->sp -= sizeof(uint16_t);       /* May wrap around which is expected behaviour. */
     int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
-    AssertRCReturn(rc, rc);
+    AssertRC(rc);
     return rc;
 }
@@ -7713,7 +7719,7 @@
 * in the VM-entry area in the VMCS.
 *
- * @returns VBox status code (informational error codes included).
- * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
- * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
+ * @returns Strict VBox status code (informational error codes included).
+ * @retval  VINF_SUCCESS if the event is successfully injected into the VMCS.
+ * @retval  VINF_EM_RESET if event injection resulted in a triple-fault.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
@@ -7739,6 +7745,7 @@
 * @remarks No-long-jump zone!!!
 */
-static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
-                                  uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *puIntrState)
+static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
+                                           uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping,
+                                           uint32_t *puIntrState)
 {
     /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
@@ -7839,11 +7846,13 @@
 
         /* Construct the stack frame for the interrupt/exception handler. */
-        rc  = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
-        rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
-        rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
-        AssertRCReturn(rc, rc);
+        VBOXSTRICTRC rcStrict;
+        rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
+        if (rcStrict == VINF_SUCCESS)
+            rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
+        if (rcStrict == VINF_SUCCESS)
+            rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
 
         /* Clear the required eflag bits and jump to the interrupt/exception handler. */
-        if (rc == VINF_SUCCESS)
+        if (rcStrict == VINF_SUCCESS)
         {
             pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
@@ -7880,8 +7889,9 @@
             /* Make hmR0VmxPreRunGuest return if we're stepping since we've changed cs:rip. */
             if (fStepping)
-                rc = VINF_EM_DBG_STEPPED;
+                rcStrict = VINF_EM_DBG_STEPPED;
         }
-        Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
-        return rc;
+        AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
+                  ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+        return rcStrict;
     }
 
@@ -7912,5 +7922,5 @@
 
     AssertRCReturn(rc, rc);
-    return rc;
+    return VINF_SUCCESS;
 }
 
@@ -8351,5 +8361,5 @@
 *          dispatching took place.
 */
-static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
+static VBOXSTRICTRC hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
@@ -8360,7 +8370,9 @@
 
     /* Check force flag actions that might require us to go back to ring-3. */
-    int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
-    if (rc != VINF_SUCCESS)
-        return rc;
+    VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx, fStepping);
+    if (rcStrict == VINF_SUCCESS)
+    { /* FFs doesn't get set all the time. */ }
+    else
+        return rcStrict;
 
 #ifndef IEM_VERIFICATION_MODE_FULL
@@ -8375,5 +8387,5 @@
 
         /* Unalias any existing mapping. */
-        rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
+        int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
        AssertRCReturn(rc, rc);
 
@@ -8395,9 +8407,12 @@
     * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
     */
-    rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fStepping);
-    if (RT_UNLIKELY(rc != VINF_SUCCESS))
-    {
-        Assert(rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
-        return rc;
+    rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fStepping);
+    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+    { /* likely */ }
+    else
+    {
+        AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
+                  ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+        return rcStrict;
     }
 
@@ -8429,27 +8444,31 @@
     */
    pVmxTransient->fEFlags = ASMIntDisableFlags();
-    if (   (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
-            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
-        && (   !fStepping /* Optimized for the non-stepping case, of course. */
-            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
-    {
-        ASMSetFlags(pVmxTransient->fEFlags);
-        VMMRZCallRing3Enable(pVCpu);
+
+    if (   (   !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
+            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
+        || (   fStepping /* Optimized for the non-stepping case, so a bit of unnecessary work when stepping. */
+            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
+    {
+        if (!RTThreadPreemptIsPending(NIL_RTTHREAD))
+        {
+            /* We've injected any pending events. This is really the point of no return (to ring-3). */
+            pVCpu->hm.s.Event.fPending = false;
+
+            return VINF_SUCCESS;
+        }
+
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
+        rcStrict = VINF_EM_RAW_INTERRUPT;
+    }
+    else
+    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
-        return VINF_EM_RAW_TO_R3;
-    }
-
-    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
-    {
-        ASMSetFlags(pVmxTransient->fEFlags);
-        VMMRZCallRing3Enable(pVCpu);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
-        return VINF_EM_RAW_INTERRUPT;
-    }
-
-    /* We've injected any pending events. This is really the point of no return (to ring-3). */
-    pVCpu->hm.s.Event.fPending = false;
-
-    return VINF_SUCCESS;
+        rcStrict = VINF_EM_RAW_TO_R3;
+    }
+
+    ASMSetFlags(pVmxTransient->fEFlags);
+    VMMRZCallRing3Enable(pVCpu);
+
+    return rcStrict;
 }
 