Changeset 53178 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Nov 2, 2014 9:04:12 PM (10 years ago)
- File: trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (1 edited)
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r53176)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r53178)
@@ -345,5 +345,6 @@
 static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
 static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
-                                  uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntState);
+                                  uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress,
+                                  bool fStepping, uint32_t *puIntState);
 #if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
 static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
@@ -7503,6 +7504,9 @@
  *                      out-of-sync. Make sure to update the required fields
  *                      before using them.
- */
-static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+ * @param fStepping     Running in hmR0VmxRunGuestCodeStep and we should
+ *                      return VINF_EM_DBG_STEPPED if an event was dispatched
+ *                      directly.
+ */
+static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
 {
     HMVMX_ASSERT_PREEMPT_SAFE();
@@ -7554,4 +7558,4 @@
                                 (uint8_t)uIntType));
         rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
-                                    pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
+                                    pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping, &uIntrState);
         AssertRCReturn(rc, rc);
@@ -7604,5 +7608,5 @@
         AssertRC(rc2);
 
-    Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
+    Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
     NOREF(fBlockMovSS); NOREF(fBlockSti);
     return rc;
@@ -7634,6 +7638,13 @@
  *                      out-of-sync. Make sure to update the required fields
  *                      before using them.
- */
-DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t *puIntrState)
+ * @param fStepping     Whether we're running in hmR0VmxRunGuestCodeStep and
+ *                      should return VINF_EM_DBG_STEPPED if the event is
+ *                      injected directly (registers modified by us, not by
+ *                      hardware on VM entry).
+ * @param puIntrState   Pointer to the current guest interruptibility-state.
+ *                      This interruptibility-state will be updated if
+ *                      necessary. This cannot be NULL.
+ */
+DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState)
 {
     uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
@@ -7641,4 +7652,4 @@
     u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
     return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
-                                  puIntrState);
+                                  fStepping, puIntrState);
 }
@@ -7692,7 +7703,14 @@
  *                      mode, i.e. in real-mode it's not valid).
  * @param u32ErrorCode  The error code associated with the #GP.
+ * @param fStepping     Whether we're running in hmR0VmxRunGuestCodeStep
+ *                      and should return VINF_EM_DBG_STEPPED if the
+ *                      event is injected directly (registers modified
+ *                      by us, not by hardware on VM entry).
+ * @param puIntrState   Pointer to the current guest interruptibility-state.
+ *                      This interruptibility-state will be updated if
+ *                      necessary. This cannot be NULL.
  */
 DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
-                                    uint32_t *puIntrState)
+                                    bool fStepping, uint32_t *puIntrState)
 {
     uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
@@ -7701,5 +7719,5 @@
     u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
     return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
-                                  puIntrState);
+                                  fStepping, puIntrState);
 }
 
@@ -7796,4 +7814,8 @@
  *                      This interruptibility-state will be updated if
  *                      necessary. This cannot be NULL.
+ * @param fStepping     Whether we're running in hmR0VmxRunGuestCodeStep
+ *                      and should return VINF_EM_DBG_STEPPED if the
+ *                      event is injected directly (registers modified
+ *                      by us, not by hardware on VM entry).
  *
  * @remarks Requires CR0!
@@ -7801,5 +7823,5 @@
  */
 static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
-                                  uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState)
+                                  uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *puIntrState)
 {
     /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
@@ -7871,12 +7893,12 @@
             if (uVector == X86_XCPT_DF)
                 return VINF_EM_RESET;
-            else if (uVector == X86_XCPT_GP)
-            {
-                /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
-                return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, puIntrState);
-            }
+
+            /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
+            if (uVector == X86_XCPT_GP)
+                return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, puIntrState);
 
             /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
             /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
-            return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
+            return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */,
+                                       fStepping, puIntrState);
 
@@ -7932,11 +7954,16 @@
             *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
         }
-        Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntInfo, u32ErrCode, cbInstr));
+        Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x instrlen=%#x efl=%#x cs:eip=%04x:%04x\n",
+              u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->eflags.u, pMixedCtx->cs.Sel, pMixedCtx->eip));
 
         /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
            it, if we are returning to ring-3 before executing guest code. */
         pVCpu->hm.s.Event.fPending = false;
+
+        /* Make hmR0VmxPreRunGuest return if we're stepping since we've changed cs:rip. */
+        if (fStepping)
+            rc = VINF_EM_DBG_STEPPED;
     }
-    Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
+    Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
     return rc;
 }
@@ -8402,4 +8429,6 @@
  * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
  *         double-fault into the guest.
+ * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
+ *         dispatched directly.
  * @retval VINF_* scheduling changes, we have to go back to ring-3.
  *
@@ -8410,6 +8439,8 @@
  *                      before using them.
  * @param pVmxTransient Pointer to the VMX transient structure.
- * @param fStepping     Set if called from hmR0VmxRunGuestCodeStep, makes us
- *                      ignore some of the reasons for returning to ring-3.
+ * @param fStepping     Set if called from hmR0VmxRunGuestCodeStep. Makes
+ *                      us ignore some of the reasons for returning to
+ *                      ring-3, and return VINF_EM_DBG_STEPPED if event
+ *                      dispatching took place.
  */
 static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
@@ -8458,8 +8489,8 @@
      * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
      */
-    rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
+    rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fStepping);
     if (RT_UNLIKELY(rc != VINF_SUCCESS))
     {
-        Assert(rc == VINF_EM_RESET);
+        Assert(rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
         return rc;
     }
@@ -9918,5 +9949,5 @@
         return VINF_SUCCESS;
     }
-    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+    if (RT_UNLIKELY(rc == VINF_EM_RESET))
    {
         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
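The hunks above thread the new fStepping flag from hmR0VmxPreRunGuest down through hmR0VmxInjectPendingEvent into hmR0VmxInjectEventVmcs and the #DF/#GP helpers. The reason shows in the real-mode injection hunk: that path delivers the event by rewriting cs:rip, the stack and the flags itself rather than letting VM-entry do it, so when single-stepping the caller must learn that guest state has already moved. Below is a minimal sketch of the resulting control flow, using simplified status codes and hypothetical names (inject_event, prerun_guest) in place of the real VirtualBox functions:

    #include <stdbool.h>

    /* Simplified status codes standing in for VINF_SUCCESS, VINF_EM_RESET
       and VINF_EM_DBG_STEPPED. */
    enum status { S_SUCCESS, S_RESET, S_DBG_STEPPED };

    /* Injection path: when single-stepping and the event had to be
       dispatched by modifying guest registers ourselves (the real-mode
       case), report S_DBG_STEPPED so the stepping loop unwinds to ring-3
       instead of entering the guest with an already-updated cs:rip. */
    enum status inject_event(bool fStepping, bool fDispatchedDirectly)
    {
        enum status rc = S_SUCCESS;
        if (fDispatchedDirectly && fStepping)
            rc = S_DBG_STEPPED; /* guest rip moved without executing an instruction */
        return rc;
    }

    /* Pre-run path: anything but success aborts the run loop, and the
       stepping status is passed straight up to the caller. */
    enum status prerun_guest(bool fStepping)
    {
        enum status rc = inject_event(fStepping, /*fDispatchedDirectly=*/true);
        if (rc != S_SUCCESS)
            return rc; /* S_RESET, or S_DBG_STEPPED only when fStepping */
        /* ... VM-entry would follow here ... */
        return S_SUCCESS;
    }

The tightened asserts in the diff, rc == VINF_EM_DBG_STEPPED && fStepping, encode the same contract: the stepping status may only escape the injection path when the caller asked for it.

The remaining hunks fix EFLAGS.RF handling in the instruction-emulation paths: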
@@ -11814,4 +11845,5 @@
         {
             pMixedCtx->eflags.Bits.u1IF = 0;
+            pMixedCtx->eflags.Bits.u1RF = 0;
             pMixedCtx->rip += pDis->cbInstr;
             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
@@ -11823,8 +11855,13 @@
         case OP_STI:
         {
+            bool fOldIF = pMixedCtx->eflags.Bits.u1IF;
             pMixedCtx->eflags.Bits.u1IF = 1;
+            pMixedCtx->eflags.Bits.u1RF = 0;
             pMixedCtx->rip += pDis->cbInstr;
-            EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
-            Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
+            if (!fOldIF)
+            {
+                EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
+                Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
+            }
             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
             hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
@@ -11837,5 +11874,6 @@
             rc = VINF_EM_HALT;
             pMixedCtx->rip += pDis->cbInstr;
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
+            pMixedCtx->eflags.Bits.u1RF = 0;
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
             break;
@@ -11876,7 +11914,6 @@
             }
             Log4(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
-            pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
-                                  | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
-            pMixedCtx->eflags.Bits.u1RF = 0; /* The RF bit is always cleared by POPF; see Intel Instruction reference. */
+            pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
+                                  | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
             pMixedCtx->esp += cbParm;
             pMixedCtx->esp &= uMask;
@@ -11932,5 +11969,8 @@
             pMixedCtx->esp &= uMask;
             pMixedCtx->rip += pDis->cbInstr;
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP);
+            pMixedCtx->eflags.Bits.u1RF = 0;
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
+                                | HM_CHANGED_GUEST_RSP
+                                | HM_CHANGED_GUEST_RFLAGS);
             hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
@@ -11965,6 +12005,6 @@
             pMixedCtx->cs.ValidSel = aIretFrame[1];
             pMixedCtx->cs.u64Base  = (uint64_t)pMixedCtx->cs.Sel << 4;
-            pMixedCtx->eflags.u32  = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
-                                   | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
+            pMixedCtx->eflags.u32  = (pMixedCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))
+                                   | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
             pMixedCtx->sp += sizeof(aIretFrame);
             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
@@ -11997,4 +12037,9 @@
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
             }
+            else
+            {
+                pMixedCtx->eflags.Bits.u1RF = 0;
+                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
+            }
             break;
         }
@@ -12002,4 +12047,5 @@
         default:
         {
+            pMixedCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */
             VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
                                                                 EMCODETYPE_SUPERVISOR);
@@ -12122,5 +12168,5 @@
         return rc;
     }
-    else if (rc == VINF_EM_RAW_GUEST_TRAP)
+    if (rc == VINF_EM_RAW_GUEST_TRAP)
     {
         if (!pVmxTransient->fVectoringDoublePF
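These emulation hunks (CLI, STI, HLT, POPF, PUSHF, IRET, INT, and the interpreter fallback) all clear EFLAGS.RF. Hardware clears RF when an instruction completes successfully, so an emulator that advances rip by hand must clear it too; a stale RF left set would wrongly suppress the next instruction breakpoint, which matters for the stepping support added above. A sketch of the POPF case, with illustrative flag constants standing in for VBox's X86_EFL_* defines:

    #include <stdint.h>

    #define EFL_RF        UINT32_C(0x00010000) /* resume flag (bit 16) */
    #define EFL_POPF_BITS UINT32_C(0x00000fd5) /* CF|PF|AF|ZF|SF|TF|IF|DF|OF, illustrative subset */

    /* Emulate POPF: only the POPF-changeable bits allowed by 'mask'
       (e.g. 0xffff for a 16-bit operand) come from the popped value,
       and RF is cleared unconditionally because the popped image never
       supplies it and hardware drops it when the instruction retires. */
    uint32_t emulate_popf(uint32_t eflags, uint32_t popped, uint32_t mask)
    {
        return (eflags & ~((EFL_POPF_BITS & mask) | EFL_RF))
             | (popped & EFL_POPF_BITS & mask);
    }

The IRET hunk applies the same rule in its 16-bit real-mode form: the upper half of EFLAGS is preserved except for RF, and only the POPF-changeable low bits are taken from the popped flags image.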