Changeset 53176 in vbox
- Timestamp: Nov 2, 2014 12:34:02 AM (10 years ago)
- File: 1 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@53089
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@53176

@@ -355,4 +355,6 @@
 # define HMVMX_EXIT_DECL static DECLCALLBACK(int)
 #endif
+DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
+                                               uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart);
 
 /** @name VM-exit handlers.

@@ -7740,7 +7742,5 @@
         if (   uVector == X86_XCPT_BP
             || uVector == X86_XCPT_OF)
-        {
             u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
-        }
         else
             u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);

@@ -7911,10 +7911,9 @@
             pMixedCtx->rip         = IdtEntry.offSel;
             pMixedCtx->cs.Sel      = IdtEntry.uSel;
+            pMixedCtx->cs.ValidSel = IdtEntry.uSel;
             pMixedCtx->cs.u64Base  = IdtEntry.uSel << cbIdtEntry;
             if (   uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
                 && uVector == X86_XCPT_PF)
-            {
                 pMixedCtx->cr2 = GCPtrFaultAddress;
-            }
 
             /* If any other guest-state bits are changed here, make sure to update

@@ -7942,11 +7941,9 @@
             return rc;
         }
-        else
-        {
-            /*
-             * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.
-             * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
-             */
-            u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
-        }
+
+        /*
+         * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.
+         * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
+         */
+        u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
     }

@@ -7965,7 +7962,5 @@
     if (   VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
         && uVector == X86_XCPT_PF)
-    {
         pMixedCtx->cr2 = GCPtrFaultAddress;
-    }
 
     Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,

@@ -8415,6 +8410,8 @@
  *                          before using them.
  * @param   pVmxTransient   Pointer to the VMX transient structure.
- */
-static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
+ * @param   fStepping       Set if called from hmR0VmxRunGuestCodeStep, makes us
+ *                          ignore some of the reasons for returning to ring-3.
+ */
+static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));

@@ -8495,6 +8492,8 @@
      */
     pVmxTransient->uEflags = ASMIntDisableFlags();
-    if (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
-        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
+    if (   (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
+            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
+        && (   !fStepping /* Optimized for the non-stepping case, of course. */
+            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
     {
         hmR0VmxClearEventVmcs(pVCpu);

@@ -8802,5 +8801,5 @@
            to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
+        rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, false /*fStepping*/);
         if (rc != VINF_SUCCESS)
             break;

@@ -8863,5 +8862,5 @@
     VMXTRANSIENT VmxTransient;
     VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
-    int          rc        = VERR_INTERNAL_ERROR_5;
+    VBOXSTRICTRC rcStrict  = VERR_INTERNAL_ERROR_5;
     uint32_t     cLoops    = 0;
     uint16_t     uCsStart  = pCtx->cs.Sel;

@@ -8876,25 +8875,25 @@
            to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
-        if (rc != VINF_SUCCESS)
+        rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, true /*fStepping*/);
+        if (rcStrict != VINF_SUCCESS)
             break;
 
         hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
-        rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
+        rcStrict = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
         /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
 
         /* Restore any residual host-state and save any bits shared between host
            and guest into the guest-CPU state. Re-enables interrupts! */
-        hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
+        hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, VBOXSTRICTRC_TODO(rcStrict));
 
         /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
-        if (RT_UNLIKELY(rc != VINF_SUCCESS))
+        if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
         {
             STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
-            hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
-            return rc;
-        }
-
-        /* Handle the VM-exit. */
+            hmR0VmxReportWorldSwitchError(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict), pCtx, &VmxTransient);
+            return VBOXSTRICTRC_TODO(rcStrict);
+        }
+
+        /* Handle the VM-exit - we quit earlier on certain exits, see hmR0VmxHandleExitStep. */
         AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);

@@ -8902,16 +8901,12 @@
         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
         HMVMX_START_EXIT_DISPATCH_PROF();
-#ifdef HMVMX_USE_FUNCTION_TABLE
-        rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
-#else
-        rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
-#endif
+        rcStrict = hmR0VmxHandleExitStep(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, uCsStart, uRipStart);
         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
-        if (rc != VINF_SUCCESS)
+        if (rcStrict != VINF_SUCCESS)
             break;
-        else if (cLoops > pVM->hm.s.cMaxResumeLoops)
+        if (cLoops > pVM->hm.s.cMaxResumeLoops)
         {
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
-            rc = VINF_EM_RAW_INTERRUPT;
+            rcStrict = VINF_EM_RAW_INTERRUPT;
             break;
         }

@@ -8927,5 +8922,5 @@
             || pCtx->cs.Sel != uCsStart)
         {
-            rc = VINF_EM_DBG_STEPPED;
+            rcStrict = VINF_EM_DBG_STEPPED;
             break;
         }

@@ -8948,5 +8943,5 @@
 
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
-    return rc;
+    return VBOXSTRICTRC_TODO(rcStrict);
 }
 

@@ -9065,5 +9060,102 @@
     return rc;
 }
-#endif
+#endif /* !HMVMX_USE_FUNCTION_TABLE */
+
+
+/**
+ * Single stepping exit filtering.
+ *
+ * This is preprocessing the exits and deciding whether we've gotten far enough
+ * to return VINF_EM_DBG_STEPPED already.  If not, normal exit handling is
+ * performed.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu           The virtual CPU of the calling EMT.
+ * @param   pMixedCtx       Pointer to the guest-CPU context.  The data may be
+ *                          out-of-sync.  Make sure to update the required
+ *                          fields before using them.
+ * @param   pVmxTransient   Pointer to the VMX-transient structure.
+ * @param   uExitReason     The exit reason.
+ */
+DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
+                                               uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart)
+{
+    switch (uExitReason)
+    {
+        case VMX_EXIT_XCPT_OR_NMI:
+        {
+            /* Check for NMI. */
+            int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
+            AssertRCReturn(rc2, rc2);
+            uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
+            if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
+                return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
+            /* fall thru */
+        }
+
+        case VMX_EXIT_EPT_MISCONFIG:
+        case VMX_EXIT_TRIPLE_FAULT:
+        case VMX_EXIT_APIC_ACCESS:
+        case VMX_EXIT_TPR_BELOW_THRESHOLD:
+        case VMX_EXIT_TASK_SWITCH:
+
+        /* Instruction specfic exits: */
+        case VMX_EXIT_IO_INSTR:
+        case VMX_EXIT_CPUID:
+        case VMX_EXIT_RDTSC:
+        case VMX_EXIT_RDTSCP:
+        case VMX_EXIT_MOV_CRX:
+        case VMX_EXIT_MWAIT:
+        case VMX_EXIT_MONITOR:
+        case VMX_EXIT_RDMSR:
+        case VMX_EXIT_WRMSR:
+        case VMX_EXIT_MOV_DRX:
+        case VMX_EXIT_HLT:
+        case VMX_EXIT_INVD:
+        case VMX_EXIT_INVLPG:
+        case VMX_EXIT_RSM:
+        case VMX_EXIT_PAUSE:
+        case VMX_EXIT_XDTR_ACCESS:
+        case VMX_EXIT_TR_ACCESS:
+        case VMX_EXIT_WBINVD:
+        case VMX_EXIT_XSETBV:
+        case VMX_EXIT_RDRAND:
+        case VMX_EXIT_INVPCID:
+        case VMX_EXIT_GETSEC:
+        case VMX_EXIT_RDPMC:
+        case VMX_EXIT_VMCALL:
+        case VMX_EXIT_VMCLEAR:
+        case VMX_EXIT_VMLAUNCH:
+        case VMX_EXIT_VMPTRLD:
+        case VMX_EXIT_VMPTRST:
+        case VMX_EXIT_VMREAD:
+        case VMX_EXIT_VMRESUME:
+        case VMX_EXIT_VMWRITE:
+        case VMX_EXIT_VMXOFF:
+        case VMX_EXIT_VMXON:
+        case VMX_EXIT_INVEPT:
+        case VMX_EXIT_INVVPID:
+        case VMX_EXIT_VMFUNC:
+        {
+            int rc2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
+            rc2    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
+            AssertRCReturn(rc2, rc2);
+            if (   pMixedCtx->rip != uRipStart
+                || pMixedCtx->cs.Sel != uCsStart)
+                return VINF_EM_DBG_STEPPED;
+            break;
+        }
+    }
+
+    /*
+     * Normal processing.
+     */
+#ifdef HMVMX_USE_FUNCTION_TABLE
+    return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
+#else
+    return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
+#endif
+}
+
 
 #ifdef DEBUG

@@ -10915,5 +11007,5 @@
      * interpreting the instruction.
      */
-    Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
+    Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
     AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
     if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))

@@ -10983,5 +11075,5 @@
      * IN/OUT - I/O instruction.
      */
-    Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
+    Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
     uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
     Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));

@@ -11306,5 +11398,5 @@
     rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
-    Log4(("CS:RIP=%04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
+    Log4(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
 
     PVM pVM = pVCpu->CTX_SUFF(pVM);

@@ -11433,6 +11525,6 @@
     TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
 
-    Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:EIP=%04x:%#RX64\n", pVmxTransient->uExitQualification, GCPhys,
-          uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
+    Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:EIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
+          uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
 
     /* Handle the pagefault trap for the nested shadow table. */

@@ -11688,6 +11780,6 @@
         rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
         AssertRCReturn(rc, rc);
-        Log4(("#GP Gst: CS:RIP %04x:%#RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
-              pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
+        Log4(("#GP Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
+              pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
         hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
                                pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);

@@ -11716,4 +11808,4 @@
     rc = VINF_SUCCESS;
     Assert(cbOp == pDis->cbInstr);
-    Log4(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
+    Log4(("#GP Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
     switch (pDis->pCurInstr->uOpcode)

@@ -11752,5 +11844,5 @@
         case OP_POPF:
         {
-            Log4(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
+            Log4(("POPF CS:RIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
             uint32_t cbParm;
             uint32_t uMask;

@@ -11791,6 +11883,6 @@
             pMixedCtx->rip += pDis->cbInstr;
             HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
-                                | HM_CHANGED_GUEST_RSP
-                                | HM_CHANGED_GUEST_RFLAGS);
+                                  | HM_CHANGED_GUEST_RSP
+                                  | HM_CHANGED_GUEST_RFLAGS);
             /* Generate a pending-debug exception when stepping over POPF regardless of how POPF modifies EFLAGS.TF. */
             if (fStepping)

@@ -11883,5 +11975,5 @@
             if (fStepping)
                 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
-            Log4(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
+            Log4(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
             break;
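What the change does, in brief: the single-stepping run loop (hmR0VmxRunGuestCodeStep) records CS:RIP before entering the guest, and the new hmR0VmxHandleExitStep filter compares the current CS:RIP against that snapshot on every VM-exit that can only be raised by an instruction actually executing (I/O, CPUID, MSR accesses, and so on). On the first mismatch the guest has provably advanced by an instruction and the loop returns VINF_EM_DBG_STEPPED; asynchronous exits such as external interrupts never complete a step, because the guest may not have executed anything before they were delivered. The stand-alone sketch below models only that decision; the CPUSTATE type, the EXIT_* constants and stepCompleted() are simplified stand-ins, not VirtualBox APIs.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical, simplified stand-ins for the VMX exit codes and CPU state. */
    typedef struct CPUSTATE { uint16_t uCs; uint64_t uRip; } CPUSTATE;
    enum { EXIT_IO, EXIT_CPUID, EXIT_HLT,        /* instruction-style exits */
           EXIT_EXT_INT, EXIT_PREEMPT_TIMER };   /* asynchronous exits      */

    /* Returns true once an instruction-style exit shows CS:RIP has moved past
       the starting point recorded when stepping began. */
    static bool stepCompleted(const CPUSTATE *pCpu, uint32_t uExitReason,
                              uint16_t uCsStart, uint64_t uRipStart)
    {
        switch (uExitReason)
        {
            case EXIT_IO:
            case EXIT_CPUID:
            case EXIT_HLT:
                return pCpu->uRip != uRipStart
                    || pCpu->uCs  != uCsStart;
            default:            /* EXIT_EXT_INT, EXIT_PREEMPT_TIMER, ... */
                return false;   /* not provably advanced; keep running   */
        }
    }

The real filter additionally special-cases VMX_EXIT_XCPT_OR_NMI so that NMIs go straight to the regular handler, and when the step is not yet complete it falls through to the normal exit dispatch (g_apfnVMExitHandlers or hmR0VmxHandleExit).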
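The other half of the change is the new fStepping parameter to hmR0VmxPreRunGuest: while stepping, VMCPU_FF_TIMER and VMCPU_FF_PDM_CRITSECT are masked out of the forced-action check, so a pending timer or PDM critical-section action alone no longer bounces the stepping loop back to ring-3 before the guest gets to execute an instruction. A minimal sketch of that masking, with made-up flag values (the real VMCPU_FF_* bits differ):

    #include <stdbool.h>
    #include <stdint.h>

    #define FF_TIMER        UINT32_C(0x0001)  /* made-up bit values          */
    #define FF_PDM_CRITSECT UINT32_C(0x0002)
    #define FF_TO_R3_MASK   UINT32_C(0x00ff)  /* everything forcing ring-3   */

    /* Mirrors the reworked condition: when stepping, timer and critical-
       section forced actions on their own do not force a return to ring-3. */
    static bool mustGoToRing3(uint32_t fPendingFlags, bool fStepping)
    {
        if (!(fPendingFlags & FF_TO_R3_MASK))
            return false;                 /* nothing relevant is pending  */
        if (!fStepping)
            return true;                  /* normal loop: any flag counts */
        return (fPendingFlags & (FF_TO_R3_MASK & ~(FF_TIMER | FF_PDM_CRITSECT))) != 0;
    }

With these definitions, mustGoToRing3(FF_TIMER, false) is true while mustGoToRing3(FF_TIMER, true) is false; the !fStepping test short-circuits first so the common non-stepping path stays cheap, which is what the "Optimized for the non-stepping case" comment in the diff refers to.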