Changeset 58118 in vbox
- Timestamp:
- Oct 8, 2015 4:04:59 PM (9 years ago)
- Location:
- trunk/src/VBox/VMM
- Files:
- 4 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMAll/GIMAllHv.cpp
r58116 r58118 753 753 754 754 /* 755 * Perform the hypercall and update RIP.755 * Update RIP and perform the hypercall. 756 756 */ 757 pCtx->rip += pDis->cbInstr; 757 758 rc = gimHvHypercall(pVCpu, pCtx); 758 pCtx->rip += pDis->cbInstr; 759 return rc; 760 } 761 return VERR_GIM_OPERATION_FAILED; 762 } 763 return VERR_GIM_OPERATION_FAILED; 764 } 765 759 } 760 else 761 rc = VERR_GIM_OPERATION_FAILED; 762 } 763 return rc; 764 } 765 -
trunk/src/VBox/VMM/VMMAll/GIMAllKvm.cpp
r58116 r58118 409 409 410 410 /* 411 * Perform the hypercall and update RIP.411 * Update RIP and perform the hypercall. 412 412 * 413 413 * For HM, we can simply resume guest execution without performing the hypercall now and … … 419 419 if (RT_SUCCESS(rc)) 420 420 { 421 int rc2 = gimKvmHypercall(pVCpu, pCtx);422 AssertRC(rc2);423 421 pCtx->rip += pDis->cbInstr; 424 } 425 return rc; 426 } 427 } 428 429 return VERR_GIM_OPERATION_FAILED; 430 } 431 422 rc = gimKvmHypercall(pVCpu, pCtx); 423 } 424 } 425 else 426 rc = VERR_GIM_OPERATION_FAILED; 427 } 428 return rc; 429 } 430 -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r58116 r58118 1444 1444 * the VMM level like the VT-x implementations does. 1445 1445 */ 1446 bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);1446 bool const fStepping = pVCpu->hm.s.fSingleInstruction; 1447 1447 if (fStepping) 1448 1448 { … … 1452 1452 fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */ 1453 1453 } 1454 else 1455 Assert(!DBGFIsStepping(pVCpu)); 1454 1456 1455 1457 if ( fStepping … … 3474 3476 3475 3477 int rc; 3476 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))3478 if (!pVCpu->hm.s.fSingleInstruction) 3477 3479 rc = hmR0SvmRunGuestCodeNormal(pVM, pVCpu, pCtx); 3478 3480 else … … 5157 5159 if (pVCpu->hm.s.fHypercallsEnabled) 5158 5160 { 5161 hmR0SvmUpdateRip(pVCpu, pCtx, 3); 5159 5162 rc = GIMHypercall(pVCpu, pCtx); 5160 if ( rc == VINF_SUCCESS 5161 || rc == VINF_GIM_R3_HYPERCALL) 5162 { 5163 /* If the hypercall changes anything other than guest general-purpose registers, 5164 we would need to reload the guest changed bits here before VM-reentry. */ 5165 hmR0SvmUpdateRip(pVCpu, pCtx, 3); 5166 return rc; 5167 } 5163 /* If the hypercall changes anything other than guest general-purpose registers, 5164 we would need to reload the guest changed bits here before VM-entry. */ 5165 return rc; 5168 5166 } 5169 5167 else -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r58116 r58118 4008 4008 bool fInterceptDB = false; 4009 4009 bool fInterceptMovDRx = false; 4010 if ( pVCpu->hm.s.fSingleInstruction 4011 || DBGFIsStepping(pVCpu)) 4010 if (pVCpu->hm.s.fSingleInstruction) 4012 4011 { 4013 4012 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */ … … 7359 7358 && !pVCpu->hm.s.fSingleInstruction) 7360 7359 { 7360 Assert(!DBGFIsStepping(pVCpu)); 7361 7361 int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); 7362 7362 AssertRC(rc); … … 7392 7392 /** 7393 7393 * Sets a pending-debug exception to be delivered to the guest if the guest is 7394 * single-stepping .7394 * single-stepping in the VMCS. 7395 7395 * 7396 7396 * @param pVCpu Pointer to the VMCPU. 7397 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 7398 * out-of-sync. Make sure to update the required fields 7399 * before using them. 7400 */ 7401 DECLINLINE(void) hmR0VmxSetPendingDebugXcpt(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7397 */ 7398 DECLINLINE(void) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu) 7402 7399 { 7403 7400 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS)); NOREF(pVCpu); 7404 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */ 7405 { 7406 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS); 7407 AssertRC(rc); 7408 } 7401 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS); 7402 AssertRC(rc); 7409 7403 } 7410 7404 … … 7486 7480 || fBlockMovSS) 7487 7481 { 7488 if ( !pVCpu->hm.s.fSingleInstruction 7489 && !DBGFIsStepping(pVCpu)) 7482 if (!pVCpu->hm.s.fSingleInstruction) 7490 7483 { 7491 7484 /* … … 7494 7487 * See Intel spec. 27.3.4 "Saving Non-Register State". 
7495 7488 */ 7489 Assert(!DBGFIsStepping(pVCpu)); 7496 7490 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); 7497 7491 AssertRCReturn(rc2, rc2); 7498 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx); 7492 if (pMixedCtx->eflags.Bits.u1TF) 7493 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 7499 7494 } 7500 7495 else if (pMixedCtx->eflags.Bits.u1TF) … … 8912 8907 8913 8908 int rc; 8914 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))8909 if (!pVCpu->hm.s.fSingleInstruction) 8915 8910 rc = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx); 8916 8911 else … … 9181 9176 * See Intel spec. 32.2.1 "Debug Exceptions". 9182 9177 */ 9183 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx); 9178 if ( !pVCpu->hm.s.fSingleInstruction 9179 && pMixedCtx->eflags.Bits.u1TF) 9180 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 9184 9181 9185 9182 return rc; … … 10145 10142 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* For long-mode checks in gimKvmHypercall(). */ 10146 10143 #endif 10144 rc |= hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient); 10147 10145 AssertRCReturn(rc, rc); 10148 10146 10149 10147 rc = GIMHypercall(pVCpu, pMixedCtx); 10150 if ( rc == VINF_SUCCESS 10151 || rc == VINF_GIM_R3_HYPERCALL) 10152 { 10153 /* If the hypercall changes anything other than guest general-purpose registers, 10154 we would need to reload the guest changed bits here before VM-reentry. */ 10155 hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient); 10156 return rc; 10157 } 10148 /* If the hypercall changes anything other than guest general-purpose registers, 10149 we would need to reload the guest changed bits here before VM-entry. */ 10150 return rc; 10158 10151 } 10159 10152 else 10153 { 10160 10154 Log4(("hmR0VmxExitVmcall: Hypercalls not enabled\n")); 10161 10162 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx); 10155 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx); 10156 } 10157 10163 10158 return VINF_SUCCESS; 10164 10159 } … … 10910 10905 10911 10906 /* Refer Intel spec. 27-5. 
"Exit Qualifications for I/O Instructions" for the format. */ 10912 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification); 10913 uint8_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification); 10914 bool fIOWrite = ( VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification) 10915 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT); 10916 bool fIOString = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification); 10917 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF); 10907 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification); 10908 uint8_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification); 10909 bool fIOWrite = ( VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification) 10910 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT); 10911 bool fIOString = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification); 10912 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF); 10913 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction; 10918 10914 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1); 10919 10915 … … 11049 11045 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS); 11050 11046 } 11051 else if (fStepping) 11052 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx); 11047 else if ( !fDbgStepping 11048 && fGstStepping) 11049 { 11050 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 11051 } 11053 11052 11054 11053 /* … … 11283 11282 11284 11283 int rc = VERR_INTERNAL_ERROR_5; 11285 if ( !DBGFIsStepping(pVCpu) 11286 && !pVCpu->hm.s.fSingleInstruction 11284 if ( !pVCpu->hm.s.fSingleInstruction 11287 11285 && !pVmxTransient->fWasHyperDebugStateActive) 11288 11286 { 11287 Assert(!DBGFIsStepping(pVCpu)); 11288 11289 11289 /* Don't intercept MOV DRx and #DB any more. 
*/ 11290 11290 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; … … 11740 11740 uint32_t cbOp = 0; 11741 11741 PVM pVM = pVCpu->CTX_SUFF(pVM); 11742 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction; 11742 11743 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp); 11743 11744 if (RT_SUCCESS(rc)) … … 11754 11755 pMixedCtx->rip += pDis->cbInstr; 11755 11756 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 11756 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx); 11757 if ( !fDbgStepping 11758 && pMixedCtx->eflags.Bits.u1TF) 11759 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 11757 11760 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli); 11758 11761 break; … … 11771 11774 } 11772 11775 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 11773 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx); 11776 if ( !fDbgStepping 11777 && pMixedCtx->eflags.Bits.u1TF) 11778 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 11774 11779 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti); 11775 11780 break; … … 11791 11796 uint32_t cbParm; 11792 11797 uint32_t uMask; 11793 bool f Stepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);11798 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF); 11794 11799 if (pDis->fPrefix & DISPREFIX_OPSIZE) 11795 11800 { … … 11829 11834 | HM_CHANGED_GUEST_RSP 11830 11835 | HM_CHANGED_GUEST_RFLAGS); 11831 /* Generate a pending-debug exception when stepping over POPF regardless of how POPF modifies EFLAGS.TF. */ 11832 if (fStepping) 11833 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx); 11834 11836 /* Generate a pending-debug exception when the guest stepping over POPF regardless of how 11837 POPF restores EFLAGS.TF. 
*/ 11838 if ( !fDbgStepping 11839 && fGstStepping) 11840 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 11835 11841 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf); 11836 11842 break; … … 11881 11887 | HM_CHANGED_GUEST_RSP 11882 11888 | HM_CHANGED_GUEST_RFLAGS); 11883 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx); 11889 if ( !fDbgStepping 11890 && pMixedCtx->eflags.Bits.u1TF) 11891 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 11884 11892 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf); 11885 11893 break; … … 11890 11898 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel 11891 11899 * instruction reference. */ 11892 RTGCPTR GCPtrStack = 0;11893 uint32_t uMask = 0xffff;11894 bool f Stepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);11900 RTGCPTR GCPtrStack = 0; 11901 uint32_t uMask = 0xffff; 11902 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF); 11895 11903 uint16_t aIretFrame[3]; 11896 11904 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE)) … … 11925 11933 | HM_CHANGED_GUEST_RFLAGS); 11926 11934 /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */ 11927 if (fStepping) 11928 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx); 11935 if ( !fDbgStepping 11936 && fGstStepping) 11937 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 11929 11938 Log4(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip)); 11930 11939 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
Note:
See TracChangeset
for help on using the changeset viewer.