Changeset 45387 in vbox
- Timestamp: Apr 5, 2013 9:37:40 PM (12 years ago)
- svn:sync-xref-src-repo-rev: 84812
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r45384 → r45387)

@@ -376 +376 @@
 
 /**
- * Disables longjmps to ring-3.
- * @param pVCpu Pointer to the VMCPU.
- */
-DECLINLINE(void) hmR0VmxCallRing3Disable(PVMCPU pVCpu)
-{
-    while (VMMRZCallRing3IsEnabled(pVCpu))
-        VMMRZCallRing3Disable(pVCpu);
-    Assert(VMMR0IsLogFlushDisabled(pVCpu));
-}
-
-
-/**
- * Enables longjmps to ring-3.
- * @param pVCpu Pointer to the VMCPU.
- */
-DECLINLINE(void) hmR0VmxCallRing3Enable(PVMCPU pVCpu)
-{
-    while (!VMMRZCallRing3IsEnabled(pVCpu))
-        VMMRZCallRing3Enable(pVCpu);
-    Assert(!VMMR0IsLogFlushDisabled(pVCpu));
-}
-
-
-/**
  * Updates the VM's last error record. If there was a VMX instruction error,
  * reads the error data from the VMCS and updates VCPU's last error record as

@@ -6338 +6314 @@
 {
     ASMSetFlags(pVmxTransient->uEFlags);
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending);
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
     /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
     return VINF_EM_RAW_INTERRUPT;

@@ -6374 +6350 @@
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+    Assert(VMMR0IsLogFlushDisabled(pVCpu));
 
 #ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION

@@ -6486 +6463 @@
 
     VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
-    hmR0VmxCallRing3Enable(pVCpu);          /* It is now safe to do longjmps to ring-3!!! */
+    VMMRZCallRing3Enable(pVCpu);            /* It is now safe to do longjmps to ring-3!!! */
 
     /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */

@@ -6554 +6531 @@
      * This also disables flushing of the R0-logger instance (if any).
      */
-    hmR0VmxCallRing3Disable(pVCpu);
+    VMMRZCallRing3Disable(pVCpu);
     VMMRZCallRing3RemoveNotification(pVCpu);
     hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);

@@ -6641 +6618 @@
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
     return VINF_SUCCESS;
 }

@@ -6655 +6632 @@
     AssertRCReturn(rc, rc);
 
-    uint8_t u8IntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
+    uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
     Assert(   !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT)
-           && u8IntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
+           && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
 
-    if (u8IntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
+    if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
         return VINF_EM_RAW_INTERRUPT;
-
-    uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
-    Assert(VMX_EXIT_INTERRUPTION_INFO_VALID(uExitIntrInfo));
 
     /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */

@@ -6672 +6646 @@
         return rc;
 
-    uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
-    switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo))
+    uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
+    uint32_t uVector       = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
+    switch (uIntrType)
     {
         case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT:   /* Software exception. (#BP or #OF) */

@@ -6710 +6685 @@ (alignment-only change to the two rc assignments)
     Assert(pVM->hm.s.vmx.pRealModeTSS);
     Assert(PDMVmmDevHeapIsEnabled(pVM));
     rc  = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVmxTransient);
     AssertRCReturn(rc, rc);
     rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,

@@ -7508 +7483 @@
         case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE:       /* MOV to CRx */
         {
+#if 0
             /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
             rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+#else
+            rc  = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
+            rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+            rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+#endif
             AssertRCReturn(rc, rc);
 

@@ -7552 +7533 @@
         {
             /* EMInterpretCRxRead() requires EFER MSR, CS. */
-            rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+            rc = hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
             AssertRCReturn(rc, rc);
             Assert(   !pVM->hm.s.fNestedPaging

@@ -7576 +7557 @@
             rc |= EMInterpretCLTS(pVM, pVCpu);
             AssertRCReturn(rc, rc);
-            if (RT_LIKELY(rc == VINF_SUCCESS))
-                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
             Log(("CRX CLTS write rc=%d\n", rc));

@@ -7586 +7566 @@
         {
             rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
-            rc |= EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
             AssertRCReturn(rc, rc);
+            rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
             if (RT_LIKELY(rc == VINF_SUCCESS))
                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
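The bulk of this file's change drops the local hmR0VmxCallRing3Disable/Enable wrappers, which spun on VMMRZCallRing3IsEnabled, and calls VMMRZCallRing3Disable/Enable directly, with the log-flush assertion moved to the caller. The standalone sketch below only models the disable/enable idea, assuming a simple nesting counter; the DEMO types and functions are invented for illustration and are not the VirtualBox implementation.

/* Standalone sketch only: a nesting counter behind a disable/enable pair,
 * in the spirit of VMMRZCallRing3Disable/Enable. Not VirtualBox code. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct DEMOCPU
{
    int cDisabled;  /* nesting depth; 0 means longjmps to ring-3 are allowed */
} DEMOCPU;

static void demoCallRing3Disable(DEMOCPU *pVCpu)
{
    pVCpu->cDisabled++;             /* one call per window when callers stay balanced */
}

static void demoCallRing3Enable(DEMOCPU *pVCpu)
{
    assert(pVCpu->cDisabled > 0);   /* catches an unbalanced enable */
    pVCpu->cDisabled--;
}

static bool demoCallRing3IsEnabled(const DEMOCPU *pVCpu)
{
    return pVCpu->cDisabled == 0;
}

int main(void)
{
    DEMOCPU VCpu = { 0 };

    demoCallRing3Disable(&VCpu);                /* enter the no-longjmp window */
    assert(!demoCallRing3IsEnabled(&VCpu));
    /* ... commit to guest execution, world switch, VM-exit handling ... */
    demoCallRing3Enable(&VCpu);                 /* longjmps are safe again */
    assert(demoCallRing3IsEnabled(&VCpu));

    printf("enabled=%d\n", demoCallRing3IsEnabled(&VCpu));
    return 0;
}

Calling the VMMRZ API directly keeps the no-longjmp window explicit at each call site instead of hiding it behind a wrapper that loops until the state flips.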
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r45305 → r45387)

@@ -1376 +1376 @@
     if (RTThreadPreemptIsPending(NIL_RTTHREAD))
     {
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending);
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
         rc = VINF_EM_RAW_INTERRUPT;
         goto end;
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r45378 → r45387)

@@ -3150 +3150 @@
     if (RTThreadPreemptIsPending(NIL_RTTHREAD))
     {
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending);
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
         rc = VINF_EM_RAW_INTERRUPT;
         goto end;

@@ -3527 +3527 @@
         Assert(exitReason == VMX_EXIT_EXT_INT);
         /* External interrupt; leave to allow it to be dispatched again. */
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
         rc = VINF_EM_RAW_INTERRUPT;
         break;
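In both run loops the pre-entry bail-out on RTThreadPreemptIsPending is now attributed to StatPendingHostIrq, while StatExitExtInt is reserved for genuine external-interrupt VM-exits. The standalone sketch below only illustrates that accounting split; every DEMO* name is an invented stand-in for the IPRT/HM symbols shown in the hunks above.

/* Standalone sketch only: the two counters answer different questions --
 * "how often did we refuse to enter the guest because the host had work
 * pending?" vs. "how often did the guest exit on an external interrupt?". */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_VINF_SUCCESS           0
#define DEMO_VINF_EM_RAW_INTERRUPT  1

typedef struct DEMOSTATS
{
    unsigned cPendingHostIrq;   /* ~ StatPendingHostIrq: bumped before guest entry   */
    unsigned cExitExtInt;       /* ~ StatExitExtInt: bumped on an ext-interrupt exit */
} DEMOSTATS;

/* Stand-in for RTThreadPreemptIsPending(NIL_RTTHREAD). */
static bool demoHostPreemptionPending(void)
{
    return true;
}

static int demoRunGuestOnce(DEMOSTATS *pStats)
{
    if (demoHostPreemptionPending())
    {
        pStats->cPendingHostIrq++;          /* never entered the guest */
        return DEMO_VINF_EM_RAW_INTERRUPT;  /* go back and let the host run */
    }
    /* ... enter the guest; an external-interrupt VM-exit would instead do:
     *     pStats->cExitExtInt++;
     *     return DEMO_VINF_EM_RAW_INTERRUPT;                              */
    return DEMO_VINF_SUCCESS;
}

int main(void)
{
    DEMOSTATS Stats = { 0, 0 };
    demoRunGuestOnce(&Stats);
    printf("PendingHostIrq=%u ExitExtInt=%u\n", Stats.cPendingHostIrq, Stats.cExitExtInt);
    return 0;
}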
trunk/src/VBox/VMM/VMMR3/HM.cpp (r45378 → r45387)

@@ -576 +576 @@
         HM_REG_COUNTER(&pVCpu->hm.s.StatExitIntWindow,          "/HM/CPU%d/Exit/IntWindow");
         HM_REG_COUNTER(&pVCpu->hm.s.StatExitMaxResume,          "/HM/CPU%d/Exit/MaxResume");
-        HM_REG_COUNTER(&pVCpu->hm.s.StatExitPreemptPending,     "/HM/CPU%d/Exit/PreemptPending");
+        HM_REG_COUNTER(&pVCpu->hm.s.StatExitExtInt,             "/HM/CPU%d/Exit/ExtInt");
         HM_REG_COUNTER(&pVCpu->hm.s.StatExitPreemptTimer,       "/HM/CPU%d/Exit/PreemptTimer");
         HM_REG_COUNTER(&pVCpu->hm.s.StatExitTprBelowThreshold,  "/HM/CPU%d/Exit/TprBelowThreshold");
trunk/src/VBox/VMM/include/HMInternal.h (r45378 → r45387)

@@ -854 +854 @@
     STAMCOUNTER             StatExitIntWindow;
     STAMCOUNTER             StatExitMaxResume;
-    STAMCOUNTER             StatExitPreemptPending;
+    STAMCOUNTER             StatExitExtInt;
     STAMCOUNTER             StatExitPreemptTimer;
     STAMCOUNTER             StatExitTprBelowThreshold;
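These last two hunks are the other end of the counter rename: the STAMCOUNTER field in HMInternal.h and its registration path in HM.cpp pair up with the STAM_COUNTER_INC calls in the ring-0 files above. The sketch below mirrors that declare/register/increment flow with invented DEMO types; it is not the real STAM API.

/* Standalone sketch only: declare a counter, register it under a path,
 * bump it where the event happens. DEMOCOUNTER and the demo* helpers are
 * invented; the real code uses STAMCOUNTER, HM_REG_COUNTER and
 * STAM_COUNTER_INC as shown in the hunks above. */
#include <stdint.h>
#include <stdio.h>

typedef struct DEMOCOUNTER
{
    const char *pszPath;    /* e.g. "/HM/CPU0/Exit/ExtInt" */
    uint64_t    cEvents;    /* number of times the event was seen */
} DEMOCOUNTER;

/* ~ HM_REG_COUNTER() in HM.cpp: give the counter a name in the stats tree. */
static void demoRegCounter(DEMOCOUNTER *pCounter, const char *pszPath)
{
    pCounter->pszPath = pszPath;
    pCounter->cEvents = 0;
}

/* ~ STAM_COUNTER_INC() in the ring-0 exit handlers. */
#define DEMO_COUNTER_INC(a_pCounter) ((a_pCounter)->cEvents++)

int main(void)
{
    DEMOCOUNTER StatExitExtInt;                 /* ~ the field in HMInternal.h */
    demoRegCounter(&StatExitExtInt, "/HM/CPU0/Exit/ExtInt");

    DEMO_COUNTER_INC(&StatExitExtInt);          /* an external-interrupt VM-exit */
    printf("%s = %llu\n", StatExitExtInt.pszPath, (unsigned long long)StatExitExtInt.cEvents);
    return 0;
}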