Changeset 79659 in vbox for trunk/src/VBox/VMM

Timestamp: Jul 10, 2019 8:38:39 AM (6 years ago)
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(r79653)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(r79659)
@@ -26,4 +26,5 @@
 #include <iprt/thread.h>
 #include <iprt/mem.h>
+#include <iprt/mp.h>
 
 #include <VBox/vmm/pdmapi.h>
@@ -10598,14 +10599,66 @@
 
 /**
- * Wrapper for dispatching host NMIs.
+ * Worker function passed to RTMpOnSpecific() that is to be called on the target
+ * CPU.
+ *
+ * @param   idCpu       The ID for the CPU the function is called on.
+ * @param   pvUser1     Null, not used.
+ * @param   pvUser2     Null, not used.
+ */
+static DECLCALLBACK(void) hmR0DispatchHostNmi(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    RT_NOREF2(pvUser1, pvUser2);
+    VMXDispatchHostNmi();
+}
+
+
+/**
+ * Dispatches a host NMI on the host CPU that received the NMI.
  *
  * @returns VBox status code.
- * @param   pVCpu   The cross context virtual CPU structure.
- */
-static int hmR0VmxExitHostNmi(PVMCPU pVCpu)
-{
-    VMXDispatchHostNmi();
-    STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
-    return VINF_SUCCESS;
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pVmcsInfo   The VMCS info. object corresponding to the VMCS that was
+ *                      executing when receiving the host NMI.
+ */
+static int hmR0VmxExitHostNmi(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
+{
+    RTCPUID const idCpu = pVmcsInfo->idHostCpu;
+
+    /*
+     * We don't want to delay dispatching the NMI any more than we have to. However,
+     * we have already chosen -not- to dispatch NMIs when interrupts were still disabled
+     * after executing guest or nested-guest code, for the following reasons:
+     *
+     *   - We would need to perform VMREADs with interrupts disabled, which is orders
+     *     of magnitude worse with nested virtualization.
+     *
+     *   - It affects the common VM-exit scenario and keeps interrupts disabled for a
+     *     longer period of time just for handling an edge case like host NMIs.
+     *
+     * Let's cover the most likely scenario first. Check if we are on the target CPU
+     * and dispatch the NMI right away. This should be much faster than calling into
+     * the RTMpOnSpecific() machinery.
+     */
+    bool fDispatched = false;
+    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
+    if (idCpu == RTMpCpuId())
+    {
+        VMXDispatchHostNmi();
+        fDispatched = true;
+    }
+    ASMSetFlags(fEFlags);
+    if (fDispatched)
+    {
+        STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * RTMpOnSpecific() waits until the worker function has run on the target CPU. So
+     * there should be no race or recursion even if we are unlucky enough to be preempted
+     * (to the target CPU) without dispatching the host NMI above.
+     */
+    STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGCIpi);
+    return RTMpOnSpecific(idCpu, &hmR0DispatchHostNmi, NULL /* pvUser1 */, NULL /* pvUser2 */);
 }
 
@@ -12656,4 +12709,4 @@
     uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
     if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
-        return hmR0VmxExitHostNmi(pVCpu);
+        return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
 }
@@ -14666,5 +14719,5 @@
      */
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
-    return hmR0VmxExitHostNmi(pVCpu);
+    return hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
 }
 
@@ -16782,5 +16835,5 @@
      */
     case VMX_EXIT_INT_INFO_TYPE_NMI:
-        return hmR0VmxExitHostNmi(pVCpu);
+        return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
 
 /*
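For readers unfamiliar with the IPRT multiprocessor primitives this change relies on, below is a minimal, self-contained sketch of the same fast-path/cross-call pattern. It uses only APIs that appear in the diff (RTMpCpuId(), RTMpOnSpecific(), ASMIntDisableFlags()/ASMSetFlags()); the exampleWorker and exampleRunOnCpu names are hypothetical stand-ins for hmR0DispatchHostNmi and hmR0VmxExitHostNmi, and the sketch omits the VMX- and statistics-specific details of the real code.

#include <iprt/asm-amd64-x86.h> /* ASMIntDisableFlags, ASMSetFlags */
#include <iprt/mp.h>            /* RTMpCpuId, RTMpOnSpecific */
#include <iprt/errcore.h>       /* VINF_SUCCESS */

/* Hypothetical worker with the RTMpOnSpecific() callback signature used in
   the changeset; it is executed on the target CPU. */
static DECLCALLBACK(void) exampleWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    RT_NOREF3(idCpu, pvUser1, pvUser2);
    /* Per-CPU work goes here; the changeset calls VMXDispatchHostNmi(). */
}

/*
 * Hypothetical helper illustrating the fast-path/slow-path split: run the
 * work directly when we are already on the target CPU, falling back to a
 * synchronous RTMpOnSpecific() cross-call otherwise.
 */
static int exampleRunOnCpu(RTCPUID idCpu)
{
    bool fDone = false;

    /* Disable interrupts so we cannot be rescheduled to another CPU between
       the RTMpCpuId() check and the work itself. */
    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
    if (idCpu == RTMpCpuId())
    {
        exampleWorker(idCpu, NULL, NULL);
        fDone = true;
    }
    ASMSetFlags(fEFlags);
    if (fDone)
        return VINF_SUCCESS;

    /* Slow path: returns only after the worker has run on the target CPU. */
    return RTMpOnSpecific(idCpu, &exampleWorker, NULL /* pvUser1 */, NULL /* pvUser2 */);
}

The interrupt-disable window is what makes the fast path sound: without it, the thread could migrate to another CPU after the RTMpCpuId() check but before the work runs. The slow path stays correct even if the thread is subsequently preempted onto the target CPU, because RTMpOnSpecific() runs the worker on the target CPU synchronously rather than assuming anything about the calling thread's placement, which is exactly the race/recursion argument made in the changeset's final comment.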