Changeset 99653 in vbox

- Timestamp: May 8, 2023 7:17:30 AM
- Location: trunk/src/VBox/VMM
- Files: 3 edited
Legend:
- Unmodified: shown with both the old and the new line number
- Added: marked "+" followed by the new line number
- Removed: marked "-" followed by the old line number
trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h
r99259 → r99653

  3040 3040                            | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
  3041 3041        vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
+ 3042             Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
  3042 3043    }
  3043 3044
…
  3055 3056                            | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
  3056 3057        vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
+ 3058             Log4Func(("NMI pending injection\n"));
  3057 3059    }
  3058 3060
…
  4374 4376     * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
  4375 4377     * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
+ 4378          *
+ 4379          * Interrupt-window and NMI-window VM-exits for the nested-guest need not be
+ 4380          * handled here. They'll be handled by the hardware while executing the nested-guest
+ 4381          * or by us when we injecting events that are not part of VM-entry of the nested-guest.
  4376 4382     */
  4377 4383    if (fIsNestedGuest)
  4378 4384    {
- 4379             /* Pending nested-guest APIC-write. */
+ 4385             /* Pending nested-guest APIC-write (may or may not cause a VM-exit). */
  4380 4386        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
  4381 4387        {
…
  4383 4389            VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
  4384 4390            Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
- 4385                 return rcStrict;
+ 4391                 if (   rcStrict == VINF_SUCCESS
+ 4392                     && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
+ 4393                     return rcStrict;
  4386 4394        }
  4387 4395
…
  4516 4524            AssertRC(rc);
  4517 4525        }
+ 4526             Log4Func(("Enabled interrupt-window exiting\n"));
  4518 4527    } /* else we will deliver interrupts whenever the guest Vm-exits next and is in a state to receive the interrupt. */
  4519 4528 }
…
  4533 4542        int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
  4534 4543        AssertRC(rc);
+ 4544             Log4Func(("Disabled interrupt-window exiting\n"));
  4535 4545    }
  4536 4546 }
…
  4553 4563        int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
  4554 4564        AssertRC(rc);
- 4555              Log4Func(("Setup NMI-window exiting\n"));
+ 4565             Log4Func(("Enabled NMI-window exiting\n"));
  4556 4566    }
  4557 4567 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
…
  4572 4582        int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
  4573 4583        AssertRC(rc);
+ 4584             Log4Func(("Disabled NMI-window exiting\n"));
  4574 4585    }
  4575 4586 }
…
  4852 4863  * @param   pfIntrState     Where to store the VT-x guest-interruptibility state.
  4853 4864  */
- 4854       static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
+ 4865       static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
  4855 4866 {
  4856 4867     Assert(pfIntrState);
…
  4876 4887      * NMIs take priority over external interrupts.
  4877 4888      */
- 4878       #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
- 4879           PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
- 4880       #endif
  4881 4889     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
  4882 4890     {
- 4883             /*
- 4884              * For a guest, the FF always indicates the guest's ability to receive an NMI.
- 4885              *
- 4886              * For a nested-guest, the FF always indicates the outer guest's ability to
- 4887              * receive an NMI while the guest-interruptibility state bit depends on whether
- 4888              * the nested-hypervisor is using virtual-NMIs.
- 4889              */
  4890 4891         if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
  4891 4892         {
- 4892       #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
- 4893               if (   fIsNestedGuest
- 4894                   && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
- 4895                   return IEMExecVmxVmexitXcptNmi(pVCpu);
- 4896       #endif
+ 4893                 /* Finally, inject the NMI and we're done. */
  4897 4894             vmxHCSetPendingXcptNmi(pVCpu);
  4898 4895             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
- 4899                  Log4Func(("NMI pending injection\n"));
- 4900
- 4901                  /* We've injected the NMI, bail. */
+ 4896                 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
  4902 4897             return VINF_SUCCESS;
  4903 4898         }
- 4904              if (!fIsNestedGuest)
- 4905                  vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
- 4906          }
+ 4899
+ 4900             /*
+ 4901              * Setup NMI-window exiting and also clear any interrupt-window exiting that might
+ 4902              * still be active. This can happen if we got VM-exits that were higher priority
+ 4903              * than an interrupt-window VM-exit.
+ 4904              */
+ 4905             vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
+ 4906             vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
+ 4907         }
+ 4908         else
+ 4909             Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
  4907 4910
  4908 4911     /*
  4909 4912      * External interrupts (PIC/APIC).
- 4910           * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
- 4911           * We cannot re-request the interrupt from the controller again.
  4912 4913      */
  4913 4914     if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
…
  4918 4919         AssertRC(rc);
  4919 4920
- 4920              /*
- 4921               * We must not check EFLAGS directly when executing a nested-guest, use
- 4922               * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
- 4923               * external interrupts when "External interrupt exiting" is set. This fixes a nasty
- 4924               * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
- 4925               * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
- 4926               *
- 4927               * See Intel spec. 25.4.1 "Event Blocking".
- 4928               */
- 4929              if (CPUMIsGuestPhysIntrEnabled(pVCpu))
+ 4921             if (pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF)
  4930 4922         {
- 4931       #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
- 4932              if (   fIsNestedGuest
- 4933                  && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
- 4934              {
- 4935                  VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
- 4936                  if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
- 4937                      return rcStrict;
- 4938              }
- 4939       #endif
+ 4923                 /*
+ 4924                  * Once PDMGetInterrupt() returns an interrupt we -must- deliver it.
+ 4925                  * We cannot re-request the interrupt from the controller again.
+ 4926                  */
  4940 4927             uint8_t u8Interrupt;
  4941 4928             rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
  4942 4929             if (RT_SUCCESS(rc))
- 4943              {
- 4944       #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
- 4945              if (   fIsNestedGuest
- 4946                  && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
- 4947              {
- 4948                  VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
- 4949                  Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
- 4950                  return rcStrict;
- 4951              }
- 4952       #endif
  4953 4930                 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
- 4954                  Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
- 4955              }
  4956 4931             else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
  4957 4932             {
  4958 4933                 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
- 4959
- 4960                  if (   !fIsNestedGuest
- 4961                      && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
+ 4934                     if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
  4962 4935                     vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
- 4963                  /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
- 4964
  4965 4936                 /*
  4966 4937                  * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
…
  4972 4943                 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
  4973 4944
- 4974                  /* We've injected the interrupt or taken necessary action, bail. */
+ 4945                     /* We must clear interrupt-window exiting for the same reason mentioned above for NMIs. */
+ 4946                     vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
  4975 4947                 return VINF_SUCCESS;
  4976 4948             }
- 4977              if (!fIsNestedGuest)
- 4978                  vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
- 4979          }
- 4980      }
- 4981      else if (!fIsNestedGuest)
+ 4949
+ 4950                 /* Setup interrupt-window exiting. */
+ 4951                 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
+ 4952                 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
+ 4953             }
+ 4954             else
+ 4955             {
+ 4956                 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
+ 4957                 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
+ 4958             }
+ 4959         }
+ 4960         else
  4982 4961     {
  4983 4962         /*
- 4984               * An event is being injected or we are in an interrupt shadow. Check if another event is
- 4985               * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
- 4986               * the pending event.
+ 4963              * An event is being injected or we are in an interrupt shadow.
+ 4964              * If another event is pending currently, instruct VT-x to cause a VM-exit as
+ 4965              * soon as the guest is ready to accept it.
  4987 4966          */
  4988 4967         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
  4989 4968             vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
- 4990              else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
- 4991                  && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
- 4992                  vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
- 4993          }
- 4994          /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
+ 4969             else
+ 4970             {
+ 4971                 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
+ 4972                 if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
+ 4973                     && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
+ 4974                     vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
+ 4975                 else
+ 4976                     Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT));
+ 4977             }
+ 4978         }
  4995 4979
  4996 4980     return VINF_SUCCESS;
  4997 4981 }
+ 4982
+ 4983
+ 4984       #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ 4985       /**
+ 4986        * Evaluates the event to be delivered to the nested-guest and sets it as the
+ 4987        * pending event.
+ 4988        *
+ 4989        * Toggling of interrupt force-flags here is safe since we update TRPM on premature
+ 4990        * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
+ 4991        * NOT restore these force-flags.
+ 4992        *
+ 4993        * @returns Strict VBox status code (i.e. informational status codes too).
+ 4994        * @param   pVCpu           The cross context virtual CPU structure.
+ 4995        * @param   pVmcsInfo       The VMCS information structure.
+ 4996        * @param   pfIntrState     Where to store the VT-x guest-interruptibility state.
+ 4997        *
+ 4998        * @remarks The guest must be in VMX non-root mode.
+ 4999        */
+ 5000       static VBOXSTRICTRC vmxHCEvaluatePendingEventNested(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
+ 5001       {
+ 5002           PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+ 5003
+ 5004           Assert(pfIntrState);
+ 5005           Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
+ 5006           Assert(!TRPMHasTrap(pVCpu));
+ 5007
+ 5008           /*
+ 5009            * Compute/update guest-interruptibility state related FFs.
+ 5010            * The FFs will be used below while evaluating events to be injected.
+ 5011            */
+ 5012           *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
+ 5013
+ 5014           /*
+ 5015            * If we are injecting an event, we must not setup any interrupt/NMI-window
+ 5016            * exiting or we would get into an infinite VM-exit loop. An event that's
+ 5017            * already pending has already performed all necessary checks.
+ 5018            */
+ 5019           if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
+ 5020               return VINF_SUCCESS;
+ 5021
+ 5022           /*
+ 5023            * An event injected by VMLAUNCH/VMRESUME instruction emulation should've been
+ 5024            * made pending (TRPM to HM event) and would be handled above if we resumed
+ 5025            * execution in HM. If somehow we fell back to emulation after the
+ 5026            * VMLAUNCH/VMRESUME instruction, it would have been handled in iemRaiseXcptOrInt
+ 5027            * (calling iemVmxVmexitEvent). Thus, if we get here the nested-hypervisor's VMX
+ 5028            * intercepts should be active and any events pending here have been generated
+ 5029            * while executing the guest in VMX non-root mode after virtual VM-entry completed.
+ 5030            */
+ 5031           Assert(CPUMIsGuestVmxInterceptEvents(pCtx));
+ 5032
+ 5033           /*
+ 5034            * Interrupt shadows can also block NMIs. If we are in an interrupt shadow there's
+ 5035            * nothing more to do here.
+ 5036            *
+ 5037            * See Intel spec. 24.4.2 "Guest Non-Register State".
+ 5038            * See Intel spec. 25.4.1 "Event Blocking".
+ 5039            */
+ 5040           if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
+ 5041           { /* likely */ }
+ 5042           else
+ 5043               return VINF_SUCCESS;
+ 5044
+ 5045           /** @todo SMI. SMIs take priority over NMIs. */
+ 5046
+ 5047           /*
+ 5048            * NMIs.
+ 5049            * NMIs take priority over external interrupts.
+ 5050            *
+ 5051            * NMI blocking is in effect after delivering an NMI until the execution of IRET.
+ 5052            * Only when there isn't any NMI blocking can an NMI-window VM-exit or delivery of an NMI happen.
+ 5053            */
+ 5054           if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
+ 5055           {
+ 5056               /*
+ 5057                * Nested-guest NMI-window exiting.
+ 5058                * The NMI-window exit must happen regardless of whether an NMI is pending
+ 5059                * provided virtual-NMI blocking is not in effect.
+ 5060                *
+ 5061                * See Intel spec. 25.2 "Other Causes Of VM Exits".
+ 5062                */
+ 5063               if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
+ 5064                   && !CPUMIsGuestVmxVirtNmiBlocking(pCtx))
+ 5065               {
+ 5066                   Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
+ 5067                   return IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
+ 5068               }
+ 5069
+ 5070               /*
+ 5071                * For a nested-guest, the FF always indicates the outer guest's ability to
+ 5072                * receive an NMI while the guest-interruptibility state bit depends on whether
+ 5073                * the nested-hypervisor is using virtual-NMIs.
+ 5074                *
+ 5075                * It is very important that we also clear the force-flag if we are causing
+ 5076                * an NMI VM-exit as it is the responsibility of the nested-hypervisor to deal
+ 5077                * with re-injecting or discarding the NMI. This fixes the bug that showed up
+ 5078                * with SMP Windows Server 2008 R2 with Hyper-V enabled, see @bugref{10318#c19}.
+ 5079                */
+ 5080               if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
+ 5081               {
+ 5082                   if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
+ 5083                       return IEMExecVmxVmexitXcptNmi(pVCpu);
+ 5084                   vmxHCSetPendingXcptNmi(pVCpu);
+ 5085                   return VINF_SUCCESS;
+ 5086               }
+ 5087           }
+ 5088
+ 5089           /*
+ 5090            * Nested-guest interrupt-window exiting.
+ 5091            *
+ 5092            * We must cause the interrupt-window exit regardless of whether an interrupt is pending
+ 5093            * provided virtual interrupts are enabled.
+ 5094            *
+ 5095            * See Intel spec. 25.2 "Other Causes Of VM Exits".
+ 5096            * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
+ 5097            */
+ 5098           if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
+ 5099               && CPUMIsGuestVmxVirtIntrEnabled(pCtx))
+ 5100           {
+ 5101               Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
+ 5102               return IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
+ 5103           }
+ 5104
+ 5105           /*
+ 5106            * External interrupts (PIC/APIC).
+ 5107            *
+ 5108            * When "External interrupt exiting" is set the VM-exit happens regardless of RFLAGS.IF.
+ 5109            * When it isn't set, RFLAGS.IF controls delivery of the interrupt as always.
+ 5110            * This fixes a nasty SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued
+ 5111            * by other VM-exits (like a preemption timer), see @bugref{9562#c18}.
+ 5112            *
+ 5113            * See Intel spec. 25.4.1 "Event Blocking".
+ 5114            */
+ 5115           if (    VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
+ 5116               && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
+ 5117               &&  CPUMIsGuestVmxPhysIntrEnabled(pCtx))
+ 5118           {
+ 5119               Assert(!DBGFIsStepping(pVCpu));
+ 5120
+ 5121               /* Nested-guest external interrupt VM-exit. */
+ 5122               if (    CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
+ 5123                   && !CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
+ 5124               {
+ 5125                   VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
+ 5126                   Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
+ 5127                   return rcStrict;
+ 5128               }
+ 5129
+ 5130               /*
+ 5131                * Fetch the external interrupt from the interrupt controller.
+ 5132                * Once PDMGetInterrupt() returns an interrupt we -must- deliver it or pass it to
+ 5133                * the nested-hypervisor. We cannot re-request the interrupt from the controller again.
+ 5134                */
+ 5135               uint8_t u8Interrupt;
+ 5136               int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+ 5137               if (RT_SUCCESS(rc))
+ 5138               {
+ 5139                   /* Nested-guest external interrupt VM-exit when the "acknowledge interrupt on exit" is enabled. */
+ 5140                   if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
+ 5141                   {
+ 5142                       Assert(CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT));
+ 5143                       VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
+ 5144                       Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
+ 5145                       return rcStrict;
+ 5146                   }
+ 5147                   vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
+ 5148                   return VINF_SUCCESS;
+ 5149               }
+ 5150           }
+ 5151           return VINF_SUCCESS;
+ 5152       }
+ 5153       #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
  4998 5154
  4999 5155
…
  7663 7819 }
  7664 7820
- 7665          /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready */
+ 7821             /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready. */
  7666 7822     vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
  7667 7823
…
  10061 10217     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
  10062 10218         return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
- 10063               return vmxHCExitIntWindow(pVCpu, pVmxTransient);
+ 10219              return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
  10064 10220 }
  10065 10221
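The heart of this changeset is the split of event evaluation into vmxHCEvaluatePendingEvent() for ordinary guests and the new vmxHCEvaluatePendingEventNested() for guests running in VMX non-root mode. The nested variant walks a fixed priority order: NMI-window VM-exit, NMI (VM-exit or injection), interrupt-window VM-exit, then external interrupt (VM-exit or injection). The sketch below is a minimal, self-contained model of that ordering only; every type and helper in it is a hypothetical stand-in rather than a VirtualBox API, and details such as the "acknowledge interrupt on exit" split and the single-stepping check are omitted.

/*
 * Minimal, self-contained model of the priority order used by the new
 * vmxHCEvaluatePendingEventNested() above.  Every type and helper here is a
 * hypothetical stand-in, not a VirtualBox API; the point is only to make the
 * ordering of the checks executable.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum
{
    ACT_NONE = 0,               /* nothing to evaluate / everything blocked       */
    ACT_NMI_WINDOW_VMEXIT,      /* reflect an NMI-window VM-exit to the guest hv  */
    ACT_INJECT_NMI,             /* inject the NMI into the nested guest           */
    ACT_NMI_VMEXIT,             /* reflect the NMI as a VM-exit to the guest hv   */
    ACT_INT_WINDOW_VMEXIT,      /* reflect an interrupt-window VM-exit            */
    ACT_EXTINT_VMEXIT,          /* reflect the external interrupt as a VM-exit    */
    ACT_INJECT_EXTINT           /* inject the external interrupt                  */
} ACTION;

typedef struct
{
    bool fEventPending;         /* an event is already queued for injection       */
    bool fInterruptShadow;      /* MOV SS/STI shadow blocks everything below      */
    bool fNmiBlocking;          /* NMI blocking until IRET                        */
    bool fNmiWindowFF;          /* nested hypervisor wants NMI-window exits       */
    bool fVirtNmiBlocking;      /* virtual-NMI blocking in the nested guest       */
    bool fNmiFF;                /* an NMI is pending from the outer guest         */
    bool fNmiExitCtl;           /* pin-based control: NMI exiting                 */
    bool fIntWindowFF;          /* nested hypervisor wants interrupt-window exits */
    bool fVirtIntrEnabled;      /* virtual interrupts enabled in the nested guest */
    bool fIntrFF;               /* a PIC/APIC interrupt is pending                */
    bool fPhysIntrEnabled;      /* physical interrupts deliverable                */
    bool fExtIntExitCtl;        /* pin-based control: external-interrupt exiting  */
} NESTEDSTATE;

/* Mirrors the order of the checks in vmxHCEvaluatePendingEventNested(). */
static ACTION EvaluateNested(const NESTEDSTATE *pState)
{
    if (pState->fEventPending || pState->fInterruptShadow)
        return ACT_NONE;                          /* nothing new gets evaluated   */

    if (!pState->fNmiBlocking)
    {
        if (pState->fNmiWindowFF && !pState->fVirtNmiBlocking)
            return ACT_NMI_WINDOW_VMEXIT;         /* even without a pending NMI   */
        if (pState->fNmiFF)
            return pState->fNmiExitCtl ? ACT_NMI_VMEXIT : ACT_INJECT_NMI;
    }

    if (pState->fIntWindowFF && pState->fVirtIntrEnabled)
        return ACT_INT_WINDOW_VMEXIT;             /* even without a pending IRQ   */

    if (pState->fIntrFF && pState->fPhysIntrEnabled)
        return pState->fExtIntExitCtl ? ACT_EXTINT_VMEXIT : ACT_INJECT_EXTINT;

    return ACT_NONE;
}

int main(void)
{
    NESTEDSTATE State = { .fNmiFF = true, .fNmiExitCtl = true };
    printf("action=%d\n", EvaluateNested(&State));  /* prints action=3 (ACT_NMI_VMEXIT) */
    return 0;
}

Compiling and running the model prints action=3: the pending NMI is turned into an NMI VM-exit because the nested hypervisor intercepts NMIs, which is the same decision the real function reaches via IEMExecVmxVmexitXcptNmi().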
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r98465 → r99653

  5924 5924
  5925 5925     uint32_t fIntrState;
- 5926          rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->fIsNestedGuest,
- 5927                                               &fIntrState);
- 5928
  5929 5926 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ 5927         if (!pVmxTransient->fIsNestedGuest)
+ 5928             rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmxTransient->pVmcsInfo, &fIntrState);
+ 5929         else
+ 5930             rcStrict = vmxHCEvaluatePendingEventNested(pVCpu, pVmxTransient->pVmcsInfo, &fIntrState);
+ 5931
  5930 5932     /*
  5931 5933      * While evaluating pending events if something failed (unlikely) or if we were
…
  5941 5943     }
  5942 5944 #else
+ 5945         rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmxTransient->pVmcsInfo, &fIntrState);
  5943 5946     Assert(rcStrict == VINF_SUCCESS);
  5944 5947 #endif
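The ring-0 caller now selects the evaluator at run time instead of pushing fIsNestedGuest down into it, and when the tree is built without VBOX_WITH_NESTED_HWVIRT_VMX only the plain evaluator exists, so the #else branch calls it unconditionally. Below is a compilable toy model of that dispatch shape; the MODEL_* names and the stub evaluators are invented for illustration and are not VirtualBox symbols.

/*
 * Toy model of the new ring-0 dispatch shown above.  The MODEL_* names and the
 * stub evaluators are invented for illustration; they are not VirtualBox symbols.
 */
#include <stdbool.h>
#include <stdio.h>

typedef int MODELRC;                     /* stand-in for VBOXSTRICTRC             */
#define MODEL_VINF_SUCCESS 0

static MODELRC EvaluatePendingEvent(unsigned *pfIntrState)
{
    *pfIntrState = 0;                    /* pretend: no interruptibility blocking */
    return MODEL_VINF_SUCCESS;
}

#ifdef MODEL_WITH_NESTED_HWVIRT_VMX
static MODELRC EvaluatePendingEventNested(unsigned *pfIntrState)
{
    *pfIntrState = 0;
    return MODEL_VINF_SUCCESS;
}
#endif

static MODELRC RunLoopStep(bool fIsNestedGuest)
{
    unsigned fIntrState;
    MODELRC  rcStrict;
#ifdef MODEL_WITH_NESTED_HWVIRT_VMX
    if (!fIsNestedGuest)                 /* pick the evaluator at run time        */
        rcStrict = EvaluatePendingEvent(&fIntrState);
    else
        rcStrict = EvaluatePendingEventNested(&fIntrState);
#else
    (void)fIsNestedGuest;                /* nested guests cannot occur in this build */
    rcStrict = EvaluatePendingEvent(&fIntrState);
#endif
    return rcStrict;
}

int main(void)
{
    printf("rc=%d\n", RunLoopStep(false));
    return 0;
}

Moving the nested-guest logic into its own function is what lets the plain evaluator shed its fIsNestedGuest parameter and the #ifdef blocks that were previously threaded through it, as the VMXAllTemplate.cpp.h hunks above show.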
trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp
r99557 → r99653

  3739 3739
  3740 3740     uint32_t fIntrState;
- 3741         rcStrict = vmxHCEvaluatePendingEvent(pVCpu, &pVCpu->nem.s.VmcsInfo, false /*fIsNestedGuest*/, &fIntrState);
+ 3741         rcStrict = vmxHCEvaluatePendingEvent(pVCpu, &pVCpu->nem.s.VmcsInfo, &fIntrState);
  3742 3742
  3743 3743     /*
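The darwin NEM backend compiles the same VMXAllTemplate.cpp.h and only ever runs ordinary guests, so its call site simply drops the removed fIsNestedGuest argument. For context, the window-exiting helpers the evaluator toggles (vmxHCSetIntWindowExitVmcs() and friends) boil down to flipping a processor-based VM-execution control and writing it back to the VMCS, as the VMX_VMCS_WRITE_32(..., VMX_VMCS32_CTRL_PROC_EXEC, ...) hunks in the first file suggest. The sketch below is a rough, self-contained model of that pattern; the MODEL_* names and the stubbed VMCS write are assumptions for illustration, not the real helpers.

/*
 * Rough model of the interrupt-window exiting toggle the evaluators rely on,
 * inferred from the VMX_VMCS_WRITE_32(..., VMX_VMCS32_CTRL_PROC_EXEC, ...) hunks
 * above: flip the control bit in the cached u32ProcCtls and push the new word
 * into the VMCS.  The MODEL_* names and the stubbed VMCS write are assumptions
 * for illustration, not the real VirtualBox helpers.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_PROC_CTLS_INT_WINDOW_EXIT  UINT32_C(0x00000004)  /* bit 2 of the primary proc-based controls (Intel SDM) */

typedef struct { uint32_t u32ProcCtls; } MODELVMCSINFO;

/* Stand-in for the real VMWRITE of VMX_VMCS32_CTRL_PROC_EXEC. */
static void ModelWriteProcCtls(uint32_t uProcCtls)
{
    printf("VMWRITE proc-exec controls = %#x\n", (unsigned)uProcCtls);
}

static void ModelSetIntWindowExit(MODELVMCSINFO *pVmcsInfo)
{
    if (!(pVmcsInfo->u32ProcCtls & MODEL_PROC_CTLS_INT_WINDOW_EXIT))
    {
        pVmcsInfo->u32ProcCtls |= MODEL_PROC_CTLS_INT_WINDOW_EXIT;
        ModelWriteProcCtls(pVmcsInfo->u32ProcCtls);      /* "Enabled interrupt-window exiting"  */
    }
}

static void ModelClearIntWindowExit(MODELVMCSINFO *pVmcsInfo)
{
    if (pVmcsInfo->u32ProcCtls & MODEL_PROC_CTLS_INT_WINDOW_EXIT)
    {
        pVmcsInfo->u32ProcCtls &= ~MODEL_PROC_CTLS_INT_WINDOW_EXIT;
        ModelWriteProcCtls(pVmcsInfo->u32ProcCtls);      /* "Disabled interrupt-window exiting" */
    }
}

int main(void)
{
    MODELVMCSINFO VmcsInfo = { 0 };
    ModelSetIntWindowExit(&VmcsInfo);    /* guest can't take the IRQ yet: ask for a window exit */
    ModelClearIntWindowExit(&VmcsInfo);  /* IRQ delivered or withdrawn: stop asking             */
    return 0;
}

With the control set, the CPU exits as soon as the guest is able to accept an interrupt (RFLAGS.IF set and no blocking), at which point the pending PIC/APIC interrupt can finally be injected.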