Changeset 79031 in vbox for trunk/src/VBox/VMM
- Timestamp: Jun 7, 2019 5:58:55 AM (6 years ago)
- svn:sync-xref-src-repo-rev: 131188
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 2 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r78982 r79031 14060 14060 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF)) 14061 14061 { 14062 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF );14062 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */); 14063 14063 fCheckRemainingIntercepts = false; 14064 14064 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)); … … 14096 14096 && CPUMIsGuestVmxVirtNmiBlocking(pVCpu, &pVCpu->cpum.GstCtx)) 14097 14097 { 14098 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW );14098 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */); 14099 14099 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)); 14100 14100 } … … 14102 14102 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx)) 14103 14103 { 14104 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW );14104 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */); 14105 14105 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)); 14106 14106 } … … 15955 15955 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector) 15956 15956 { 15957 VBOXSTRICTRC rcStrict = iemVmxVmexit StartupIpi(pVCpu, uVector);15957 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector); 15958 15958 Assert(!pVCpu->iem.s.cActiveMappings); 15959 15959 return iemExecStatusCodeFiddling(pVCpu, rcStrict); … … 15969 15969 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 15970 15970 * @param uExitReason The VM-exit reason. 15971 * @param u ExitQual The VM-exit qualification.15971 * @param u64ExitQual The Exit qualification. 
15972 15972 * @thread EMT(pVCpu) 15973 15973 */ 15974 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason, uint64_t uExitQual) 15975 { 15976 iemVmxVmcsSetExitQual(pVCpu, uExitQual); 15977 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason); 15974 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64ExitQual) 15975 { 15976 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual); 15978 15977 Assert(!pVCpu->iem.s.cActiveMappings); 15979 15978 return iemExecStatusCodeFiddling(pVCpu, rcStrict); -
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
r78982 r79031 951 951 952 952 /** 953 * Sets the VM-exit qualification VMCS field.954 * 955 * @param pVCpu The cross context virtual CPU structure.956 * @param u ExitQual The VM-exit qualification.957 */ 958 DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPU pVCpu, uint64_t u ExitQual)953 * Sets the Exit qualification VMCS field. 954 * 955 * @param pVCpu The cross context virtual CPU structure. 956 * @param u64ExitQual The Exit qualification. 957 */ 958 DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPU pVCpu, uint64_t u64ExitQual) 959 959 { 960 960 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 961 pVmcs->u64RoExitQual.u = u ExitQual;961 pVmcs->u64RoExitQual.u = u64ExitQual; 962 962 } 963 963 … … 966 966 * Sets the VM-exit interruption information field. 967 967 * 968 * @param pVCpu The cross context virtual CPU structure.969 * @param uExit QualThe VM-exit interruption information.968 * @param pVCpu The cross context virtual CPU structure. 969 * @param uExitIntInfo The VM-exit interruption information. 970 970 */ 971 971 DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntInfo(PVMCPU pVCpu, uint32_t uExitIntInfo) … … 2710 2710 * @param pVCpu The cross context virtual CPU structure. 2711 2711 * @param uExitReason The VM-exit reason. 2712 * 2713 * @remarks Make sure VM-exit qualification is updated before calling this 2714 * function! 2715 */ 2716 IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason) 2712 * @param u64ExitQual The Exit qualification. 2713 */ 2714 IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64ExitQual) 2717 2715 { 2718 2716 # if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3) 2719 RT_NOREF 2(pVCpu, uExitReason);2717 RT_NOREF3(pVCpu, uExitReason, u64ExitQual); 2720 2718 return VINF_EM_RAW_EMULATE_INSTR; 2721 2719 # else … … 2738 2736 2739 2737 /* 2740 * Update the VM-exit reason. Other VMCS data fields are expected to be updated by the caller already. 
2738 * Update the VM-exit reason and Exit qualification. 2739 * Other VMCS read-only data fields are expected to be updated by the caller already. 2741 2740 */ 2742 2741 pVmcs->u32RoExitReason = uExitReason; 2743 Log3(("vmexit: uExitReason=%#RX32 uExitQual=%#RX64 cs:rip=%04x:%#RX64\n", uExitReason, pVmcs->u64RoExitQual, 2742 pVmcs->u64RoExitQual.u = u64ExitQual; 2743 Log3(("vmexit: uExitReason=%#RX32 u64ExitQual=%#RX64 cs:rip=%04x:%#RX64\n", uExitReason, pVmcs->u64RoExitQual.u, 2744 2744 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 2745 2745 … … 2887 2887 /* 2888 2888 * For instructions where any of the following fields are not applicable: 2889 * - Exit qualification must be cleared. 2889 2890 * - VM-exit instruction info. is undefined. 2890 * - VM-exit qualification must be cleared. 2891 * - VM-exit guest-linear address is undefined. 2892 * - VM-exit guest-physical address is undefined. 2891 * - Guest-linear address is undefined. 2892 * - Guest-physical address is undefined. 2893 2893 * 2894 2894 * The VM-exit instruction length is mandatory for all VM-exits that are caused by … … 2910 2910 /* Update all the relevant fields from the VM-exit instruction information struct. */ 2911 2911 iemVmxVmcsSetExitInstrInfo(pVCpu, pExitInfo->InstrInfo.u); 2912 iemVmxVmcsSetExitQual(pVCpu, pExitInfo->u64Qual);2913 2912 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr); 2914 2913 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr); … … 2916 2915 2917 2916 /* Perform the VM-exit. */ 2918 return iemVmxVmexit(pVCpu, pExitInfo->uReason );2917 return iemVmxVmexit(pVCpu, pExitInfo->uReason, pExitInfo->u64Qual); 2919 2918 } 2920 2919 … … 2972 2971 * 2973 2972 * This is intended for instructions that have a ModR/M byte and update the VM-exit 2974 * instruction information and VM-exit qualification fields.2973 * instruction information and Exit qualification fields. 2975 2974 * 2976 2975 * @param pVCpu The cross context virtual CPU structure. 
… … 2989 2988 2990 2989 /* 2991 * Update the VM-exit qualification field with displacement bytes.2990 * Update the Exit qualification field with displacement bytes. 2992 2991 * See Intel spec. 27.2.1 "Basic VM-Exit Information". 2993 2992 */ … … 3017 3016 ExitInfo.InstrInfo.u = uInstrInfo; 3018 3017 3019 /* Update the VM-exit qualification. */3018 /* Update the Exit qualification. */ 3020 3019 ExitInfo.u64Qual = GCPtrDisp; 3021 3020 break; … … 3649 3648 { 3650 3649 /* 3651 * Task-switch VM-exits are unconditional and provide the VM-exit qualification.3650 * Task-switch VM-exits are unconditional and provide the Exit qualification. 3652 3651 * 3653 3652 * If the cause of the task switch is due to execution of CALL, IRET or the JMP … … 3672 3671 } 3673 3672 3674 uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_NEW_TSS, SelNewTss) 3675 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_SOURCE, uType); 3676 iemVmxVmcsSetExitQual(pVCpu, uExitQual); 3673 uint64_t const u64ExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_NEW_TSS, SelNewTss) 3674 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_SOURCE, uType); 3677 3675 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr); 3678 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH );3676 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH, u64ExitQual); 3679 3677 } 3680 3678 … … 3697 3695 Assert(pExitEventInfo); 3698 3696 3699 /* The VM-exit qualification is mandatory for all task-switch VM-exits. */3697 /* The Exit qualification is mandatory for all task-switch VM-exits. */ 3700 3698 uint64_t const u64ExitQual = pExitInfo->u64Qual; 3701 3699 iemVmxVmcsSetExitQual(pVCpu, u64ExitQual); … … 3743 3741 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr); 3744 3742 } 3745 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH );3743 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH, u64ExitQual); 3746 3744 } 3747 3745 … … 3775 3773 pVmcs->u32PreemptTimer = 0; 3776 3774 3777 /* Cause the VMX-preemption timer VM-exit. The VM-exit qualification MBZ. 
*/3778 return iemVmxVmexit(pVCpu, VMX_EXIT_PREEMPT_TIMER );3775 /* Cause the VMX-preemption timer VM-exit. The Exit qualification MBZ. */ 3776 return iemVmxVmexit(pVCpu, VMX_EXIT_PREEMPT_TIMER, 0 /* u64ExitQual */); 3779 3777 } 3780 3778 } … … 3820 3818 */ 3821 3819 if (!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)) 3822 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT );3820 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT, 0 /* u64ExitQual */); 3823 3821 3824 3822 /* … … 3846 3844 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1); 3847 3845 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo); 3848 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT );3846 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT, 0 /* u64ExitQual */); 3849 3847 } 3850 3848 } 3851 3849 3852 3850 return VINF_VMX_INTERCEPT_NOT_ACTIVE; 3853 }3854 3855 3856 /**3857 * VMX VM-exit handler for VM-exits due to startup-IPIs (SIPI).3858 *3859 * @returns VBox strict status code.3860 * @param pVCpu The cross context virtual CPU structure.3861 * @param uVector The SIPI vector.3862 */3863 IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)3864 {3865 iemVmxVmcsSetExitQual(pVCpu, uVector);3866 return iemVmxVmexit(pVCpu, VMX_EXIT_SIPI);3867 3851 } 3868 3852 … … 3891 3875 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo); 3892 3876 iemVmxVmcsSetExitIntErrCode(pVCpu, 0); 3893 iemVmxVmcsSetExitQual(pVCpu, 0);3894 3877 iemVmxVmcsSetExitInstrLen(pVCpu, 0); 3895 3878 … … 3906 3889 iemVmxVmcsSetIdtVectoringInfo(pVCpu, 0); 3907 3890 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, 0); 3908 3909 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI); 3891 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI, 0 /* u64ExitQual */); 3910 3892 } 3911 3893 … … 3931 3913 Assert(VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo)); 3932 3914 3933 iemVmxVmcsSetExitQual(pVCpu, pExitInfo->u64Qual);3934 3915 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr); 3935 3916 iemVmxVmcsSetExitIntInfo(pVCpu, pExitEventInfo->uExitIntInfo); … … 3937 3918 
iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo); 3938 3919 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode); 3939 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI );3920 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI, pExitInfo->u64Qual); 3940 3921 } 3941 3922 … … 4023 4004 4024 4005 /* Construct the rest of the event related information fields and cause the VM-exit. */ 4025 uint64_t u ExitQual;4006 uint64_t u64ExitQual; 4026 4007 if (uVector == X86_XCPT_PF) 4027 4008 { 4028 4009 Assert(fFlags & IEM_XCPT_FLAGS_CR2); 4029 u ExitQual = uCr2;4010 u64ExitQual = uCr2; 4030 4011 } 4031 4012 else if (uVector == X86_XCPT_DB) 4032 4013 { 4033 4014 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6); 4034 u ExitQual = pVCpu->cpum.GstCtx.dr[6] & VMX_VMCS_EXIT_QUAL_VALID_MASK;4035 } 4036 else 4037 u ExitQual = 0;4015 u64ExitQual = pVCpu->cpum.GstCtx.dr[6] & VMX_VMCS_EXIT_QUAL_VALID_MASK; 4016 } 4017 else 4018 u64ExitQual = 0; 4038 4019 4039 4020 uint8_t const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret; … … 4047 4028 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo); 4048 4029 iemVmxVmcsSetExitIntErrCode(pVCpu, uErrCode); 4049 iemVmxVmcsSetExitQual(pVCpu, uExitQual);4050 4030 4051 4031 /* … … 4060 4040 iemVmxVmcsSetExitInstrLen(pVCpu, 0); 4061 4041 4062 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI );4042 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI, u64ExitQual); 4063 4043 } 4064 4044 … … 4087 4067 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, 0); 4088 4068 4089 return iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT );4069 return iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */); 4090 4070 } 4091 4071 … … 4114 4094 enmAccess = VMXAPICACCESS_LINEAR_WRITE; 4115 4095 4116 uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess) 4117 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess); 4118 iemVmxVmcsSetExitQual(pVCpu, uExitQual); 4119 return iemVmxVmexit(pVCpu, 
VMX_EXIT_APIC_ACCESS); 4096 uint64_t const u64ExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess) 4097 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess); 4098 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS, u64ExitQual); 4120 4099 } 4121 4100 … … 4139 4118 iemVmxVmcsSetExitIntInfo(pVCpu, 0); 4140 4119 iemVmxVmcsSetExitIntErrCode(pVCpu, 0); 4141 iemVmxVmcsSetExitQual(pVCpu, pExitInfo->u64Qual);4142 4120 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo); 4143 4121 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode); 4144 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS );4122 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS, pExitInfo->u64Qual); 4145 4123 } 4146 4124 … … 4156 4134 { 4157 4135 Assert(offApic < XAPIC_OFF_END + 4); 4158 4159 /* Write only bits 11:0 of the APIC offset into the VM-exit qualification field. */ 4136 /* Write only bits 11:0 of the APIC offset into the Exit qualification field. */ 4160 4137 offApic &= UINT16_C(0xfff); 4161 iemVmxVmcsSetExitQual(pVCpu, offApic); 4162 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_WRITE); 4163 } 4164 4165 4166 /** 4167 * VMX VM-exit handler for virtualized-EOIs. 4168 * 4169 * @param pVCpu The cross context virtual CPU structure. 
4170 */ 4171 IEM_STATIC VBOXSTRICTRC iemVmxVmexitVirtEoi(PVMCPU pVCpu, uint8_t uVector) 4172 { 4173 iemVmxVmcsSetExitQual(pVCpu, uVector); 4174 return iemVmxVmexit(pVCpu, VMX_EXIT_VIRTUALIZED_EOI); 4138 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_WRITE, offApic); 4175 4139 } 4176 4140 … … 4926 4890 { 4927 4891 Log2(("tpr_virt: uTpr=%u uTprThreshold=%u -> VM-exit\n", uTpr, uTprThreshold)); 4928 return iemVmxVmexit(pVCpu, VMX_EXIT_TPR_BELOW_THRESHOLD );4892 return iemVmxVmexit(pVCpu, VMX_EXIT_TPR_BELOW_THRESHOLD, 0 /* u64ExitQual */); 4929 4893 } 4930 4894 } … … 5001 4965 iemVmxPprVirtualization(pVCpu); 5002 4966 if (iemVmxIsEoiInterceptSet(pVCpu, uVector)) 5003 return iemVmxVmexit VirtEoi(pVCpu, uVector);4967 return iemVmxVmexit(pVCpu, VMX_EXIT_VIRTUALIZED_EOI, uVector); 5004 4968 iemVmxEvalPendingVirtIntrs(pVCpu); 5005 4969 return VINF_SUCCESS; … … 5901 5865 /* 5902 5866 * We don't support injecting NMIs when blocking-by-STI would be in effect. 5903 * We update the VM-exit qualification only when blocking-by-STI is set5867 * We update the Exit qualification only when blocking-by-STI is set 5904 5868 * without blocking-by-MovSS being set. Although in practise it does not 5905 5869 * make much difference since the order of checks are implementation defined. … … 7051 7015 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-entry. 7052 7016 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure 7053 * recording the MSR index in the VM-exit qualification (as per the Intel spec.) and indicated7017 * recording the MSR index in the Exit qualification (as per the Intel spec.) and indicated 7054 7018 * further by our own, specific diagnostic code. Later, we can try implement handling of the 7055 7019 * MSR in ring-0 if possible, or come up with a better, generic solution. … … 7489 7453 * specified to be undefined needs to be initialized here. 
7490 7454 * 7491 * Thus, it is especially important to clear the VM-exit qualification field7455 * Thus, it is especially important to clear the Exit qualification field 7492 7456 * since it must be zero for VM-exits where it is not used. Similarly, the 7493 7457 * VM-exit interruption information field's valid bit needs to be cleared for … … 7644 7608 * See Intel spec. 24.11.4 "Software Access to Related Structures". 7645 7609 */ 7646 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)); 7610 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 7611 Assert(pVmcs); 7647 7612 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu)); 7613 7648 7614 int rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr); 7649 7615 if (RT_SUCCESS(rc)) … … 7682 7648 /* VMLAUNCH instruction must update the VMCS launch state. */ 7683 7649 if (uInstrId == VMXINSTRID_VMLAUNCH) 7684 pV Cpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;7650 pVmcs->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_LAUNCHED; 7685 7651 7686 7652 /* Perform the VMX transition (PGM updates). */ … … 7752 7718 return rcStrict; 7753 7719 } 7754 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED); 7720 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED, 7721 pVmcs->u64RoExitQual.u); 7755 7722 } 7756 7723 } 7757 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED); 7724 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED, 7725 pVmcs->u64RoExitQual.u); 7758 7726 } 7759 7727
Note: See TracChangeset for help on using the changeset viewer.