Changeset 73606 in vbox for trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

Timestamp:
    Aug 10, 2018 7:38:56 AM (6 years ago)
File:
    1 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r73437 → r73606

…
  #include <VBox/vmm/selm.h>
  #include <VBox/vmm/tm.h>
+ #include <VBox/vmm/em.h>
  #include <VBox/vmm/gim.h>
  #include <VBox/vmm/apic.h>
…
  {
      /** The host's rflags/eflags. */
      RTCCUINTREG         fEFlags;
  #if HC_ARCH_BITS == 32
      uint32_t            u32Alignment0;
  #endif
      /** The guest's TPR value used for TPR shadowing. */
      uint8_t             u8GuestTpr;
      /** Alignment. */
      uint8_t             abAlignment0[7];

      /** The basic VM-exit reason. */
      uint16_t            uExitReason;
      /** Alignment. */
      uint16_t            u16Alignment0;
      /** The VM-exit interruption error code. */
      uint32_t            uExitIntErrorCode;
      /** The VM-exit exit code qualification. */
-     uint64_t            uExitQualification;
+     uint64_t            uExitQual;

      /** The VM-exit interruption-information field. */
      uint32_t            uExitIntInfo;
      /** The VM-exit instruction-length field. */
      uint32_t            cbInstr;
      /** The VM-exit instruction-information field. */
-     union
-     {
-         /** Plain unsigned int representation. */
-         uint32_t        u;
-         /** INS and OUTS information. */
-         struct
-         {
-             uint32_t    u7Reserved0 : 7;
-             /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
-             uint32_t    u3AddrSize  : 3;
-             uint32_t    u5Reserved1 : 5;
-             /** The segment register (X86_SREG_XXX). */
-             uint32_t    iSegReg     : 3;
-             uint32_t    uReserved2  : 14;
-         } StrIo;
-         /** INVEPT, INVVPID, INVPCID information. */
-         struct
-         {
-             /** Scaling; 0=no scaling, 1=scale-by-2, 2=scale-by-4, 3=scale-by-8. */
-             uint32_t    u2Scaling     : 2;
-             uint32_t    u5Reserved0   : 5;
-             /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
-             uint32_t    u3AddrSize    : 3;
-             uint32_t    u1Reserved0   : 1;
-             uint32_t    u4Reserved0   : 4;
-             /** The segment register (X86_SREG_XXX). */
-             uint32_t    iSegReg       : 3;
-             /** The index register (X86_GREG_XXX). */
-             uint32_t    iIdxReg       : 4;
-             /** Set if index register is invalid. */
-             uint32_t    fIdxRegValid  : 1;
-             /** The base register (X86_GREG_XXX). */
-             uint32_t    iBaseReg      : 4;
-             /** Set if base register is invalid. */
-             uint32_t    fBaseRegValid : 1;
-             /** Register 2 (X86_GREG_XXX). */
-             uint32_t    iReg2         : 4;
-         } Inv;
-     } ExitInstrInfo;
+     VMXEXITINSTRINFO    ExitInstrInfo;
      /** Whether the VM-entry failed or not. */
      bool                fVMEntryFailed;
      /** Alignment. */
      uint8_t             abAlignment1[3];

      /** The VM-entry interruption-information field. */
      uint32_t            uEntryIntInfo;
      /** The VM-entry exception error code field. */
      uint32_t            uEntryXcptErrorCode;
      /** The VM-entry instruction length field. */
      uint32_t            cbEntryInstr;

      /** IDT-vectoring information field. */
      uint32_t            uIdtVectoringInfo;
      /** IDT-vectoring error code. */
      uint32_t            uIdtVectoringErrorCode;

      /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
      uint32_t            fVmcsFieldsRead;

      /** Whether the guest debug state was active at the time of VM-exit. */
      bool                fWasGuestDebugStateActive;
      /** Whether the hyper debug state was active at the time of VM-exit. */
      bool                fWasHyperDebugStateActive;
      /** Whether TSC-offsetting should be setup before VM-entry. */
      bool                fUpdateTscOffsettingAndPreemptTimer;
      /** Whether the VM-exit was caused by a page-fault during delivery of a
       *  contributory exception or a page-fault. */
      bool                fVectoringDoublePF;
      /** Whether the VM-exit was caused by a page-fault during delivery of an
       *  external interrupt or NMI. */
      bool                fVectoringPF;
  } VMXTRANSIENT;
  AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
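For context: the removed ad-hoc union now lives in a shared VMXEXITINSTRINFO type declared in the VMX headers, so every consumer of the VM-exit instruction-information field decodes one common bit layout. Below is a minimal, self-contained sketch of that bitfield-union pattern; EXAMPLEEXITINSTRINFO and the raw value are illustrative stand-ins, and only the StrIo bit widths are taken from the union removed above.

    /*
     * Sketch of the bitfield-union pattern VMXEXITINSTRINFO centralizes.
     * The StrIo view mirrors the one removed from VMXTRANSIENT; the real
     * type carries many more views (one per instruction class).
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef union EXAMPLEEXITINSTRINFO  /* hypothetical stand-in for VMXEXITINSTRINFO */
    {
        uint32_t u;                     /* plain 32-bit value as read from the VMCS */
        struct
        {
            uint32_t u7Reserved0 : 7;
            uint32_t u3AddrSize  : 3;   /* bits 9:7 -- 0=16-bit, 1=32-bit, 2=64-bit */
            uint32_t u5Reserved1 : 5;
            uint32_t iSegReg     : 3;   /* bits 17:15 -- X86_SREG_XXX */
            uint32_t uReserved2  : 14;
        } StrIo;
    } EXAMPLEEXITINSTRINFO;

    int main(void)
    {
        EXAMPLEEXITINSTRINFO Info;
        Info.u = UINT32_C(0x00018080);  /* made-up raw value: addr size 1, iSegReg 3 */
        printf("addr-size=%u iSegReg=%u\n",
               (unsigned)Info.StrIo.u3AddrSize, (unsigned)Info.StrIo.iSegReg);
        return 0;
    }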
…
  static FNVMXEXITHANDLER     hmR0VmxExitRdpmc;
  static FNVMXEXITHANDLER     hmR0VmxExitVmcall;
+ #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ static FNVMXEXITHANDLER     hmR0VmxExitVmclear;
+ static FNVMXEXITHANDLER     hmR0VmxExitVmlaunch;
+ static FNVMXEXITHANDLER     hmR0VmxExitVmptrld;
+ static FNVMXEXITHANDLER     hmR0VmxExitVmptrst;
+ static FNVMXEXITHANDLER     hmR0VmxExitVmread;
+ static FNVMXEXITHANDLER     hmR0VmxExitVmresume;
+ static FNVMXEXITHANDLER     hmR0VmxExitVmwrite;
+ static FNVMXEXITHANDLER     hmR0VmxExitVmxoff;
+ static FNVMXEXITHANDLER     hmR0VmxExitVmxon;
+ #endif
  static FNVMXEXITHANDLER     hmR0VmxExitRdtsc;
  static FNVMXEXITHANDLERNSRC hmR0VmxExitRsm;
…
  /* 17  VMX_EXIT_RSM */            hmR0VmxExitRsm,
  /* 18  VMX_EXIT_VMCALL */         hmR0VmxExitVmcall,
+ #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ /* 19  VMX_EXIT_VMCLEAR */        hmR0VmxExitVmclear,
+ /* 20  VMX_EXIT_VMLAUNCH */       hmR0VmxExitVmlaunch,
+ /* 21  VMX_EXIT_VMPTRLD */        hmR0VmxExitVmptrld,
+ /* 22  VMX_EXIT_VMPTRST */        hmR0VmxExitVmptrst,
+ /* 23  VMX_EXIT_VMREAD */         hmR0VmxExitVmread,
+ /* 24  VMX_EXIT_VMRESUME */       hmR0VmxExitVmresume,
+ /* 25  VMX_EXIT_VMWRITE */        hmR0VmxExitVmwrite,
+ /* 26  VMX_EXIT_VMXOFF */         hmR0VmxExitVmxoff,
+ /* 27  VMX_EXIT_VMXON */          hmR0VmxExitVmxon,
+ #else
  /* 19  VMX_EXIT_VMCLEAR */        hmR0VmxExitSetPendingXcptUD,
  /* 20  VMX_EXIT_VMLAUNCH */       hmR0VmxExitSetPendingXcptUD,
…
  /* 26  VMX_EXIT_VMXOFF */         hmR0VmxExitSetPendingXcptUD,
  /* 27  VMX_EXIT_VMXON */          hmR0VmxExitSetPendingXcptUD,
+ #endif
  /* 28  VMX_EXIT_MOV_CRX */        hmR0VmxExitMovCRx,
  /* 29  VMX_EXIT_MOV_DRX */        hmR0VmxExitMovDRx,
…
   * @param   pVmxTransient  Pointer to the VMX transient structure.
   */
- DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+ DECLINLINE(int) hmR0VmxReadExitQualVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
  {
      if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
      {
-         int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
+         int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual); NOREF(pVCpu);
          AssertRCReturn(rc, rc);
          pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
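hmR0VmxReadExitQualVmcs (renamed from hmR0VmxReadExitQualificationVmcs to match the shortened uExitQual member) performs at most one VMREAD per VM-exit: fVmcsFieldsRead records which fields have already been fetched, so repeated calls from different handlers are cheap. A stripped-down sketch of that read-once idiom, with illustrative names and a stubbed VMREAD:

    /* Read-once caching idiom used by the hmR0VmxRead*Vmcs helpers; names
     * here are illustrative, and the real code reads the hardware VMCS
     * instead of returning a canned value. */
    #include <stdint.h>

    #define READ_EXIT_QUAL  UINT32_C(0x01)  /* stand-in for HMVMX_READ_EXIT_QUALIFICATION */

    typedef struct EXAMPLETRANSIENT
    {
        uint32_t fFieldsRead;   /* bitmask of fields already fetched this exit */
        uint64_t uExitQual;     /* cached copy of the exit qualification */
    } EXAMPLETRANSIENT;

    static uint64_t exampleVmcsRead(void) { return 0x1234; /* pretend VMREAD */ }

    static int exampleReadExitQual(EXAMPLETRANSIENT *pTransient)
    {
        if (!(pTransient->fFieldsRead & READ_EXIT_QUAL))  /* first caller pays */
        {
            pTransient->uExitQual    = exampleVmcsRead();
            pTransient->fFieldsRead |= READ_EXIT_QUAL;    /* later callers hit the cache */
        }
        return 0;
    }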
…
      int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
      rc    |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
-     rc    |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+     rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
      AssertRC(rc);
…
      Log4(("uExitReason        %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
            pVmxTransient->uExitReason));
-     Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
+     Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQual));
      Log4(("InstrError         %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
      if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
…
  DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu)
  {
-     uint32_t const u32IntInfo = X86_XCPT_DF | VMX_EXIT_INT_INFO_VALID
-                               | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT)
-                               | VMX_EXIT_INT_INFO_ERROR_CODE_VALID;
+     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DF)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
      hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
  }
…
  DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu)
  {
-     uint32_t const u32IntInfo = X86_XCPT_UD | VMX_EXIT_INT_INFO_VALID
-                               | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
+     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_UD)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
      hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
  }
…
  DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu)
  {
-     uint32_t const u32IntInfo = X86_XCPT_DB | VMX_EXIT_INT_INFO_VALID
-                               | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
+     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DB)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
      hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
  }
…
  DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, uint32_t cbInstr)
  {
-     uint32_t const u32IntInfo = X86_XCPT_OF | VMX_EXIT_INT_INFO_VALID
-                               | (VMX_EXIT_INT_INFO_TYPE_SW_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
+     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_OF)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_SW_INT)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
      hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
  }
+
+
+ /**
+  * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
+  *
+  * @param   pVCpu       The cross context virtual CPU structure.
+  * @param   u32ErrCode  The error code for the general-protection exception.
+  */
+ DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, uint32_t u32ErrCode)
+ {
+     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
+     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
+ }
+
+
+ /**
+  * Sets a stack (\#SS) exception as pending-for-injection into the VM.
+  *
+  * @param   pVCpu       The cross context virtual CPU structure.
+  * @param   u32ErrCode  The error code for the stack exception.
+  */
+ DECLINLINE(void) hmR0VmxSetPendingXcptSS(PVMCPU pVCpu, uint32_t u32ErrCode)
+ {
+     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_SS)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
+                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
+     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
+ }
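These helpers now build the VM-entry interruption-information word with IPRT's RT_BF_MAKE bit-field composer instead of hand-shifted constants, and they set the error-code-valid and valid bits explicitly in every case. The sketch below shows how such _SHIFT/_MASK field macros compose and decompose a 32-bit word; the EX_* definitions mirror the Intel-documented layout (vector in bits 7:0, type in bits 10:8, error-code valid in bit 11, valid in bit 31) but are local stand-ins, not the real VMX_BF_ENTRY_INT_INFO_* macros.

    /* Local model of the RT_BF_MAKE/RT_BF_GET idiom: each field contributes
     * a _SHIFT and a _MASK, and the macros paste those names together. */
    #include <stdint.h>

    #define EX_VECTOR_SHIFT          0
    #define EX_VECTOR_MASK           UINT32_C(0x000000ff)
    #define EX_TYPE_SHIFT            8
    #define EX_TYPE_MASK             UINT32_C(0x00000700)
    #define EX_ERR_CODE_VALID_SHIFT  11
    #define EX_ERR_CODE_VALID_MASK   UINT32_C(0x00000800)
    #define EX_VALID_SHIFT           31
    #define EX_VALID_MASK            UINT32_C(0x80000000)

    #define EX_BF_MAKE(a_Field, a_uValue) (((uint32_t)(a_uValue) << a_Field##_SHIFT) & a_Field##_MASK)
    #define EX_BF_GET(a_uWord, a_Field)   (((a_uWord) & a_Field##_MASK) >> a_Field##_SHIFT)

    /* Compose a #GP (vector 13), hardware exception (type 3), with error code: */
    static uint32_t const g_uIntInfo = EX_BF_MAKE(EX_VECTOR,         13)
                                     | EX_BF_MAKE(EX_TYPE,           3)
                                     | EX_BF_MAKE(EX_ERR_CODE_VALID, 1)
                                     | EX_BF_MAKE(EX_VALID,          1);
    /* g_uIntInfo == 0x80000b0d, and EX_BF_GET(g_uIntInfo, EX_VECTOR) == 13. */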
+
+
+ #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+
+ /**
+  * Decodes the memory operand of a VM-exit due to instruction execution.
+  *
+  * For instructions with two operands, the second operand is usually found in the
+  * VM-exit qualification field.
+  *
+  * @returns Strict VBox status code (i.e. informational status codes too).
+  * @retval  VINF_SUCCESS if the operand was successfully decoded.
+  * @retval  VINF_HM_PENDING_XCPT if an exception was raised while decoding the
+  *          operand.
+  * @param   pVCpu           The cross context virtual CPU structure.
+  * @param   pExitInstrInfo  Pointer to the VM-exit instruction information.
+  * @param   GCPtrDisp       The instruction displacement field, if any. For
+  *                          RIP-relative addressing pass RIP + displacement here.
+  * @param   fIsWrite        Whether the operand is a destination memory operand
+  *                          (i.e. writeable memory location) or not.
+  * @param   pGCPtrMem       Where to store the destination memory operand.
+  */
+ static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPU pVCpu, PCVMXEXITINSTRINFO pExitInstrInfo, RTGCPTR GCPtrDisp, bool fIsWrite,
+                                             PRTGCPTR pGCPtrMem)
+ {
+     Assert(pExitInstrInfo);
+     Assert(pGCPtrMem);
+     Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
+
+     static uint64_t const s_auAddrSizeMasks[]   = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
+     static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
+     AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
+
+     uint8_t const   uAddrSize     = pExitInstrInfo->InvVmxXsaves.u3AddrSize;
+     uint8_t const   iSegReg       = pExitInstrInfo->InvVmxXsaves.iSegReg;
+     bool const      fIdxRegValid  = !pExitInstrInfo->InvVmxXsaves.fIdxRegInvalid;
+     uint8_t const   iIdxReg       = pExitInstrInfo->InvVmxXsaves.iIdxReg;
+     uint8_t const   uScale        = pExitInstrInfo->InvVmxXsaves.u2Scaling;
+     bool const      fBaseRegValid = !pExitInstrInfo->InvVmxXsaves.fBaseRegInvalid;
+     uint8_t const   iBaseReg      = pExitInstrInfo->InvVmxXsaves.iBaseReg;
+     bool const      fIsMemOperand = !pExitInstrInfo->InvVmxXsaves.fIsRegOperand;
+     bool const      fIsLongMode   = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
+
+     /*
+      * Validate instruction information.
+      * This shouldn't happen on real hardware but useful while testing our nested hardware-virtualization code.
+      */
+     AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
+                           ("Invalid address size. ExitInstrInfo=%#RX32\n", pExitInstrInfo->u), VERR_VMX_IPE_1);
+     AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
+                           ("Invalid segment register. ExitInstrInfo=%#RX32\n", pExitInstrInfo->u), VERR_VMX_IPE_2);
+     AssertLogRelMsgReturn(fIsMemOperand,
+                           ("Expected memory operand. ExitInstrInfo=%#RX32\n", pExitInstrInfo->u), VERR_VMX_IPE_3);
+
+     /*
+      * Compute the complete effective address.
+      *
+      * See AMD instruction spec. 1.4.2 "SIB Byte Format"
+      * See AMD spec. 4.5.2 "Segment Registers".
+      */
+     RTGCPTR GCPtrMem = GCPtrDisp;
+     if (fBaseRegValid)
+         GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
+     if (fIdxRegValid)
+         GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
+
+     RTGCPTR const GCPtrOff = GCPtrMem;
+     if (   !fIsLongMode
+         || iSegReg >= X86_SREG_FS)
+         GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
+     GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
+
+     /*
+      * Validate effective address.
+      * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
+      */
+     uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
+     Assert(cbAccess > 0);
+     if (fIsLongMode)
+     {
+         if (X86_IS_CANONICAL(GCPtrMem))
+         {
+             *pGCPtrMem = GCPtrMem;
+             return VINF_SUCCESS;
+         }
+
+         Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
+         hmR0VmxSetPendingXcptGP(pVCpu, 0);
+         return VINF_HM_PENDING_XCPT;
+     }
+
+     /*
+      * This is a watered down version of iemMemApplySegment().
+      * Parts that are not applicable for VMX instructions like real-or-v8086 mode
+      * and segment CPL/DPL checks are skipped.
+      */
+     RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
+     RTGCPTR32 const GCPtrLast32  = GCPtrFirst32 + cbAccess - 1;
+     PCCPUMSELREG    pSel         = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
+
+     /* Check if the segment is present and usable. */
+     if (    pSel->Attr.n.u1Present
+         && !pSel->Attr.n.u1Unusable)
+     {
+         Assert(pSel->Attr.n.u1DescType);
+         if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
+         {
+             /* Check permissions for the data segment. */
+             if (    fIsWrite
+                 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
+             {
+                 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
+                 hmR0VmxSetPendingXcptGP(pVCpu, iSegReg);
+                 return VINF_HM_PENDING_XCPT;
+             }
+
+             /* Check limits if it's a normal data segment. */
+             if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
+             {
+                 if (   GCPtrFirst32 > pSel->u32Limit
+                     || GCPtrLast32  > pSel->u32Limit)
+                 {
+                     Log4Func(("Data segment limit exceeded."
+                               "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
+                               GCPtrLast32, pSel->u32Limit));
+                     if (iSegReg == X86_SREG_SS)
+                         hmR0VmxSetPendingXcptSS(pVCpu, 0);
+                     else
+                         hmR0VmxSetPendingXcptGP(pVCpu, 0);
+                     return VINF_HM_PENDING_XCPT;
+                 }
+             }
+             else
+             {
+                 /* Check limits if it's an expand-down data segment.
+                    Note! The upper boundary is defined by the B bit, not the G bit! */
+                 if (   GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
+                     || GCPtrLast32  > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
+                 {
+                     Log4Func(("Expand-down data segment limit exceeded."
+                               "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
+                               GCPtrLast32, pSel->u32Limit));
+                     if (iSegReg == X86_SREG_SS)
+                         hmR0VmxSetPendingXcptSS(pVCpu, 0);
+                     else
+                         hmR0VmxSetPendingXcptGP(pVCpu, 0);
+                     return VINF_HM_PENDING_XCPT;
+                 }
+             }
+         }
+         else
+         {
+             /* Check permissions for the code segment. */
+             if (   fIsWrite
+                 || !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ))
+             {
+                 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
+                 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
+                 hmR0VmxSetPendingXcptGP(pVCpu, 0);
+                 return VINF_HM_PENDING_XCPT;
+             }
+
+             /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
+             if (   GCPtrFirst32 > pSel->u32Limit
+                 || GCPtrLast32  > pSel->u32Limit)
+             {
+                 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
+                           GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
+                 if (iSegReg == X86_SREG_SS)
+                     hmR0VmxSetPendingXcptSS(pVCpu, 0);
+                 else
+                     hmR0VmxSetPendingXcptGP(pVCpu, 0);
+                 return VINF_HM_PENDING_XCPT;
+             }
+         }
+     }
+     else
+     {
+         Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
+         hmR0VmxSetPendingXcptGP(pVCpu, 0);
+         return VINF_HM_PENDING_XCPT;
+     }
+
+     *pGCPtrMem = GCPtrMem;
+     return VINF_SUCCESS;
+ }
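The decoder reconstructs the effective address as displacement + base + (index << scale), adds the segment base (always outside long mode, and only for FS/GS within it), then truncates to the instruction's address size. A tiny self-contained model of just that arithmetic, with made-up state:

    /* Model of the effective-address computation performed above; all
     * inputs are made up and passed in explicitly. */
    #include <stdint.h>
    #include <stdbool.h>

    static uint64_t exampleEffAddr(uint64_t uDisp, bool fHasBase, uint64_t uBase,
                                   bool fHasIdx, uint64_t uIdx, unsigned uScale,
                                   bool fAddSegBase, uint64_t uSegBase, unsigned iAddrSize)
    {
        static uint64_t const s_auMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff),
                                              UINT64_C(0xffffffffffffffff) };
        uint64_t uEffAddr = uDisp;
        if (fHasBase)
            uEffAddr += uBase;              /* base register, if encoded */
        if (fHasIdx)
            uEffAddr += uIdx << uScale;     /* scaled index register, if encoded */
        if (fAddSegBase)
            uEffAddr += uSegBase;           /* seg base: always outside long mode, FS/GS within */
        return uEffAddr & s_auMasks[iAddrSize]; /* 0=16-bit, 1=32-bit, 2=64-bit */
    }
    /* e.g. exampleEffAddr(0x10, true, 0x1000, true, 2, 3, false, 0, 1) == 0x1020 */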
+
+
+ /**
+  * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
+  * guest attempting to execute a VMX instruction.
+  *
+  * @returns Strict VBox status code (i.e. informational status codes too).
+  * @retval  VINF_SUCCESS if we should continue handling the VM-exit.
+  * @retval  VINF_HM_PENDING_XCPT if an exception was raised.
+  *
+  * @param   pVCpu          The cross context virtual CPU structure.
+  * @param   pVmxTransient  Pointer to the VMX transient structure.
+  *
+  * @todo    NstVmx: Document other error codes when VM-exit is implemented.
+  * @remarks No-long-jump zone!!!
+  */
+ static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+ {
+     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
+                               | CPUMCTX_EXTRN_HWVIRT);
+
+     if (   CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx)
+         || (   CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
+             && !CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
+     {
+         Log4Func(("In real/v86-mode or long-mode outside 64-bit code segment -> #UD\n"));
+         hmR0VmxSetPendingXcptUD(pVCpu);
+         return VINF_HM_PENDING_XCPT;
+     }
+
+     if (pVmxTransient->uExitReason == VMX_EXIT_VMXON)
+     {
+         /*
+          * We check CR4.VMXE because it is required to be always set while in VMX operation
+          * by physical CPUs and our CR4 read shadow is only consulted when executing specific
+          * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
+          * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
+          */
+         if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
+         {
+             Log4Func(("CR4.VMXE is not set -> #UD\n"));
+             hmR0VmxSetPendingXcptUD(pVCpu);
+             return VINF_HM_PENDING_XCPT;
+         }
+     }
+     else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
+     {
+         /*
+          * The guest has not entered VMX operation but attempted to execute a VMX instruction
+          * (other than VMXON), so we need to raise a #UD.
+          */
+         Log4Func(("Not in VMX root mode -> #UD\n"));
+         hmR0VmxSetPendingXcptUD(pVCpu);
+         return VINF_HM_PENDING_XCPT;
+     }
+
+     if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
+     {
+         /*
+          * The nested-guest attempted to execute a VMX instruction, cause a VM-exit and let
+          * the guest hypervisor deal with it.
+          */
+         /** @todo NSTVMX: Trigger a VM-exit */
+     }
+
+     /*
+      * VMX instructions require CPL 0 except in VMX non-root mode where the VM-exit intercept
+      * (above) takes precedence over the CPL check.
+      */
+     if (CPUMGetGuestCPL(pVCpu) > 0)
+     {
+         Log4Func(("CPL > 0 -> #GP(0)\n"));
+         hmR0VmxSetPendingXcptGP(pVCpu, 0);
+         return VINF_HM_PENDING_XCPT;
+     }
+
+     return VINF_SUCCESS;
+ }
+
+ #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
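Handlers consume the VINF_HM_PENDING_XCPT contract by returning VINF_SUCCESS so the queued #UD or #GP is injected on the next VM-entry. A hypothetical handler skeleton showing that call pattern (the real VMXON handler later in this changeset follows this shape); it relies on the surrounding file's types and is not standalone:

    /* Hypothetical skeleton, assuming the file context above. */
    static VBOXSTRICTRC exampleVmxInstrExitHandler(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    {
        VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToVmxInstr(pVCpu, pVmxTransient);
        if (rcStrict == VINF_HM_PENDING_XCPT)
            return VINF_SUCCESS;    /* exception queued; injected on next VM-entry */
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;        /* genuine failure */
        /* ... decode the memory operand and hand the instruction to IEM ... */
        return rcStrict;
    }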
…
      VBOXSTRICTRC rcStrict = VINF_SUCCESS;
-     if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
+     if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
      {
          uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
…
      Assert(VMMRZCallRing3IsEnabled(pVCpu));

+ #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_ONLY_IN_IEM
+     Log2(("hmR0VmxPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
+     return VINF_EM_RESCHEDULE_REM;
+ #endif
+
  #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
      PGMRZDynMapFlushAutoSet(pVCpu);
…
          case VMX_EXIT_VMXON:        SET_BOTH(VMX_VMXON); break;
          case VMX_EXIT_MOV_CRX:
-             hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
-             if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
+             hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
+             if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
                  SET_BOTH(CRX_READ);
              else
                  SET_BOTH(CRX_WRITE);
-             uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQualification);
+             uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
              break;
          case VMX_EXIT_MOV_DRX:
-             hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
-             if (   VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification)
+             hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
+             if (   VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
                  == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
                  SET_BOTH(DRX_READ);
              else
                  SET_BOTH(DRX_WRITE);
-             uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification);
+             uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
              break;
          case VMX_EXIT_RDMSR:        SET_BOTH(RDMSR); break;
…
      if (fDtrace1 || fDtrace2)
      {
-         hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+         hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
          hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
          PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
…
      else
      {
-         hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+         hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
          int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
          AssertRC(rc);
-         VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification);
+         VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
      }
…
  }

+
+ /** @name VM-exit handlers.
+  * @{
+  */
  /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
  /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
  /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
-
- /** @name VM-exit handlers.
-  * @{
-  */
…
      rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
      AssertRCReturn(rc, rc);
-     hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
+     hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
                             pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
                             0 /* GCPtrFaultAddress */);
…
      Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);

-     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+     int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
      rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
      rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
      AssertRCReturn(rc, rc);

-     VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQualification);
+     VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQual);

      if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
…
      else
-         AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) sttus: %Rrc\n",
-                          pVmxTransient->uExitQualification, VBOXSTRICTRC_VAL(rcStrict)));
+         AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
+                          VBOXSTRICTRC_VAL(rcStrict)));
      return rcStrict;
  }
…
      STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);

-     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+     int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
      rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
      rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
…
      VBOXSTRICTRC rcStrict;
      PVM pVM = pVCpu->CTX_SUFF(pVM);
-     RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
-     uint32_t const    uAccessType        = VMX_EXIT_QUAL_CRX_ACCESS(uExitQualification);
+     RTGCUINTPTR const uExitQual   = pVmxTransient->uExitQual;
+     uint32_t const    uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
      switch (uAccessType)
      {
…
          {
              uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
-             rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
-                                                  VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
-                                                  VMX_EXIT_QUAL_CRX_GENREG(uExitQualification));
+             rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
+                                                  VMX_EXIT_QUAL_CRX_GENREG(uExitQual));
              AssertMsg(   rcStrict == VINF_SUCCESS
                        || rcStrict == VINF_IEM_RAISED_XCPT
                        || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

-             switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
+             switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
              {
                  case 0:
…
                                       HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
                      STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
-                     Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
+                     Log4Func(("CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));

                      /*
…
                      {
                          /** @todo check selectors rather than returning all the time. */
-                         Log4(("CRx CR0 write: back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
+                         Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
                          rcStrict = VINF_EM_RESCHEDULE_REM;
                      }
…
                      ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
                                       HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
-                     Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
+                     Log4Func(("CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
                      break;
                  }
…
                      ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
                                       HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
-                     Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
-                           pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
+                     Log4Func(("CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
+                               pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
                      break;
                  }
…
                  default:
-                     AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)));
+                     AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual)));
                      break;
              }
…
                     || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
                     || pVCpu->hm.s.fUsingDebugLoop
-                    || VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 3);
+                    || VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 3);
              /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
-             Assert(   VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 8
+             Assert(   VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 8
                     || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));

-             rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
-                                                 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification),
-                                                 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification));
+             rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_GENREG(uExitQual),
+                                                 VMX_EXIT_QUAL_CRX_REGISTER(uExitQual));
              AssertMsg(   rcStrict == VINF_SUCCESS
                        || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
  #ifdef VBOX_WITH_STATISTICS
-             switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
+             switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
              {
                  case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
…
              }
  #endif
-             Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
-                   VBOXSTRICTRC_VAL(rcStrict)));
-             if (VMX_EXIT_QUAL_CRX_GENREG(uExitQualification) == X86_GREG_xSP)
+             Log4Func(("CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
+                       VBOXSTRICTRC_VAL(rcStrict)));
+             if (VMX_EXIT_QUAL_CRX_GENREG(uExitQual) == X86_GREG_xSP)
                  ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
              else
…
              ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
              STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
-             Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
+             Log4Func(("CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
              break;
          }
…
          {
              /* Note! LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here. */
-             rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
-                                           VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQualification));
+             rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual));
              AssertMsg(   rcStrict == VINF_SUCCESS
                        || rcStrict == VINF_IEM_RAISED_XCPT
…
              ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
              STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
-             Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
+             Log4Func(("LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
              break;
          }
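The MOV CRx handler pulls everything out of the exit qualification. Per the Intel SDM (Table 27-3), bits 3:0 hold the control-register number, bits 5:4 the access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW), and bits 11:8 the general-purpose register involved. Local stand-ins for the VMX_EXIT_QUAL_CRX_* accessors used above:

    /* Stand-in accessors following the Intel SDM layout of the MOV CRx
     * exit qualification; not the real VMX_EXIT_QUAL_CRX_* macros. */
    #include <stdint.h>

    #define EX_QUAL_CRX_REGISTER(a_uQual)  ((uint32_t)((a_uQual) & 0xf))        /* bits 3:0  */
    #define EX_QUAL_CRX_ACCESS(a_uQual)    ((uint32_t)(((a_uQual) >> 4) & 3))   /* bits 5:4  */
    #define EX_QUAL_CRX_GENREG(a_uQual)    ((uint32_t)(((a_uQual) >> 8) & 0xf)) /* bits 11:8 */

    /* Example: a qualification of 0x214 decodes as CR4, access type 1
     * (MOV from CR), general-purpose register 2 (RDX/EDX). */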
…
      PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+     int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
      rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
      rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER);
…

      /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
-     uint32_t uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQualification);
-     uint8_t  uIOWidth     = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQualification);
-     bool     fIOWrite     = (   VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQualification)
-                              == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
-     bool     fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQualification);
+     uint32_t uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
+     uint8_t  uIOWidth     = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQual);
+     bool     fIOWrite     = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
+     bool     fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
      bool     fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
      bool     fDbgStepping = pVCpu->hm.s.fSingleInstruction;
…
           * interpreting the instruction.
           */
-         Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
+         Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
          AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
          bool const fInsOutsInfo = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
…
          AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
          IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
-         bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification);
+         bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
          if (fIOWrite)
              rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
…
           * IN/OUT - I/O instruction.
           */
-         Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
+         Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
          uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
-         Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification));
+         Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
          if (fIOWrite)
          {
…
          Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
                pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
-               VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "",
+               VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
                fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth));
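Likewise, the I/O handler decodes the exit qualification laid out in Intel SDM Table 27-5: bits 2:0 give the access size minus one, bit 3 the direction (1 = IN), bit 4 flags string instructions (INS/OUTS), bit 5 a REP prefix, and bits 31:16 the port number. Stand-in accessors, not the real VMX_EXIT_QUAL_IO_* macros:

    #include <stdint.h>
    #include <stdbool.h>

    #define EX_QUAL_IO_WIDTH(a_uQual)     ((uint8_t)((a_uQual) & 7))        /* size - 1      */
    #define EX_QUAL_IO_IS_IN(a_uQual)     ((((a_uQual) >> 3) & 1) != 0)     /* 1 = IN        */
    #define EX_QUAL_IO_IS_STRING(a_uQual) ((((a_uQual) >> 4) & 1) != 0)     /* INS/OUTS      */
    #define EX_QUAL_IO_IS_REP(a_uQual)    ((((a_uQual) >> 5) & 1) != 0)     /* REP prefixed  */
    #define EX_QUAL_IO_PORT(a_uQual)      ((uint16_t)((a_uQual) >> 16))     /* port number   */

    /* Example: a qualification of 0x00600009 decodes as IN from port 0x60,
     * 2-byte access, non-string, no REP. */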
…
      /* Check if this task-switch occurred while delivering an event through the guest IDT. */
-     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+     int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
      AssertRCReturn(rc, rc);
-     if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
+     if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
      {
          rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
          AssertRCReturn(rc, rc);
-         if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
+         if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
          {
              uint32_t uErrCode;
…
                                     0 /* cbInstr */, uErrCode, GCPtrFaultAddress);

-             Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
+             Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", uIntType, uVector));
              STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
              return VINF_EM_RAW_INJECT_TRPM_EVENT;
…
      /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
      int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
-     rc    |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+     rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
      AssertRCReturn(rc, rc);

      /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
-     uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
+     uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
      VBOXSTRICTRC rcStrict2;
      switch (uAccessType)
      {
…
          {
              AssertMsg(   !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
-                       || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != XAPIC_OFF_TPR,
+                       || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
                        ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));

              RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase;   /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */
              GCPhys &= PAGE_BASE_GC_MASK;
-             GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
+             GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
              PVM pVM = pVCpu->CTX_SUFF(pVM);
              Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
-                       VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
+                       VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));

              PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
…
  #ifdef VBOX_WITH_STATISTICS
-         rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+         rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
          AssertRCReturn(rc, rc);
-         if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
+         if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
              STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
          else
…
       */
      PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+     int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
      rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
      AssertRCReturn(rc, rc);
…

      PVM pVM = pVCpu->CTX_SUFF(pVM);
-     if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
+     if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
      {
          rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
-                                  VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification),
-                                  VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification));
+                                  VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
+                                  VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
          if (RT_SUCCESS(rc))
              ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
…
      {
          rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
-                                 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification),
-                                 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification));
+                                 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
+                                 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
          STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
      }
…
      PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
      rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
-     Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
+     Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
      if (   rcStrict == VINF_SUCCESS
          || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
…
      RTGCPHYS GCPhys;
      int rc  = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
-     rc     |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+     rc     |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
      rc     |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
      AssertRCReturn(rc, rc);

      /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
-     AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
+     AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQual));

      RTGCUINT uErrorCode = 0;
-     if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
+     if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
          uErrorCode |= X86_TRAP_PF_ID;
-     if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_DATA_WRITE)
+     if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_DATA_WRITE)
          uErrorCode |= X86_TRAP_PF_RW;
-     if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
+     if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
          uErrorCode |= X86_TRAP_PF_P;
…
      PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;

-     Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
-               uErrorCode, pCtx->cs.Sel, pCtx->rip));
+     Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode,
+               pCtx->cs.Sel, pCtx->rip));

      VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
…
  /** @} */

+ /** @name VM-exit exception handlers.
+  * @{
+  */
  /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
  /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit exception handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
  /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
-
- /** @name VM-exit exception handlers.
-  * @{
-  */
…
      }

-     hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
-                            pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
-                            0 /* GCPtrFaultAddress */);
+     hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(uExitIntInfo), pVmxTransient->cbInstr,
+                            pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
      return rc;
  }
…
      AssertRCReturn(rc, rc);

-         hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                                pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+         hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                                pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
      }
…
      Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);

-     hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                            pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+     hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                            pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
      return VINF_SUCCESS;
  }
…
       * for processing.
       */
-     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+     int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);

      /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
      uint64_t uDR6 = X86_DR6_INIT_VAL;
-     uDR6         |= (   pVmxTransient->uExitQualification
-                      & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
+     uDR6         |= (pVmxTransient->uExitQual & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));

      PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
…
      rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
      AssertRCReturn(rc, rc);
-     hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                            pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+     hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                            pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
      return VINF_SUCCESS;
  }
…
      Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pCtx->cs.Sel, pCtx->rip,
                pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
-     hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                            pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+     hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                            pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
      return rc;
  }
…
          && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
      {
-         Log4(("hmR0VmxExitXcptGP: mode changed -> VINF_EM_RESCHEDULE\n"));
+         Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
          /** @todo Exit fRealOnV86Active here w/o dropping back to ring-3. */
          rc = VINF_EM_RESCHEDULE;
…
  #endif

-     hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                            pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+     hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                            pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
      return VINF_SUCCESS;
  }
…
      HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
      PVM pVM = pVCpu->CTX_SUFF(pVM);
-     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+     int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
      rc    |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
      rc    |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
…
      if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
      {
-         hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                                0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
+         hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
+                                pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
      }
      else
…
      AssertRCReturn(rc, rc);

-     Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
-               pCtx->cs.Sel, pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
-
-     TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
-     rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx),
-                           (RTGCPTR)pVmxTransient->uExitQualification);
+     Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQual, pCtx->cs.Sel,
+               pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
+
+     TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
+     rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);

      Log4Func(("#PF: rc=%Rrc\n", rc));
…
          TRPMResetTrap(pVCpu);
          pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
-         hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                                0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
+         hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
+                                uGstErrorCode, pVmxTransient->uExitQual);
      }
      else
…
  /** @} */

+ #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+
+ /** @name Nested-guest VM-exit handlers.
+  * @{
+  */
+ /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
+ /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Nested-guest VM-exit handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
+ /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
+
+ /**
+  * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
+  */
+ HMVMX_EXIT_DECL hmR0VmxExitVmclear(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+ {
+     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+     /** @todo NSTVMX: Vmclear. */
+     hmR0VmxSetPendingXcptUD(pVCpu);
+     return VINF_SUCCESS;
+ }
+
+
+ /**
+  * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
+  */
+ HMVMX_EXIT_DECL hmR0VmxExitVmlaunch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+ {
+     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+     /** @todo NSTVMX: Vmlaunch. */
+     hmR0VmxSetPendingXcptUD(pVCpu);
+     return VINF_SUCCESS;
+ }
+
+
+ /**
+  * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
+  */
+ HMVMX_EXIT_DECL hmR0VmxExitVmptrld(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+ {
+     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+     /** @todo NSTVMX: Vmptrld. */
+     hmR0VmxSetPendingXcptUD(pVCpu);
+     return VINF_SUCCESS;
+ }
+
+
+ /**
+  * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
+  */
+ HMVMX_EXIT_DECL hmR0VmxExitVmptrst(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+ {
+     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+     /** @todo NSTVMX: Vmptrst. */
+     hmR0VmxSetPendingXcptUD(pVCpu);
+     return VINF_SUCCESS;
+ }
+
+
+ /**
+  * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Unconditional VM-exit.
+  */
+ HMVMX_EXIT_DECL hmR0VmxExitVmread(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+ {
+     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+     /** @todo NSTVMX: Vmread. */
+     hmR0VmxSetPendingXcptUD(pVCpu);
+     return VINF_SUCCESS;
+ }
+
+
+ /**
+  * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
+  */
+ HMVMX_EXIT_DECL hmR0VmxExitVmresume(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+ {
+     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+     /** @todo NSTVMX: Vmresume. */
+     hmR0VmxSetPendingXcptUD(pVCpu);
+     return VINF_SUCCESS;
+ }
+
+
+ /**
+  * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Unconditional VM-exit.
+  */
+ HMVMX_EXIT_DECL hmR0VmxExitVmwrite(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+ {
+     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+     /** @todo NSTVMX: Vmwrite. */
+     hmR0VmxSetPendingXcptUD(pVCpu);
+     return VINF_SUCCESS;
+ }
+
+
+ /**
+  * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
+  */
+ HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+ {
+     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+     rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
+     AssertRCReturn(rc, rc);
+
+     VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr);
+     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+     {
+         /* VMXOFF on success changes the internal hwvirt state but not anything that's visible to the guest. */
+         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
+     }
+     else if (rcStrict == VINF_IEM_RAISED_XCPT)
+     {
+         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+         rcStrict = VINF_SUCCESS;
+     }
+     return rcStrict;
+ }
+
+
+ /**
+  * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
+  */
+ HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+ {
+     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+     rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
+     rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
+     rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
+     AssertRCReturn(rc, rc);
+
+     VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToVmxInstr(pVCpu, pVmxTransient);
+     if (rcStrict == VINF_SUCCESS)
+     { /* likely */ }
+     else if (rcStrict == VINF_HM_PENDING_XCPT)
+     {
+         Log4Func(("Privilege checks failed, raising xcpt %#x!\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
+         return VINF_SUCCESS;
+     }
+     else
+     {
+         Log4Func(("hmR0VmxCheckExitDueToVmxInstr failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+         return rcStrict;
+     }
+
+     RTGCPTR            GCPtrVmxon;
+     PCVMXEXITINSTRINFO pExitInstrInfo = &pVmxTransient->ExitInstrInfo;
+     RTGCPTR const      GCPtrDisp      = pVmxTransient->uExitQual;
+     rcStrict = hmR0VmxDecodeMemOperand(pVCpu, pExitInstrInfo, GCPtrDisp, false /*fIsWrite*/, &GCPtrVmxon);
+     if (rcStrict == VINF_SUCCESS)
+     { /* likely */ }
+     else if (rcStrict == VINF_HM_PENDING_XCPT)
+     {
+         Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
+         return VINF_SUCCESS;
+     }
+     else
+     {
+         Log4Func(("hmR0VmxDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+         return rcStrict;
+     }
+
+     rcStrict = IEMExecDecodedVmxon(pVCpu, pVmxTransient->cbInstr, GCPtrVmxon, pExitInstrInfo->u, GCPtrDisp);
+     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
+     else if (rcStrict == VINF_IEM_RAISED_XCPT)
+     {
+         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+         rcStrict = VINF_SUCCESS;
+     }
+     return rcStrict;
+ }
+
+ /** @} */
+
+ #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
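For context, these handlers plug into the g_apfnVMExitHandlers table shown near the top of this changeset: dispatch is simply an array index on the basic exit reason, so adding nested-VMX support amounts to new table entries under the #ifdef. A schematic, self-contained model of that dispatch pattern with illustrative names:

    /* Schematic model of exit-reason table dispatch; names are stand-ins
     * for the real FNVMXEXITHANDLER/g_apfnVMExitHandlers machinery. */
    #include <stdio.h>

    typedef int FNEXAMPLEEXITHANDLER(unsigned uExitReason);

    static int exampleExitVmxon(unsigned uExitReason)   { printf("VMXON exit (%u)\n", uExitReason); return 0; }
    static int exampleExitDefault(unsigned uExitReason) { printf("unhandled exit %u\n", uExitReason); return -1; }

    /* Index == basic exit reason, mirroring g_apfnVMExitHandlers. */
    static FNEXAMPLEEXITHANDLER * const g_apfnExampleHandlers[] =
    {
        exampleExitDefault,  /* 0 */
        exampleExitVmxon,    /* 1 -- stands in for slot 27, VMX_EXIT_VMXON */
    };

    static int exampleDispatch(unsigned uExitReason)
    {
        unsigned const cHandlers = sizeof(g_apfnExampleHandlers) / sizeof(g_apfnExampleHandlers[0]);
        return uExitReason < cHandlers ? g_apfnExampleHandlers[uExitReason](uExitReason)
                                       : exampleExitDefault(uExitReason);
    }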