Changeset 73983 in vbox for trunk/src/VBox
- Timestamp: Aug 31, 2018 8:17:31 AM
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r73937 to r73983): adds the VMREAD entries to the VMX instruction diagnostics descriptor table.

```diff
--- trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r73937)
+++ trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r73983)
@@ -102 +102 @@
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrInvalid    , "PtrInvalid"    ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrMap        , "PtrMap"        ),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Success       , "Success"       )
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Success       , "Success"       ),
+    /* VMREAD. */
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_Cpl            , "Cpl"           ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_FieldInvalid   , "FieldInvalid"  ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_LinkPtrInvalid , "LinkPtrInvalid"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_PtrInvalid     , "PtrInvalid"    ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_PtrMap         , "PtrMap"        ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_Success        , "Success"       )
     /* kVmxVInstrDiag_Last */
 };
```
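Each VMX_INSTR_DIAG_DESC() entry pairs a kVmxVInstrDiag_* enumerator with a short human-readable tag, so the reason a virtual VMX instruction failed can be logged by name. A minimal sketch of how such a descriptor table is typically consumed; the helper, the array name g_apszVmxVInstrDiagDesc and the enum type name VMXVINSTRDIAG are illustrative assumptions, not taken from this changeset:

```c
/* Hypothetical lookup helper; assumes the VMX_INSTR_DIAG_DESC() entries above
 * populate g_apszVmxVInstrDiagDesc[] in enum order (names are illustrative). */
static const char *vmxVInstrDiagToString(VMXVINSTRDIAG enmDiag)
{
    if ((unsigned)enmDiag < RT_ELEMENTS(g_apszVmxVInstrDiagDesc))
        return g_apszVmxVInstrDiagDesc[enmDiag];
    return "<unknown>";
}
```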
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r73961 to r73983): adds IEMExecDecodedVmread and reworks the decoded-instruction interfaces for HM/EM to take a single VMXVEXITINFO pointer instead of individually decoded arguments.

```diff
--- trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r73961)
+++ trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r73983)
@@ -15540 +15540 @@
 
 /**
- * Interface for HM and EM to emulate the VMWRITE instruction.
+ * Interface for HM and EM to emulate the VMREAD instruction.
  *
  * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   cbInstr     The instruction length in bytes.
- * @param   u64Val      The value to write or guest linear address of the value
- *                      to write.
- * @param   uFieldEnc   The VMCS field encoding.
  * @param   pExitInfo   Pointer to the VM-exit information struct.
  * @thread  EMT(pVCpu)
  */
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint64_t u64Val, uint32_t uFieldEnc,
-                                                 PCVMXVEXITINFO pExitInfo)
-{
-    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
     Assert(pExitInfo);
 
-    uint8_t const   iEffSeg        = pExitInfo->ExitInstrInfo.VmreadVmwrite.iSegReg;
-    IEMMODE const   enmEffAddrMode = (IEMMODE)pExitInfo->ExitInstrInfo.VmreadVmwrite.u3AddrSize;
-
     iemInitExec(pVCpu, false /*fBypassHandlers*/);
+
+    VBOXSTRICTRC   rcStrict;
+    uint8_t const  cbInstr   = pExitInfo->cbInstr;
+    uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
+    if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
+    {
+        if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+        {
+            uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
+            rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
+        }
+        else
+        {
+            uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
+            rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
+        }
+    }
+    else
+    {
+        RTGCPTR GCPtrDst       = pExitInfo->GCPtrEffAddr;
+        uint8_t iEffSeg        = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
+        IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
+        rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
+    }
+    if (pVCpu->iem.s.cActiveMappings)
+        iemMemRollback(pVCpu);
+    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
+}
+
+
+/**
+ * Interface for HM and EM to emulate the VMWRITE instruction.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   pExitInfo   Pointer to the VM-exit information struct.
+ * @thread  EMT(pVCpu)
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
+    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
+    Assert(pExitInfo);
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+
+    uint64_t u64Val;
+    uint8_t  iEffSeg;
+    IEMMODE  enmEffAddrMode;
+    if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
+    {
+        u64Val         = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
+        iEffSeg        = UINT8_MAX;
+        enmEffAddrMode = UINT8_MAX;
+    }
+    else
+    {
+        u64Val         = pExitInfo->GCPtrEffAddr;
+        iEffSeg        = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
+        enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
+    }
+    uint8_t const  cbInstr   = pExitInfo->cbInstr;
+    uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
     VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
     if (pVCpu->iem.s.cActiveMappings)
@@ -15574 +15629 @@
  * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   cbInstr     The instruction length in bytes.
- * @param   iEffSeg     The effective segment register to use with @a GCPtrVmcs.
- * @param   GCPtrVmcs   The linear address of the VMCS pointer.
  * @param   pExitInfo   Pointer to the VM-exit information struct.
  * @thread  EMT(pVCpu)
  */
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
-                                                 PCVMXVEXITINFO pExitInfo)
-{
-    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
+{
+    Assert(pExitInfo);
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
-    Assert(pExitInfo);
 
     iemInitExec(pVCpu, false /*fBypassHandlers*/);
+
+    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
+    uint8_t const cbInstr   = pExitInfo->cbInstr;
+    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
     VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
     if (pVCpu->iem.s.cActiveMappings)
@@ -15600 +15655 @@
  * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   cbInstr     The instruction length in bytes.
- * @param   iEffSeg     The effective segment register to use with @a GCPtrVmcs.
- * @param   GCPtrVmcs   The linear address of the VMCS pointer.
  * @param   pExitInfo   Pointer to the VM-exit information struct.
  * @thread  EMT(pVCpu)
  */
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
-                                                 PCVMXVEXITINFO pExitInfo)
-{
-    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
+{
+    Assert(pExitInfo);
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
-    Assert(pExitInfo);
 
     iemInitExec(pVCpu, false /*fBypassHandlers*/);
+
+    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
+    uint8_t const cbInstr   = pExitInfo->cbInstr;
+    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
     VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
     if (pVCpu->iem.s.cActiveMappings)
@@ -15626 +15681 @@
  * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   cbInstr     The instruction length in bytes.
- * @param   iEffSeg     The effective segment register to use with @a GCPtrVmcs.
- * @param   GCPtrVmcs   The linear address of the VMCS pointer.
  * @param   pExitInfo   Pointer to the VM-exit information struct.
  * @thread  EMT(pVCpu)
  */
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
-                                                 PCVMXVEXITINFO pExitInfo)
-{
-    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
+{
+    Assert(pExitInfo);
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
-    Assert(pExitInfo);
 
     iemInitExec(pVCpu, false /*fBypassHandlers*/);
+
+    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
+    uint8_t const cbInstr   = pExitInfo->cbInstr;
+    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
     VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
     if (pVCpu->iem.s.cActiveMappings)
@@ -15652 +15707 @@
  * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   cbInstr     The instruction length in bytes.
- * @param   iEffSeg     The effective segment register to use with @a
- *                      GCPtrVmxon.
- * @param   GCPtrVmxon  The linear address of the VMXON pointer.
- * @param   pExitInfo   The VM-exit instruction information struct.
+ * @param   pExitInfo   Pointer to the VM-exit information struct.
  * @thread  EMT(pVCpu)
  */
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmxon,
-                                               PCVMXVEXITINFO pExitInfo)
-{
-    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
+{
+    Assert(pExitInfo);
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
-    Assert(pExitInfo);
 
     iemInitExec(pVCpu, false /*fBypassHandlers*/);
+
+    uint8_t const iEffSeg    = pExitInfo->InstrInfo.VmxXsave.iSegReg;
+    uint8_t const cbInstr    = pExitInfo->cbInstr;
+    RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
     VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
     if (pVCpu->iem.s.cActiveMappings)
```
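With this rework, every decoded-instruction interface takes the whole VMXVEXITINFO, so a ring-0 caller packages the VM-exit facts once and hands over the struct. A caller-side sketch modeled on the reworked handlers in HMVMXR0.cpp further down; a VMPTRLD exit handler is not part of this changeset, so the function below is illustrative only:

```c
/* Illustrative ring-0 caller: gather the exit reason, qualification,
 * instruction info and length into one VMXVEXITINFO and pass it to IEM,
 * instead of passing cbInstr/iEffSeg/GCPtrVmcs individually as the old
 * signatures required. Error handling is simplified; the real handlers
 * also deal with VINF_HM_PENDING_XCPT separately. */
static VBOXSTRICTRC hmR0VmxExitVmptrldExample(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    VMXVEXITINFO ExitInfo;
    RT_ZERO(ExitInfo);
    ExitInfo.uReason     = pVmxTransient->uExitReason;
    ExitInfo.u64Qual     = pVmxTransient->uExitQual;
    ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
    ExitInfo.cbInstr     = pVmxTransient->cbInstr;

    /* Memory operands must be decoded into an effective address first;
     * hmR0VmxDecodeMemOperand (see HMVMXR0.cpp below) does this. */
    RTGCPTR GCPtrVmcs;
    VBOXSTRICTRC rcStrict = hmR0VmxDecodeMemOperand(pVCpu, &ExitInfo.InstrInfo, ExitInfo.u64Qual,
                                                    false /* fIsDstOperand */, &GCPtrVmcs);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    ExitInfo.GCPtrEffAddr = GCPtrVmcs;

    return IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
}
```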
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r73970 to r73983): reports the ModR/M register operands according to the instruction's read/write form and adds the VMREAD execution workers.

```diff
--- trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r73970)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r73983)
@@ -654 +654 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   uExitReason The VM-exit reason.
- * @param   InstrId     The VM-exit instruction identity (VMX_INSTR_ID_XXX) if
+ * @param   uInstrId    The VM-exit instruction identity (VMX_INSTR_ID_XXX) if
  *                      any. Pass VMX_INSTR_ID_NONE otherwise.
+ * @param   fPrimaryOpRead  If the primary operand of the ModR/M byte (bits 0:3) is
+ *                          a read or write.
  * @param   pGCPtrDisp  Where to store the displacement field. Optional, can be
  *                      NULL.
  */
-IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID InstrId, PRTGCPTR pGCPtrDisp)
+IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, bool fPrimaryOpRead,
+                                           PRTGCPTR pGCPtrDisp)
 {
     RTGCPTR GCPtrDisp;
@@ -675 +678 @@
         /*
          * ModR/M indicates register addressing.
+         *
+         * The primary/secondary register operands are reported in the iReg1 or iReg2
+         * fields depending on whether it is a read/write form.
          */
+        uint8_t idxReg1;
+        uint8_t idxReg2;
+        if (fPrimaryOpRead)
+        {
+            idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
+            idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
+        }
+        else
+        {
+            idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
+            idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
+        }
         ExitInstrInfo.All.u2Scaling       = 0;
-        ExitInstrInfo.All.iReg1           = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
+        ExitInstrInfo.All.iReg1           = idxReg1;
         ExitInstrInfo.All.u3AddrSize      = pVCpu->iem.s.enmEffAddrMode;
         ExitInstrInfo.All.fIsRegOperand   = 1;
@@ -686 +704 @@
         ExitInstrInfo.All.iBaseReg        = 0;
         ExitInstrInfo.All.fBaseRegInvalid = 1;
-        ExitInstrInfo.All.iReg2           = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
+        ExitInstrInfo.All.iReg2           = idxReg2;
 
         /* Displacement not applicable for register addressing. */
@@ -701 +719 @@
         uint8_t  iBaseReg   = 0;
         uint8_t  iIdxReg    = 0;
-        uint8_t  iReg2      = 0;
         if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
         {
@@ -747 +764 @@
             Assert(!uScale);                  /* There's no scaling/SIB byte for 16-bit addressing. */
             GCPtrDisp = (int16_t)u16Disp;     /* Sign-extend the displacement. */
-            iReg2     = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
         }
         else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
@@ -819 +835 @@
 
             GCPtrDisp = (int32_t)u32Disp;     /* Sign-extend the displacement. */
-            iReg2     = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
         }
         else
@@ -908 +923 @@
 
             GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
-            iReg2     = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
         }
 
+        /*
+         * The primary or secondary register operand is reported in iReg2 depending
+         * on whether the primary operand is in read/write form.
+         */
+        uint8_t idxReg2;
+        if (fPrimaryOpRead)
+        {
+            idxReg2 = bRm & X86_MODRM_RM_MASK;
+            if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
+                idxReg2 |= pVCpu->iem.s.uRexB;
+        }
+        else
+        {
+            idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
+            if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
+                idxReg2 |= pVCpu->iem.s.uRexReg;
+        }
         ExitInstrInfo.All.u2Scaling      = uScale;
-        ExitInstrInfo.All.iReg1          = 0; /* Not applicable for memory instructions. */
+        ExitInstrInfo.All.iReg1          = 0; /* Not applicable for memory addressing. */
         ExitInstrInfo.All.u3AddrSize     = pVCpu->iem.s.enmEffAddrMode;
         ExitInstrInfo.All.fIsRegOperand  = 0;
@@ -921 +952 @@
         ExitInstrInfo.All.iBaseReg       = iBaseReg;
         ExitInstrInfo.All.iIdxReg        = !fBaseRegValid;
-        ExitInstrInfo.All.iReg2          = iReg2;
+        ExitInstrInfo.All.iReg2          = idxReg2;
     }
 
@@ -932 +963 @@
         case VMX_EXIT_XDTR_ACCESS:
         {
-            Assert(VMX_INSTR_ID_IS_VALID(InstrId));
-            ExitInstrInfo.GdtIdt.u2InstrId = VMX_INSTR_ID_GET_ID(InstrId);
+            Assert(VMX_INSTR_ID_IS_VALID(uInstrId));
+            ExitInstrInfo.GdtIdt.u2InstrId = VMX_INSTR_ID_GET_ID(uInstrId);
             ExitInstrInfo.GdtIdt.u2Undef0  = 0;
             break;
@@ -940 +971 @@
         case VMX_EXIT_TR_ACCESS:
         {
-            Assert(VMX_INSTR_ID_IS_VALID(InstrId));
-            ExitInstrInfo.LdtTr.u2InstrId = VMX_INSTR_ID_GET_ID(InstrId);
+            Assert(VMX_INSTR_ID_IS_VALID(uInstrId));
+            ExitInstrInfo.LdtTr.u2InstrId = VMX_INSTR_ID_GET_ID(uInstrId);
             ExitInstrInfo.LdtTr.u2Undef0  = 0;
             break;
@@ -1033 +1064 @@
     IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
     return rc;
 }
+
+
+/**
+ * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ */
+DECLINLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
+{
+    pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_Success;
+    iemVmxVmSucceed(pVCpu);
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+}
+
+
+/**
+ * VMREAD common (memory/register) instruction execution worker
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   cbInstr     The instruction length.
+ * @param   pu64Dst     Where to write the VMCS value (only updated when
+ *                      VINF_SUCCESS is returned).
+ * @param   uFieldEnc   The VMCS field encoding.
+ * @param   pExitInfo   Pointer to the VM-exit information struct. Optional, can
+ *                      be NULL.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint32_t uFieldEnc,
+                                           PCVMXVEXITINFO pExitInfo)
+{
+    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
+    {
+        RT_NOREF(pExitInfo); RT_NOREF(cbInstr);
+        /** @todo NSTVMX: intercept. */
+        /** @todo NSTVMX: VMCS shadowing intercept (VMREAD bitmap). */
+    }
+
+    /* CPL. */
+    if (CPUMGetGuestCPL(pVCpu) > 0)
+    {
+        Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_Cpl;
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    /* VMCS pointer in root mode. */
+    if (    IEM_IS_VMX_ROOT_MODE(pVCpu)
+        && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+    {
+        Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_PtrInvalid;
+        iemVmxVmFailInvalid(pVCpu);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    /* VMCS-link pointer in non-root mode. */
+    if (    IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
+        && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
+    {
+        Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_LinkPtrInvalid;
+        iemVmxVmFailInvalid(pVCpu);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    /* Supported VMCS field. */
+    if (!iemVmxIsVmcsFieldValid(pVCpu, uFieldEnc))
+    {
+        Log(("vmread: VMCS field %#x invalid -> VMFail\n", uFieldEnc));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_FieldInvalid;
+        iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Setup reading from the current or shadow VMCS.
+     */
+    uint8_t *pbVmcs;
+    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
+        pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
+    else
+        pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pbVmcs);
+
+    PCVMXVMCSFIELDENC pFieldEnc = (PCVMXVMCSFIELDENC)&uFieldEnc;
+    uint8_t  const uWidth     = pFieldEnc->n.u2Width;
+    uint8_t  const uType      = pFieldEnc->n.u2Type;
+    uint8_t  const uWidthType = (uWidth << 2) | uType;
+    uint8_t  const uIndex     = pFieldEnc->n.u8Index;
+    AssertRCReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
+    uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
+
+    /*
+     * Read the VMCS component based on the field's effective width.
+     *
+     * The effective width is 64-bit fields adjusted to 32-bits if the access-type
+     * indicates high bits (little endian).
+     *
+     * Note! The caller is responsible to trim the result and update registers
+     * or memory locations are required. Here we just zero-extend to the largest
+     * type (i.e. 64-bits).
+     */
+    uint8_t      *pbField   = pbVmcs + offField;
+    uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(uFieldEnc);
+    switch (uEffWidth)
+    {
+        case VMX_VMCS_ENC_WIDTH_64BIT:
+        case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
+        case VMX_VMCS_ENC_WIDTH_32BIT:   *pu64Dst = *(uint32_t *)pbField; break;
+        case VMX_VMCS_ENC_WIDTH_16BIT:   *pu64Dst = *(uint16_t *)pbField; break;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VMREAD (64-bit register) instruction execution worker.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   cbInstr     The instruction length.
+ * @param   pu64Dst     Where to store the VMCS field's value.
+ * @param   uFieldEnc   The VMCS field encoding.
+ * @param   pExitInfo   Pointer to the VM-exit information struct. Optional, can
+ *                      be NULL.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint32_t uFieldEnc,
+                                          PCVMXVEXITINFO pExitInfo)
+{
+    VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        iemVmxVmreadSuccess(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+    return rcStrict;
+}
+
+
+/**
+ * VMREAD (32-bit register) instruction execution worker.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   cbInstr     The instruction length.
+ * @param   pu32Dst     Where to store the VMCS field's value.
+ * @param   uFieldEnc   The VMCS field encoding.
+ * @param   pExitInfo   Pointer to the VM-exit information struct. Optional, can
+ *                      be NULL.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint32_t uFieldEnc,
+                                          PCVMXVEXITINFO pExitInfo)
+{
+    uint64_t u64Dst;
+    VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, uFieldEnc, pExitInfo);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        *pu32Dst = u64Dst;
+        iemVmxVmreadSuccess(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+    return rcStrict;
+}
+
+
+/**
+ * VMREAD (memory) instruction execution worker.
+ *
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   cbInstr         The instruction length.
+ * @param   iEffSeg         The effective segment register to use with @a u64Val.
+ *                          Pass UINT8_MAX if it is a register access.
+ * @param   enmEffAddrMode  The effective addressing mode (only used with memory
+ *                          operand).
+ * @param   GCPtrDst        The guest linear address to store the VMCS field's
+ *                          value.
+ * @param   uFieldEnc       The VMCS field encoding.
+ * @param   pExitInfo       Pointer to the VM-exit information struct. Optional,
+ *                          can be NULL.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
+                                        RTGCPTR GCPtrDst, uint32_t uFieldEnc, PCVMXVEXITINFO pExitInfo)
+{
+    uint64_t u64Dst;
+    VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, uFieldEnc, pExitInfo);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        /*
+         * Write the VMCS field's value to the location specified in guest-memory.
+         *
+         * The pointer size depends on the address size (address-size prefix allowed).
+         * The operand size depends on IA-32e mode (operand-size prefix not allowed).
+         */
+        static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
+        Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
+        GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
+
+        if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+            rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
+        else
+            rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
+        if (rcStrict == VINF_SUCCESS)
+        {
+            iemVmxVmreadSuccess(pVCpu, cbInstr);
+            return VINF_SUCCESS;
+        }
+
+        Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_PtrMap;
+        return rcStrict;
+    }
+
+    Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+    return rcStrict;
+}
@@ -1049 +1299 @@
  *                      operand.
  * @param   uFieldEnc   The VMCS field encoding.
- * @param   pExitInfo   Pointer to the VM-exit information struct.
+ * @param   pExitInfo   Pointer to the VM-exit information struct. Optional, can
+ *                      be NULL.
  */
 IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
@@ -1058 +1309 @@
         RT_NOREF(pExitInfo);
         /** @todo NSTVMX: intercept. */
-        /** @todo NSTVMX: VMCS shadowing intercept (VMREAD/VMWRITE bitmap). */
+        /** @todo NSTVMX: VMCS shadowing intercept (VMWRITE bitmap). */
@@ -1085 +1336 @@
     {
         Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_PtrInvalid;
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_LinkPtrInvalid;
         iemVmxVmFailInvalid(pVCpu);
         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
@@ -1091 +1342 @@
     }
 
-    /* If the VMWRITE instruction references memory, access the specified in memory operand. */
+    /* If the VMWRITE instruction references memory, access the specified memory operand. */
     bool const fIsRegOperand = iEffSeg == UINT8_MAX;
     if (!fIsRegOperand)
@@ -1116 +1367 @@
         }
     }
+    else
+        Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
 
     /* Supported VMCS field. */
@@ -1132 +1385 @@
         && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
     {
-        Log(("vmwrite: Write to read-only VMCS component -> VMFail\n", uFieldEnc));
+        Log(("vmwrite: Write to read-only VMCS component %#x -> VMFail\n", uFieldEnc));
         pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_FieldRo;
         iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
@@ -1346 +1599 @@
  * @param   cbInstr     The instruction length.
  * @param   GCPtrVmcs   The linear address of the current VMCS pointer.
- * @param   pExitInfo   Pointer to the virtual VM-exit information struct.
- *                      Optional, can be NULL.
+ * @param   pExitInfo   Pointer to the VM-exit information struct. Optional, can
+ *                      be NULL.
  *
  * @remarks Common VMX instruction checks are already expected to by the caller,
@@ -1757 +2010 @@
 IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint32_t, uFieldEnc)
 {
-    return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /*iEffSeg*/, IEMMODE_64BIT /* N/A */, u64Val, uFieldEnc, NULL /* pExitInfo */);
+    return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, uFieldEnc,
+                         NULL /* pExitInfo */);
 }
@@ -1769 +2023 @@
 }
 
+
+/**
+ * Implements 'VMREAD' 64-bit register.
+ */
+IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint32_t, uFieldEnc)
+{
+    return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, NULL /* pExitInfo */);
+}
+
+
+/**
+ * Implements 'VMREAD' 32-bit register.
+ */
+IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, uFieldEnc)
+{
+    return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, NULL /* pExitInfo */);
+}
+
+
+/**
+ * Implements 'VMREAD' memory.
+ */
+IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, uFieldEnc)
+{
+    return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, NULL /* pExitInfo */);
+}
+
 
 #endif
```
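iemVmxVmreadCommon resolves uFieldEnc to a byte offset inside the virtual VMCS by unpacking the field encoding and indexing g_aoffVmcsMap[uWidthType][uIndex]. The lookup keys come straight out of the 32-bit encoding itself, which per the Intel SDM packs access type, index, field type and field width into fixed bit positions. A standalone sketch of that decomposition, using plain shifts instead of the VMXVMCSFIELDENC bitfield view the code uses:

```c
#include <stdint.h>

/* Decompose a VMCS field encoding the way iemVmxVmreadCommon does via the
 * VMXVMCSFIELDENC bitfields; bit positions follow the Intel SDM layout. */
static void vmcsDecodeFieldEnc(uint32_t uFieldEnc, uint8_t *puAccessType,
                               uint16_t *puIndex, uint8_t *puType, uint8_t *puWidth)
{
    *puAccessType =  uFieldEnc        & 0x1;   /* bit 0: 0=full, 1=high 32 bits */
    *puIndex      = (uFieldEnc >> 1)  & 0x1ff; /* bits 9:1: index within group */
    *puType       = (uFieldEnc >> 10) & 0x3;   /* bits 11:10: control/exit-info/guest/host */
    *puWidth      = (uFieldEnc >> 13) & 0x3;   /* bits 14:13: 16-bit/64-bit/32-bit/natural */
}
```

The worker then combines width and type into uWidthType = (uWidth << 2) | uType and uses it together with the index to fetch the field's byte offset from the offset map.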
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h (r73959 to r73983): implements the VMREAD Ey, Gy decoder, replacing the stub when nested hardware virtualization is compiled in.

```diff
--- trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h (r73959)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h (r73983)
@@ -4279 +4279 @@
 
 /** Opcode 0x0f 0x78 - VMREAD Ey, Gy */
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+FNIEMOP_DEF(iemOp_vmread_Ey_Gy)
+{
+    IEMOP_MNEMONIC(vmread, "vmread Ey,Gy");
+    IEMOP_HLP_IN_VMX_OPERATION();
+    IEMOP_HLP_VMX_INSTR();
+    IEMMODE const enmEffOpSize = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? IEMMODE_64BIT : IEMMODE_32BIT;
+
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
+        if (enmEffOpSize == IEMMODE_64BIT)
+        {
+            IEM_MC_BEGIN(2, 0);
+            IEM_MC_ARG(uint64_t *, pu64Dst, 0);
+            IEM_MC_ARG(uint64_t,   u64Enc,  1);
+            IEM_MC_FETCH_GREG_U64(u64Enc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
+            IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALL_CIMPL_2(iemCImpl_vmread64_reg, pu64Dst, u64Enc);
+            IEM_MC_END();
+        }
+        else
+        {
+            IEM_MC_BEGIN(2, 0);
+            IEM_MC_ARG(uint32_t *, pu32Dst, 0);
+            IEM_MC_ARG(uint32_t,   u32Enc,  1);
+            IEM_MC_FETCH_GREG_U32(u32Enc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
+            IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALL_CIMPL_2(iemCImpl_vmread32_reg, pu32Dst, u32Enc);
+            IEM_MC_END();
+        }
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        if (enmEffOpSize == IEMMODE_64BIT)
+        {
+            IEM_MC_BEGIN(4, 0);
+            IEM_MC_ARG(uint8_t,       iEffSeg,                                         0);
+            IEM_MC_ARG_CONST(IEMMODE, enmEffAddrMode,/*=*/pVCpu->iem.s.enmEffAddrMode, 1);
+            IEM_MC_ARG(RTGCPTR,       GCPtrVal,                                        2);
+            IEM_MC_ARG(uint64_t,      u64Enc,                                          3);
+            IEM_MC_FETCH_GREG_U64(u64Enc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrVal, bRm, 0);
+            IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
+            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+            IEM_MC_CALL_CIMPL_4(iemCImpl_vmread_mem, iEffSeg, enmEffAddrMode, GCPtrVal, u64Enc);
+            IEM_MC_END();
+        }
+        else
+        {
+            IEM_MC_BEGIN(4, 0);
+            IEM_MC_ARG(uint8_t,       iEffSeg,                                         0);
+            IEM_MC_ARG_CONST(IEMMODE, enmEffAddrMode,/*=*/pVCpu->iem.s.enmEffAddrMode, 1);
+            IEM_MC_ARG(RTGCPTR,       GCPtrVal,                                        2);
+            IEM_MC_ARG(uint32_t,      u32Enc,                                          3);
+            IEM_MC_FETCH_GREG_U32(u32Enc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrVal, bRm, 0);
+            IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
+            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+            IEM_MC_CALL_CIMPL_4(iemCImpl_vmread_mem, iEffSeg, enmEffAddrMode, GCPtrVal, u32Enc);
+            IEM_MC_END();
+        }
+    }
+    return VINF_SUCCESS;
+}
+#else
 FNIEMOP_STUB(iemOp_vmread_Ey_Gy);
+#endif
+
 /* Opcode 0x66 0x0f 0x78 - AMD Group 17 */
 FNIEMOP_STUB(iemOp_AmdGrp17);
```
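The decoder pulls one operand out of the ModR/M reg field and the other out of the r/m field, widening each with the appropriate REX bit. A standalone sketch of that extraction; it mirrors the X86_MODRM_* arithmetic above but is written with plain constants (IEM keeps uRexReg/uRexB pre-shifted, whereas the sketch shifts explicitly):

```c
#include <stdint.h>

/* Split a ModR/M byte into mod/reg/rm and widen reg/rm with the REX.R and
 * REX.B bits (0 or 1 each), as the decoder above does for the Gy and Ey
 * operands of VMREAD. */
static void x86DecodeModRm(uint8_t bRm, uint8_t fRexR, uint8_t fRexB,
                           uint8_t *puMod, uint8_t *puReg, uint8_t *puRm)
{
    *puMod =   bRm >> 6;                        /* 3 = register form, else memory */
    *puReg = ((bRm >> 3) & 7) | (uint8_t)(fRexR << 3);
    *puRm  = ( bRm       & 7) | (uint8_t)(fRexB << 3);
}
```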
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r73959 to r73983): adds the HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR macro, renames fIsWrite to fIsDstOperand, and implements the VMWRITE VM-exit handler, reworking the VMXON handler to match the new IEM interfaces.

```diff
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r73959)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r73983)
@@ -183 +183 @@
     hmR0VmxExportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
                                  VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+/** Macro that does the necessary privilege checks and intercepted VM-exits for
+ *  guests that attempted to execute a VMX instruction. */
+#define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
+    do \
+    { \
+        VBOXSTRICTRC rcStrictTmp = hmR0VmxCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
+        if (rcStrictTmp == VINF_SUCCESS) \
+        { /* likely */ } \
+        else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
+        { \
+            Assert((a_pVCpu)->hm.s.Event.fPending); \
+            Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
+            return VINF_SUCCESS; \
+        } \
+        else \
+        { \
+            int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
+            AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
+        } \
+    } while (0)
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
 
 
@@ -5846 +5869 @@
 
 /**
- * Decodes the memory operand of a VM-exit due to instruction execution.
+ * Decodes the memory operand of an instruction that caused a VM-exit.
  *
  * The VM-exit qualification field provides the displacement field for memory
@@ -5857 +5880 @@
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pExitInstrInfo  Pointer to the VM-exit instruction information.
- * @param   fIsWrite        Whether the operand is a destination memory operand
- *                          (i.e. writeable memory location) or not.
+ * @param   fIsDstOperand   Whether the operand is a destination memory
+ *                          operand (i.e. writeable memory location) or not.
  * @param   GCPtrDisp       The instruction displacement field, if any. For
  *                          RIP-relative addressing pass RIP + displacement here.
- * @param   pGCPtrMem       Where to store the destination memory operand.
- */
-static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPU pVCpu, PCVMXEXITINSTRINFO pExitInstrInfo, RTGCPTR GCPtrDisp, bool fIsWrite,
-                                            PRTGCPTR pGCPtrMem)
+ * @param   pGCPtrMem       Where to store the effective destination memory address.
+ */
+static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPU pVCpu, PCVMXEXITINSTRINFO pExitInstrInfo, RTGCPTR GCPtrDisp,
+                                            bool fIsDstOperand, PRTGCPTR pGCPtrMem)
 {
     Assert(pExitInstrInfo);
@@ -5951 +5974 @@
     {
         /* Check permissions for the data segment. */
-        if (   fIsWrite
+        if (   fIsDstOperand
             && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
         {
@@ -5996 +6019 @@
     {
         /* Check permissions for the code segment. */
-        if (   fIsWrite
+        if (   fIsDstOperand
             || !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ))
         {
@@ -6040 +6063 @@
  *
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pVmxTransient   Pointer to the VMX transient structure.
+ * @param   uExitReason     The VM-exit reason.
  *
  * @todo    NstVmx: Document other error codes when VM-exit is implemented.
  * @remarks No-long-jump zone!!!
  */
-static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPU pVCpu, uint32_t uExitReason)
 {
     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
@@ -6059 +6082 @@
     }
 
-    if (pVmxTransient->uExitReason == VMX_EXIT_VMXON)
+    if (uExitReason == VMX_EXIT_VMXON)
     {
         /*
@@ -13422 +13445 @@
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
 
-    /** @todo NSTVMX: Vmwrite. */
-    hmR0VmxSetPendingXcptUD(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
- */
-HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
-{
-    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
-
-    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
-    rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
-    AssertRCReturn(rc, rc);
-
-    VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr);
-    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
-    {
-        /* VMXOFF on success changes the internal hwvirt state but not anything that's visible to the guest. */
-        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
-    }
-    else if (rcStrict == VINF_IEM_RAISED_XCPT)
-    {
-        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
-        rcStrict = VINF_SUCCESS;
-    }
-    return rcStrict;
-}
-
-
-/**
- * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
- */
-HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
-{
-    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
-
     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
@@ -13467 +13451 @@
     AssertRCReturn(rc, rc);
 
-    VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToVmxInstr(pVCpu, pVmxTransient);
+    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
+
+    VMXVEXITINFO ExitInfo;
+    RT_ZERO(ExitInfo);
+    ExitInfo.uReason     = pVmxTransient->uExitReason;
+    ExitInfo.u64Qual     = pVmxTransient->uExitQual;
+    ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
+    ExitInfo.cbInstr     = pVmxTransient->cbInstr;
+    if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
+    {
+        RTGCPTR GCPtrVal;
+        VBOXSTRICTRC rcStrict = hmR0VmxDecodeMemOperand(pVCpu, &ExitInfo.InstrInfo, ExitInfo.u64Qual,
+                                                        false /* fIsDstOperand */, &GCPtrVal);
+        if (rcStrict == VINF_SUCCESS)
+        { /* likely */ }
+        else if (rcStrict == VINF_HM_PENDING_XCPT)
+        {
+            Assert(pVCpu->hm.s.Event.fPending);
+            Log4Func(("Memory operand decoding failed, raising xcpt %#x\n",
+                      VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
+            return VINF_SUCCESS;
+        }
+        else
+        {
+            Log4Func(("hmR0VmxCheckExitDueToVmxInstr failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
+        ExitInfo.GCPtrEffAddr = GCPtrVal;
+    }
+
+    VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
+    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
+    else if (rcStrict == VINF_IEM_RAISED_XCPT)
+    {
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+        rcStrict = VINF_SUCCESS;
+    }
+    return rcStrict;
+}
+
+
+/**
+ * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
+    AssertRCReturn(rc, rc);
+
+    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
+
+    VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr);
+    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+    {
+        /* VMXOFF on success changes the internal hwvirt state but not anything that's visible to the guest. */
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
+    }
+    else if (rcStrict == VINF_IEM_RAISED_XCPT)
+    {
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+        rcStrict = VINF_SUCCESS;
+    }
+    return rcStrict;
+}
+
+
+/**
+ * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
+    rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
+    rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
+    AssertRCReturn(rc, rc);
+
+    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
+
+    VMXVEXITINFO ExitInfo;
+    RT_ZERO(ExitInfo);
+    ExitInfo.uReason     = pVmxTransient->uExitReason;
+    ExitInfo.u64Qual     = pVmxTransient->uExitQual;
+    ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
+    ExitInfo.cbInstr     = pVmxTransient->cbInstr;
+
+    RTGCPTR GCPtrVmxon;
+    VBOXSTRICTRC rcStrict = hmR0VmxDecodeMemOperand(pVCpu, &ExitInfo.InstrInfo, ExitInfo.u64Qual,
+                                                    false /* fIsDstOperand */, &GCPtrVmxon);
     if (rcStrict == VINF_SUCCESS)
     { /* likely */ }
     else if (rcStrict == VINF_HM_PENDING_XCPT)
     {
-        Log4Func(("Privilege checks failed, raising xcpt %#x!\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
+        Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
         return VINF_SUCCESS;
     }
@@ -13480 +13558 @@
         return rcStrict;
     }
-
-    RTGCPTR GCPtrVmxon;
-    PCVMXEXITINSTRINFO pExitInstrInfo = &pVmxTransient->ExitInstrInfo;
-    RTGCPTR const GCPtrDisp = pVmxTransient->uExitQual;
-    rcStrict = hmR0VmxDecodeMemOperand(pVCpu, pExitInstrInfo, GCPtrDisp, false /*fIsWrite*/, &GCPtrVmxon);
-    if (rcStrict == VINF_SUCCESS)
-    { /* likely */ }
-    else if (rcStrict == VINF_HM_PENDING_XCPT)
-    {
-        Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
-        return VINF_SUCCESS;
-    }
-    else
-    {
-        Log4Func(("hmR0VmxCheckExitDueToVmxInstr failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
-        return rcStrict;
-    }
-
-    VMXVEXITINFO ExitInfo;
-    RT_ZERO(ExitInfo);
-    ExitInfo.ExitInstrInfo.u = pExitInstrInfo->u;
-    ExitInfo.u64ExitQual = GCPtrDisp;
-    uint8_t const iEffSeg = pExitInstrInfo->VmreadVmwrite.iSegReg;
-    rcStrict = IEMExecDecodedVmxon(pVCpu, pVmxTransient->cbInstr, iEffSeg, GCPtrVmxon, &ExitInfo);
+    ExitInfo.GCPtrEffAddr = GCPtrVmxon;
+
+    rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
```