Timestamp: Dec 9, 2021 11:08:31 AM (3 years ago)
Location:  trunk/src/VBox/VMM/VMMAll
Files:     2 edited
Legend: lines prefixed with '+' were added, lines prefixed with '-' were removed, and unprefixed lines are unmodified context; each '@@ -N +M @@' header gives the old and new line numbers at which a hunk starts.

trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r92706 → r92844)

@@ -181 +181 @@
  * VMX_EXIT_MONITOR (APIC access VM-exit caused by MONITOR pending)
  * VMX_EXIT_ERR_MACHINE_CHECK (we never need to raise this?)
- * VMX_EXIT_EPT_VIOLATION
- * VMX_EXIT_EPT_MISCONFIG
  * VMX_EXIT_INVEPT
  * VMX_EXIT_RDRAND

@@ -5933 +5931 @@
  * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   uEptPtr         The EPT pointer to check.
  * @param   penmVmxDiag     Where to store the diagnostic reason on failure (not
  *                          updated on success). Optional, can be NULL.
  */
-IEM_STATIC int iemVmxVmentryCheckEptPtr(PVMCPUCC pVCpu, VMXVDIAG *penmVmxDiag)
+IEM_STATIC int iemVmxVmentryCheckEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr, VMXVDIAG *penmVmxDiag)
 {
     VMXVDIAG enmVmxDiag;
-    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;

     /* Reserved bits. */
     uint8_t const  cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth;
     uint64_t const fValidMask        = VMX_EPTP_VALID_MASK & ~(UINT64_MAX << cMaxPhysAddrWidth);
-    if (pVmcs->u64EptPtr.u & fValidMask)
+    if (uEptPtr & fValidMask)
     {
         /* Memory Type. */
         uint64_t const fCaps    = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
-        uint8_t const  fMemType = RT_BF_GET(pVmcs->u64EptPtr.u, VMX_BF_EPTP_MEMTYPE);
+        uint8_t const  fMemType = RT_BF_GET(uEptPtr, VMX_BF_EPTP_MEMTYPE);
         if (   (   fMemType == VMX_EPTP_MEMTYPE_WB
                 && RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_MEMTYPE_WB))

@@ -5961 +5959 @@
             */
            Assert(RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_PAGE_WALK_LENGTH_4));
-           if (RT_BF_GET(pVmcs->u64EptPtr.u, VMX_BF_EPTP_PAGE_WALK_LENGTH) == 3)
+           if (RT_BF_GET(uEptPtr, VMX_BF_EPTP_PAGE_WALK_LENGTH) == 3)
            {
                /* Access and dirty bits support in EPT structures. */
-               if (   !RT_BF_GET(pVmcs->u64EptPtr.u, VMX_BF_EPTP_ACCESS_DIRTY)
+               if (   !RT_BF_GET(uEptPtr, VMX_BF_EPTP_ACCESS_DIRTY)
                    || RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY))
                    return VINF_SUCCESS;

@@ -6196 +6194 @@
     {
         VMXVDIAG enmVmxDiag;
-        int const rc = iemVmxVmentryCheckEptPtr(pVCpu, &enmVmxDiag);
+        int const rc = iemVmxVmentryCheckEptPtr(pVCpu, pVmcs->u64EptPtr.u, &enmVmxDiag);
         if (RT_SUCCESS(rc))
         { /* likely */ }

@@ -7589 +7587 @@
                 return VINF_SUCCESS;
             }
-            return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED,
-                                pVmcs->u64RoExitQual.u);
+            return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED, pVmcs->u64RoExitQual.u);
         }
     }
-    return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED,
-                        pVmcs->u64RoExitQual.u);
+    return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED, pVmcs->u64RoExitQual.u);
 }

@@ -8462 +8458 @@
      */
     uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
-    uint8_t const fTypeIndivAddr              = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
-    uint8_t const fTypeSingleCtx              = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX);
-    uint8_t const fTypeAllCtx                 = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_ALL_CTX);
-    uint8_t const fTypeSingleCtxRetainGlobals = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX_RETAIN_GLOBALS);
-    if (   (fTypeIndivAddr              && u64InvvpidType == VMXTLBFLUSHVPID_INDIV_ADDR)
-        || (fTypeSingleCtx              && u64InvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
-        || (fTypeAllCtx                 && u64InvvpidType == VMXTLBFLUSHVPID_ALL_CONTEXTS)
-        || (fTypeSingleCtxRetainGlobals && u64InvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS))
+    bool const fInvvpidSupported           = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID);
+    bool const fTypeIndivAddr              = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
+    bool const fTypeSingleCtx              = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX);
+    bool const fTypeAllCtx                 = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_ALL_CTX);
+    bool const fTypeSingleCtxRetainGlobals = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX_RETAIN_GLOBALS);
+
+    bool afSupportedTypes[4];
+    afSupportedTypes[0] = fTypeIndivAddr;
+    afSupportedTypes[1] = fTypeSingleCtx;
+    afSupportedTypes[2] = fTypeAllCtx;
+    afSupportedTypes[3] = fTypeSingleCtxRetainGlobals;
+
+    if (   fInvvpidSupported
+        && !(u64InvvpidType & ~(uint64_t)VMX_INVVPID_VALID_MASK)
+        && afSupportedTypes[u64InvvpidType & 3])
     { /* likely */ }
     else

@@ -8590 +8593 @@


+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+/**
+ * INVEPT instruction execution worker.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu               The cross context virtual CPU structure.
+ * @param   cbInstr             The instruction length in bytes.
+ * @param   iEffSeg             The segment of the invept descriptor.
+ * @param   GCPtrInveptDesc     The address of invept descriptor.
+ * @param   u64InveptType       The invalidation type.
+ * @param   pExitInfo           Pointer to the VM-exit information. Optional, can be
+ *                              NULL.
+ *
+ * @remarks Common VMX instruction checks are already expected to by the caller,
+ *          i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxInvept(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInveptDesc,
+                                     uint64_t u64InveptType, PCVMXVEXITINFO pExitInfo)
+{
+    /* Check if EPT is supported, otherwise raise #UD. */
+    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEpt)
+        return iemRaiseUndefinedOpcode(pVCpu);
+
+    /* Nested-guest intercept. */
+    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
+    {
+        if (pExitInfo)
+            return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
+        return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_INVEPT, VMXINSTRID_NONE, cbInstr);
+    }
+
+    /* CPL. */
+    if (pVCpu->iem.s.uCpl != 0)
+    {
+        Log(("invept: CPL != 0 -> #GP(0)\n"));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    /*
+     * Validate INVEPT invalidation type.
+     *
+     * The instruction specifies exactly ONE of the supported invalidation types.
+     *
+     * Each of the types has a bit in IA32_VMX_EPT_VPID_CAP MSR specifying if it is
+     * supported. In theory, it's possible for a CPU to not support flushing individual
+     * addresses but all the other types or any other combination. We do not take any
+     * shortcuts here by assuming the types we currently expose to the guest.
+     */
+    uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
+    bool const fInveptSupported = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVEPT);
+    bool const fTypeSingleCtx   = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVEPT_SINGLE_CTX);
+    bool const fTypeAllCtx      = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVEPT_ALL_CTX);
+
+    bool afSupportedTypes[4];
+    afSupportedTypes[0] = false;
+    afSupportedTypes[1] = fTypeSingleCtx;
+    afSupportedTypes[2] = fTypeAllCtx;
+    afSupportedTypes[3] = false;
+
+    if (   fInveptSupported
+        && !(u64InveptType & ~(uint64_t)VMX_INVEPT_VALID_MASK)
+        && afSupportedTypes[u64InveptType & 3])
+    { /* likely */ }
+    else
+    {
+        Log(("invept: invalid/unsupported invvpid type %#x -> VMFail\n", u64InveptType));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag  = kVmxVDiag_Invept_TypeInvalid;
+        pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64InveptType;
+        iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Fetch the invvpid descriptor from guest memory.
+     */
+    RTUINT128U uDesc;
+    VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInveptDesc);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        /*
+         * Validate the descriptor.
+         *
+         * The Intel spec. does not explicit say the INVEPT instruction fails when reserved
+         * bits in the descriptor are set, but it -does- for INVVPID. Until we test on real
+         * hardware, it's assumed INVEPT behaves the same as INVVPID in this regard. It's
+         * better to be strict in our emulation until proven otherwise.
+         */
+        if (uDesc.s.Hi)
+        {
+            Log(("invept: reserved bits set in invept descriptor %#RX64 -> VMFail\n", uDesc.s.Hi));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag  = kVmxVDiag_Invept_DescRsvd;
+            pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = uDesc.s.Hi;
+            iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+            return VINF_SUCCESS;
+        }
+
+        /*
+         * Flush TLB mappings based on the EPT type.
+         */
+        if (u64InveptType == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
+        {
+            uint64_t const GCPhysEptPtr = uDesc.s.Lo;
+            int const rc = iemVmxVmentryCheckEptPtr(pVCpu, GCPhysEptPtr, NULL /* enmDiag */);
+            if (RT_SUCCESS(rc))
+            { /* likely */ }
+            else
+            {
+                Log(("invept: EPTP invalid %#RX64 -> VMFail\n", GCPhysEptPtr));
+                pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag  = kVmxVDiag_Invept_EptpInvalid;
+                pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysEptPtr;
+                iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+                iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+                return VINF_SUCCESS;
+            }
+        }
+
+        /** @todo PGM support for EPT tags? Currently just flush everything. */
+        IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
+        uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
+        PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
+    }
+
+    return rcStrict;
+}
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
+
+
 /**
  * VMXON instruction execution worker.

@@ -8987 +9119 @@


+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+/**
+ * Implements 'INVEPT'.
+ */
+IEM_CIMPL_DEF_3(iemCImpl_invept, uint8_t, iEffSeg, RTGCPTR, GCPtrInveptDesc, uint64_t, uInveptType)
+{
+    return iemVmxInvept(pVCpu, cbInstr, iEffSeg, GCPtrInveptDesc, uInveptType, NULL /* pExitInfo */);
+}
+#endif
+
+
 /**
  * Implements VMX's implementation of PAUSE.

trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h (r82968 → r92844)

@@ -303 +303 @@

 /** Opcode 0x66 0x0f 0x38 0x80. */
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+FNIEMOP_DEF(iemOp_invept_Gy_Mdq)
+{
+    IEMOP_MNEMONIC(invept, "invept Gy,Mdq");
+    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+    IEMOP_HLP_IN_VMX_OPERATION("invept", kVmxVDiag_Invept);
+    IEMOP_HLP_VMX_INSTR("invept", kVmxVDiag_Invept);
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /* Register, memory. */
+        if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
+        {
+            IEM_MC_BEGIN(3, 0);
+            IEM_MC_ARG(uint8_t,  iEffSeg,         0);
+            IEM_MC_ARG(RTGCPTR,  GCPtrInveptDesc, 1);
+            IEM_MC_ARG(uint64_t, uInveptType,     2);
+            IEM_MC_FETCH_GREG_U64(uInveptType, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrInveptDesc, bRm, 0);
+            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+            IEM_MC_CALL_CIMPL_3(iemCImpl_invept, iEffSeg, GCPtrInveptDesc, uInveptType);
+            IEM_MC_END();
+        }
+        else
+        {
+            IEM_MC_BEGIN(3, 0);
+            IEM_MC_ARG(uint8_t,  iEffSeg,         0);
+            IEM_MC_ARG(RTGCPTR,  GCPtrInveptDesc, 1);
+            IEM_MC_ARG(uint32_t, uInveptType,     2);
+            IEM_MC_FETCH_GREG_U32(uInveptType, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrInveptDesc, bRm, 0);
+            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+            IEM_MC_CALL_CIMPL_3(iemCImpl_invept, iEffSeg, GCPtrInveptDesc, uInveptType);
+            IEM_MC_END();
+        }
+    }
+    Log(("iemOp_invept_Gy_Mdq: invalid encoding -> #UD\n"));
+    return IEMOP_RAISE_INVALID_OPCODE();
+}
+#else
 FNIEMOP_STUB(iemOp_invept_Gy_Mdq);
+#endif

 /** Opcode 0x66 0x0f 0x38 0x81. */

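For reference, the decoder added above sits on the 0x66 0x0f 0x38 0x80 opcode and insists on a memory operand (the register form falls through to #UD). A hypothetical byte-level encoding of "invept rax, [rdx]" in 64-bit mode — not taken from the changeset, just an illustration of what the ModRM handling decodes:

    /* Hypothetical encoding of "invept rax, [rdx]": the ModRM reg field selects the
       register holding the invalidation type, the ModRM rm field addresses the
       128-bit descriptor in memory. */
    static const unsigned char g_abInveptExample[] =
    {
        0x66,               /* mandatory operand-size prefix */
        0x0f, 0x38, 0x80,   /* three-byte opcode map: invept */
        0x02                /* ModRM: mod=00 (memory), reg=000 (rax), rm=010 ([rdx]) */
    };

The mod != 3 check in iemOp_invept_Gy_Mdq() corresponds to the mod=00/01/10 memory forms, and the reg field combined with uRexReg is what IEM_MC_FETCH_GREG_U64/U32 reads the invalidation type from before handing off to iemCImpl_invept.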