VirtualBox

Changeset 92844 in vbox for trunk


Timestamp: Dec 9, 2021 11:08:31 AM
Author:    vboxsync
Message:   VMM/IEM: Nested VMX: bugref:10092 INVEPT instruction support.
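
Editorial note (not part of the changeset): INVEPT invalidates cached translations derived from EPT paging structures. The guest supplies an invalidation type in a general-purpose register (1 = single-context, 2 = all-contexts) and a 128-bit descriptor in memory whose low quadword holds the EPT pointer for the single-context form; the high quadword must be zero. A minimal sketch of how a 64-bit guest hypervisor might issue the instruction; the helper name, descriptor struct, and error convention are invented for illustration (GCC inline assembly, AT&T operand order: memory operand first, type register second):

    #include <stdint.h>

    /* INVEPT descriptor: low quadword = EPTP (consulted only by the
       single-context form), high quadword reserved, must be zero. */
    struct invept_desc
    {
        uint64_t uEptp;
        uint64_t uReserved;
    };

    #define INVEPT_SINGLE_CONTEXT   UINT64_C(1)
    #define INVEPT_ALL_CONTEXTS     UINT64_C(2)

    /* Returns 0 on success, -1 on VMfail (CF=1 or ZF=1 after INVEPT). */
    static inline int guest_invept(uint64_t uType, const struct invept_desc *pDesc)
    {
        unsigned char fFailed;
        __asm__ __volatile__("invept %1, %2\n\t"
                             "setna %0"        /* CF=1 or ZF=1 => failure */
                             : "=q" (fFailed)
                             : "m" (*pDesc), "r" (uType)
                             : "cc", "memory");
        return fFailed ? -1 : 0;
    }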

Location:  trunk/src/VBox/VMM/VMMAll
Files:     2 edited

Legend: unchanged lines are unmarked; added lines are prefixed with "+"; removed lines are prefixed with "-".
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

r92706 → r92844
   *  VMX_EXIT_MONITOR (APIC access VM-exit caused by MONITOR pending)
   *  VMX_EXIT_ERR_MACHINE_CHECK (we never need to raise this?)
-  *  VMX_EXIT_EPT_VIOLATION
-  *  VMX_EXIT_EPT_MISCONFIG
   *  VMX_EXIT_INVEPT
   *  VMX_EXIT_RDRAND
…
   * @returns VBox status code.
   * @param   pVCpu           The cross context virtual CPU structure.
+  * @param   uEptPtr         The EPT pointer to check.
   * @param   penmVmxDiag     Where to store the diagnostic reason on failure (not
   *                          updated on success). Optional, can be NULL.
   */
- IEM_STATIC int iemVmxVmentryCheckEptPtr(PVMCPUCC pVCpu, VMXVDIAG *penmVmxDiag)
+ IEM_STATIC int iemVmxVmentryCheckEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr, VMXVDIAG *penmVmxDiag)
  {
      VMXVDIAG enmVmxDiag;
-     PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;

      /* Reserved bits. */
      uint8_t const  cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth;
      uint64_t const fValidMask        = VMX_EPTP_VALID_MASK & ~(UINT64_MAX << cMaxPhysAddrWidth);
-     if (pVmcs->u64EptPtr.u & fValidMask)
+     if (uEptPtr & fValidMask)
      {
          /* Memory Type. */
          uint64_t const fCaps    = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
-         uint8_t const  fMemType = RT_BF_GET(pVmcs->u64EptPtr.u, VMX_BF_EPTP_MEMTYPE);
+         uint8_t const  fMemType = RT_BF_GET(uEptPtr, VMX_BF_EPTP_MEMTYPE);
          if (   (   fMemType == VMX_EPTP_MEMTYPE_WB
                  && RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_MEMTYPE_WB))
…
           */
          Assert(RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_PAGE_WALK_LENGTH_4));
-         if (RT_BF_GET(pVmcs->u64EptPtr.u, VMX_BF_EPTP_PAGE_WALK_LENGTH) == 3)
+         if (RT_BF_GET(uEptPtr, VMX_BF_EPTP_PAGE_WALK_LENGTH) == 3)
          {
              /* Access and dirty bits support in EPT structures. */
-             if (   !RT_BF_GET(pVmcs->u64EptPtr.u, VMX_BF_EPTP_ACCESS_DIRTY)
+             if (   !RT_BF_GET(uEptPtr, VMX_BF_EPTP_ACCESS_DIRTY)
                  ||  RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY))
                  return VINF_SUCCESS;
…
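
Editorial aside (not part of the changeset): the checks above interrogate fields of the EPTP value itself. For reference, a sketch of the EPTP layout as described in the Intel SDM; the macro names are invented, not VirtualBox definitions:

    /* EPT pointer (EPTP) layout per the Intel SDM:
         bits  2:0          paging-structure memory type (0 = UC, 6 = WB)
         bits  5:3          page-walk length minus 1 (3 => 4-level walk)
         bit   6            enable accessed/dirty flags for EPT
         bits 11:7          reserved (bit 7 is defined for supervisor
                            shadow-stack control on CET-capable CPUs)
         bits (MAXPHYADDR-1):12  physical address of the PML4 table */
    #define EPTP_MEMTYPE_MASK       UINT64_C(0x0000000000000007)
    #define EPTP_WALK_LENGTH_MASK   UINT64_C(0x0000000000000038)
    #define EPTP_ACCESS_DIRTY_BIT   UINT64_C(0x0000000000000040)
    #define EPTP_PML4_ADDR_MASK     UINT64_C(0x000ffffffffff000)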
          {
              VMXVDIAG enmVmxDiag;
-             int const rc = iemVmxVmentryCheckEptPtr(pVCpu, &enmVmxDiag);
+             int const rc = iemVmxVmentryCheckEptPtr(pVCpu, pVmcs->u64EptPtr.u, &enmVmxDiag);
              if (RT_SUCCESS(rc))
              { /* likely */ }
…
                          return VINF_SUCCESS;
                      }
-                     return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED,
-                                         pVmcs->u64RoExitQual.u);
+                     return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED, pVmcs->u64RoExitQual.u);
                  }
              }
-             return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED,
-                                 pVmcs->u64RoExitQual.u);
+             return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED, pVmcs->u64RoExitQual.u);
          }

…
       */
      uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
-     uint8_t const fTypeIndivAddr              = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
-     uint8_t const fTypeSingleCtx              = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX);
-     uint8_t const fTypeAllCtx                 = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_ALL_CTX);
-     uint8_t const fTypeSingleCtxRetainGlobals = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX_RETAIN_GLOBALS);
-     if (   (fTypeIndivAddr              && u64InvvpidType == VMXTLBFLUSHVPID_INDIV_ADDR)
-         || (fTypeSingleCtx              && u64InvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
-         || (fTypeAllCtx                 && u64InvvpidType == VMXTLBFLUSHVPID_ALL_CONTEXTS)
-         || (fTypeSingleCtxRetainGlobals && u64InvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS))
+     bool const fInvvpidSupported           = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID);
+     bool const fTypeIndivAddr              = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
+     bool const fTypeSingleCtx              = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX);
+     bool const fTypeAllCtx                 = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_ALL_CTX);
+     bool const fTypeSingleCtxRetainGlobals = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX_RETAIN_GLOBALS);
+
+     bool afSupportedTypes[4];
+     afSupportedTypes[0] = fTypeIndivAddr;
+     afSupportedTypes[1] = fTypeSingleCtx;
+     afSupportedTypes[2] = fTypeAllCtx;
+     afSupportedTypes[3] = fTypeSingleCtxRetainGlobals;
+
+     if (   fInvvpidSupported
+         && !(u64InvvpidType & ~(uint64_t)VMX_INVVPID_VALID_MASK)
+         && afSupportedTypes[u64InvvpidType & 3])
      { /* likely */ }
      else
…


+ #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+ /**
+  * INVEPT instruction execution worker.
+  *
+  * @returns Strict VBox status code.
+  * @param   pVCpu               The cross context virtual CPU structure.
+  * @param   cbInstr             The instruction length in bytes.
+  * @param   iEffSeg             The segment of the INVEPT descriptor.
+  * @param   GCPtrInveptDesc     The address of the INVEPT descriptor.
+  * @param   u64InveptType       The invalidation type.
+  * @param   pExitInfo           Pointer to the VM-exit information. Optional, can be
+  *                              NULL.
+  *
+  * @remarks Common VMX instruction checks are expected to have been done by the
+  *          caller, i.e. VMX operation, CR4.VMXE, real/V86 mode, EFER/CS.L checks.
+  */
+ IEM_STATIC VBOXSTRICTRC iemVmxInvept(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInveptDesc,
+                                      uint64_t u64InveptType, PCVMXVEXITINFO pExitInfo)
+ {
+     /* Check if EPT is supported, otherwise raise #UD. */
+     if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEpt)
+         return iemRaiseUndefinedOpcode(pVCpu);
+
+     /* Nested-guest intercept. */
+     if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
+     {
+         if (pExitInfo)
+             return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
+         return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_INVEPT, VMXINSTRID_NONE, cbInstr);
+     }
+
+     /* CPL. */
+     if (pVCpu->iem.s.uCpl != 0)
+     {
+         Log(("invept: CPL != 0 -> #GP(0)\n"));
+         return iemRaiseGeneralProtectionFault0(pVCpu);
+     }
+
+     /*
+      * Validate the INVEPT invalidation type.
+      *
+      * The instruction specifies exactly ONE of the supported invalidation types.
+      *
+      * Each type has a bit in the IA32_VMX_EPT_VPID_CAP MSR specifying whether it
+      * is supported. In theory, a CPU may support some types but not others, in
+      * any combination. We do not take any shortcuts here by assuming the types
+      * we currently expose to the guest.
+      */
+     uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
+     bool const fInveptSupported = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVEPT);
+     bool const fTypeSingleCtx   = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVEPT_SINGLE_CTX);
+     bool const fTypeAllCtx      = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVEPT_ALL_CTX);
+
+     bool afSupportedTypes[4];
+     afSupportedTypes[0] = false;
+     afSupportedTypes[1] = fTypeSingleCtx;
+     afSupportedTypes[2] = fTypeAllCtx;
+     afSupportedTypes[3] = false;
+
+     if (   fInveptSupported
+         && !(u64InveptType & ~(uint64_t)VMX_INVEPT_VALID_MASK)
+         && afSupportedTypes[u64InveptType & 3])
+     { /* likely */ }
+     else
+     {
+         Log(("invept: invalid/unsupported invept type %#RX64 -> VMFail\n", u64InveptType));
+         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag  = kVmxVDiag_Invept_TypeInvalid;
+         pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64InveptType;
+         iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+         return VINF_SUCCESS;
+     }
+
+     /*
+      * Fetch the INVEPT descriptor from guest memory.
+      */
+     RTUINT128U uDesc;
+     VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInveptDesc);
+     if (rcStrict == VINF_SUCCESS)
+     {
+         /*
+          * Validate the descriptor.
+          *
+          * The Intel spec. does not explicitly say the INVEPT instruction fails when
+          * reserved bits in the descriptor are set, but it -does- say so for INVVPID.
+          * Until we can test on real hardware, it is assumed INVEPT behaves the same
+          * as INVVPID in this regard. It is better to be strict in our emulation
+          * until proven otherwise.
+          */
+         if (uDesc.s.Hi)
+         {
+             Log(("invept: reserved bits set in invept descriptor %#RX64 -> VMFail\n", uDesc.s.Hi));
+             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag  = kVmxVDiag_Invept_DescRsvd;
+             pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = uDesc.s.Hi;
+             iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+             iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+             return VINF_SUCCESS;
+         }
+
     8692
     8693        /*
     8694         * Flush TLB mappings based on the EPT type.
     8695         */
     8696        if (u64InveptType == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
     8697        {
     8698            uint64_t const GCPhysEptPtr = uDesc.s.Lo;
     8699            int const rc = iemVmxVmentryCheckEptPtr(pVCpu, GCPhysEptPtr, NULL /* enmDiag */);
     8700            if (RT_SUCCESS(rc))
     8701            { /* likely */ }
     8702            else
     8703            {
     8704                Log(("invept: EPTP invalid %#RX64 -> VMFail\n", GCPhysEptPtr));
     8705                pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag  = kVmxVDiag_Invept_EptpInvalid;
     8706                pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysEptPtr;
     8707                iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
     8708                iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     8709                return VINF_SUCCESS;
     8710            }
     8711        }
     8712
     8713        /** @todo PGM support for EPT tags? Currently just flush everything. */
     8714        IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
     8715        uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
     8716        PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
     8717    }
     8718
     8719    return rcStrict;
     8720}
     8721#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
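
Editorial aside (not part of the changeset): both the INVVPID rework above and the new INVEPT worker validate the type operand with a small lookup table, folding the capability-MSR bits, the valid-range mask, and the per-type support check into a single branch. Written long-hand, the INVEPT variant is equivalent to the following sketch; the helper name is invented, and VMXTLBFLUSHEPT_ALL_CONTEXTS is assumed to be defined alongside VMXTLBFLUSHEPT_SINGLE_CONTEXT:

    /* Sketch: INVEPT type validation written out as a switch. Types 0 and 3
       are architecturally undefined for INVEPT, so only single-context (1)
       and all-contexts (2) can ever validate. */
    static bool iemVmxIsInveptTypeValid(uint64_t fCaps, uint64_t uType)
    {
        if (!RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVEPT))
            return false;
        switch (uType)
        {
            case VMXTLBFLUSHEPT_SINGLE_CONTEXT: /* 1 */
                return RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVEPT_SINGLE_CTX);
            case VMXTLBFLUSHEPT_ALL_CONTEXTS:   /* 2 */
                return RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVEPT_ALL_CTX);
            default:
                return false;
        }
    }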
+
+
  /**
   * VMXON instruction execution worker.
…


+ #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+ /**
+  * Implements 'INVEPT'.
+  */
+ IEM_CIMPL_DEF_3(iemCImpl_invept, uint8_t, iEffSeg, RTGCPTR, GCPtrInveptDesc, uint64_t, uInveptType)
+ {
+     return iemVmxInvept(pVCpu, cbInstr, iEffSeg, GCPtrInveptDesc, uInveptType, NULL /* pExitInfo */);
+ }
+ #endif
+
+
  /**
   * Implements VMX's implementation of PAUSE.
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h

r82968 → r92844

  /** Opcode 0x66 0x0f 0x38 0x80. */
+ #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+ FNIEMOP_DEF(iemOp_invept_Gy_Mdq)
+ {
+     IEMOP_MNEMONIC(invept, "invept Gy,Mdq");
+     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+     IEMOP_HLP_IN_VMX_OPERATION("invept", kVmxVDiag_Invept);
+     IEMOP_HLP_VMX_INSTR("invept", kVmxVDiag_Invept);
+     uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+     if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
+     {
+         /* Register, memory. */
+         if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
+         {
+             IEM_MC_BEGIN(3, 0);
+             IEM_MC_ARG(uint8_t,  iEffSeg,         0);
+             IEM_MC_ARG(RTGCPTR,  GCPtrInveptDesc, 1);
+             IEM_MC_ARG(uint64_t, uInveptType,     2);
+             IEM_MC_FETCH_GREG_U64(uInveptType, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+             IEM_MC_CALC_RM_EFF_ADDR(GCPtrInveptDesc, bRm, 0);
+             IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+             IEM_MC_CALL_CIMPL_3(iemCImpl_invept, iEffSeg, GCPtrInveptDesc, uInveptType);
+             IEM_MC_END();
+         }
+         else
+         {
+             IEM_MC_BEGIN(3, 0);
+             IEM_MC_ARG(uint8_t,  iEffSeg,         0);
+             IEM_MC_ARG(RTGCPTR,  GCPtrInveptDesc, 1);
+             IEM_MC_ARG(uint32_t, uInveptType,     2);
+             IEM_MC_FETCH_GREG_U32(uInveptType, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+             IEM_MC_CALC_RM_EFF_ADDR(GCPtrInveptDesc, bRm, 0);
+             IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+             IEM_MC_CALL_CIMPL_3(iemCImpl_invept, iEffSeg, GCPtrInveptDesc, uInveptType);
+             IEM_MC_END();
+         }
+     }
+     Log(("iemOp_invept_Gy_Mdq: invalid encoding -> #UD\n"));
+     return IEMOP_RAISE_INVALID_OPCODE();
+ }
+ #else
  FNIEMOP_STUB(iemOp_invept_Gy_Mdq);
+ #endif

  /** Opcode 0x66 0x0f 0x38 0x81. */
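
Editorial aside (not part of the changeset): the decoder rejects the register form of the instruction up front because INVEPT exists only with a memory operand; a ModRM mod field of 3 selects a register and must raise #UD. The test is equivalent to this stand-alone sketch (function name invented):

    #include <stdbool.h>
    #include <stdint.h>

    /* ModRM layout: mod (bits 7:6), reg (bits 5:3), rm (bits 2:0).
       mod == 3 encodes a register operand, which is invalid for INVEPT. */
    static bool invept_modrm_is_memory(uint8_t bRm)
    {
        return (bRm >> 6) != 3;
    }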