VirtualBox

Changeset 81201 in vbox for trunk/src/VBox/VMM


Timestamp: Oct 10, 2019 5:19:06 AM (5 years ago)
Author: vboxsync
Message:

VMM/IEM: Nested VMX: bugref:9180 Combine iemVmxVmentryCheckExecCtls, iemVmxVmentryCheckExitCtls and iemVmxVmentryCheckEntryCtls into a single function.
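
In effect, the VM-entry code path now makes one combined call where it previously chained three; a minimal before/after sketch of the call site, condensed from the second hunk of this diff:

    /* r81200: three separately chained control checks. */
    int rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
    if (RT_SUCCESS(rc))
    {
        rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
        if (RT_SUCCESS(rc))
            rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
    }

    /* r81201: one combined check of execution, exit and entry controls. */
    int rc = iemVmxVmentryCheckCtls(pVCpu, pszInstr);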

File: 1 edited

Legend:

  ' ' Unmodified
  '+' Added
  '-' Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
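
Throughout the combined checker below, each VMCS control dword is validated against its VMX capability MSR with the same allowed-0/allowed-1 idiom; a short annotated sketch follows, where uCtls and Ctls are placeholders for the specific field and MSR pair (e.g. u32PinCtls and PinCtls):

    /* A control dword is valid iff every allowed-0 (mandatory-1) bit is set
       and no bit outside the allowed-1 mask is set. */
    bool const fValid = !(~uCtls & Ctls.n.allowed0)    /* no required bit missing */
                     && !( uCtls & ~Ctls.n.allowed1);  /* no disallowed bit set */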

--- trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r81200)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r81201)
@@ -5989 +5989 @@
 
 /**
- * Checks VM-entry controls fields as part of VM-entry.
- * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
+ * Checks VMCS controls fields as part of VM-entry.
  *
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pszInstr    The VMX instruction name (for logging purposes).
- */
-IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPUCC pVCpu, const char *pszInstr)
+ *
+ * @remarks This may update secondary-processor based VM-execution control fields
+ *          in the current VMCS if necessary.
+ */
+IEM_STATIC int iemVmxVmentryCheckCtls(PVMCPUCC pVCpu, const char *pszInstr)
 {
     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     const char * const pszFailure = "VMFail";
 
-    /* VM-entry controls. */
-    VMXCTLSMSR const EntryCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.EntryCtls;
-    if (!(~pVmcs->u32EntryCtls & EntryCtls.n.allowed0))
-    { /* likely */ }
-    else
-        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
-
-    if (!(pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1))
-    { /* likely */ }
-    else
-        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
-
-    /* Event injection. */
-    uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
-    if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
-    {
-        /* Type and vector. */
-        uint8_t const uType   = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
-        uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
-        uint8_t const uRsvd   = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
-        if (   !uRsvd
-            && VMXIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
-            && VMXIsEntryIntInfoVectorValid(uVector, uType))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
-
-        /* Exception error code. */
-        if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
-        {
-            /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
-            if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
-                ||  (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
+    /*
+     * VM-execution controls.
+     * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
+     */
+    {
+        /* Pin-based VM-execution controls. */
+        {
+            VMXCTLSMSR const PinCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.PinCtls;
+            if (!(~pVmcs->u32PinCtls & PinCtls.n.allowed0))
             { /* likely */ }
             else
-                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
-
-            /* Exceptions that provide an error code. */
-            if (   uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
-                && (   uVector == X86_XCPT_DF
-                    || uVector == X86_XCPT_TS
-                    || uVector == X86_XCPT_NP
-                    || uVector == X86_XCPT_SS
-                    || uVector == X86_XCPT_GP
-                    || uVector == X86_XCPT_PF
-                    || uVector == X86_XCPT_AC))
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
+
+            if (!(pVmcs->u32PinCtls & ~PinCtls.n.allowed1))
             { /* likely */ }
             else
-                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
-
-            /* Exception error-code reserved bits. */
-            if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
+        }
+
+        /* Processor-based VM-execution controls. */
+        {
+            VMXCTLSMSR const ProcCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls;
+            if (!(~pVmcs->u32ProcCtls & ProcCtls.n.allowed0))
             { /* likely */ }
             else
-                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
-
-            /* Injecting a software interrupt, software exception or privileged software exception. */
-            if (   uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
-                || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
-                || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
+
+            if (!(pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
+        }
+
+        /* Secondary processor-based VM-execution controls. */
+        if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
+        {
+            VMXCTLSMSR const ProcCtls2 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls2;
+            if (!(~pVmcs->u32ProcCtls2 & ProcCtls2.n.allowed0))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
+
+            if (!(pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
+        }
+        else
+            Assert(!pVmcs->u32ProcCtls2);
+
+        /* CR3-target count. */
+        if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
+
+        /* I/O bitmaps physical addresses. */
+        if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
+        {
+            RTGCPHYS const GCPhysIoBitmapA = pVmcs->u64AddrIoBitmapA.u;
+            if (   !(GCPhysIoBitmapA & X86_PAGE_4K_OFFSET_MASK)
+                && !(GCPhysIoBitmapA >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+                &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysIoBitmapA))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
+
+            RTGCPHYS const GCPhysIoBitmapB = pVmcs->u64AddrIoBitmapB.u;
+            if (   !(GCPhysIoBitmapB & X86_PAGE_4K_OFFSET_MASK)
+                && !(GCPhysIoBitmapB >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+                &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysIoBitmapB))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
+        }
+
+        /* MSR bitmap physical address. */
+        if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
+        {
+            RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
+            if (   !(GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
+                && !(GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+                &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
+        }
+
+        /* TPR shadow related controls. */
+        if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
+        {
+            /* Virtual-APIC page physical address. */
+            RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
+            if (   !(GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
+                && !(GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+                &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
+
+            /* TPR threshold bits 31:4 MBZ without virtual-interrupt delivery. */
+            if (   !(pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)
+                ||  (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
+
+            /* The rest done XXX document */
+        }
+        else
+        {
+            if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
+                && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
+                && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
+            { /* likely */ }
+            else
             {
-                /* Instruction length must be in the range 0-15. */
-                if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
+                if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
+                if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
+                Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
+            }
+        }
+
+        /* NMI exiting and virtual-NMIs. */
+        if (    (pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
+            || !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
+
+        /* Virtual-NMIs and NMI-window exiting. */
+        if (    (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
+            || !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
+
+        /* Virtualize APIC accesses. */
+        if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
+        {
+            /* APIC-access physical address. */
+            RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
+            if (   !(GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
+                && !(GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+                &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
+
+            /*
+             * Disallow APIC-access page and virtual-APIC page from being the same address.
+             * Note! This is not an Intel requirement, but one imposed by our implementation.
+             */
+            /** @todo r=ramshankar: This is done primarily to simplify recursion scenarios while
+             *        redirecting accesses between the APIC-access page and the virtual-APIC
+             *        page. If any nested hypervisor requires this, we can implement it later. */
+            if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
+            {
+                RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
+                if (GCPhysVirtApic != GCPhysApicAccess)
                 { /* likely */ }
                 else
-                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
-
-                /* However, instruction length of 0 is allowed only when its CPU feature is present. */
-                if (   pVmcs->u32EntryInstrLen != 0
-                    || IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
+            }
+        }
+
+        /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
+        if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
+            || !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
+
+        /* Virtual-interrupt delivery requires external interrupt exiting. */
+        if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
+            ||  (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
+
+        /* VPID. */
+        if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
+            || pVmcs->u16Vpid != 0)
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
+
+        Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT));             /* We don't support posted interrupts yet. */
+        Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));                /* We don't support EPT yet. */
+        Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML));                /* We don't support PML yet. */
+        Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
+        Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC));             /* We don't support VM functions yet. */
+        Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE));             /* We don't support EPT-violation #VE yet. */
+        Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT));    /* We don't support Pause-loop exiting yet. */
+
+        /* VMCS shadowing. */
+        if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
+        {
+            /* VMREAD-bitmap physical address. */
+            RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
+            if (   !(GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
+                && !(GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+                &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
+
+            /* VMWRITE-bitmap physical address. */
+            RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmreadBitmap.u;
+            if (   !(GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
+                && !(GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+                &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
+        }
+    }
+
+    /*
+     * VM-exit controls.
+     * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
+     */
+    {
+        VMXCTLSMSR const ExitCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ExitCtls;
+        if (!(~pVmcs->u32ExitCtls & ExitCtls.n.allowed0))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
+
+        if (!(pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
+
+        /* Save preemption timer without activating it. */
+        if (    (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
+            || !(pVmcs->u32ProcCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
+
+        /* VM-exit MSR-store count and VM-exit MSR-store area address. */
+        if (pVmcs->u32ExitMsrStoreCount)
+        {
+            if (   !(pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
+                && !(pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+                &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
+        }
+
+        /* VM-exit MSR-load count and VM-exit MSR-load area address. */
+        if (pVmcs->u32ExitMsrLoadCount)
+        {
+            if (   !(pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
+                && !(pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+                &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
+        }
+    }
+
+    /*
+     * VM-entry controls.
+     * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
+     */
+    {
+        VMXCTLSMSR const EntryCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.EntryCtls;
+        if (!(~pVmcs->u32EntryCtls & EntryCtls.n.allowed0))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
+
+        if (!(pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
+
+        /* Event injection. */
+        uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
+        if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
+        {
+            /* Type and vector. */
+            uint8_t const uType   = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
+            uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
+            uint8_t const uRsvd   = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
+            if (   !uRsvd
+                && VMXIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
+                && VMXIsEntryIntInfoVectorValid(uVector, uType))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
+
+            /* Exception error code. */
+            if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
+            {
+                /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
+                if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
+                    ||  (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
                 { /* likely */ }
                 else
-                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
+
+                /* Exceptions that provide an error code. */
+                if (   uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
+                    && (   uVector == X86_XCPT_DF
+                        || uVector == X86_XCPT_TS
+                        || uVector == X86_XCPT_NP
+                        || uVector == X86_XCPT_SS
+                        || uVector == X86_XCPT_GP
+                        || uVector == X86_XCPT_PF
+                        || uVector == X86_XCPT_AC))
+                { /* likely */ }
+                else
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
+
+                /* Exception error-code reserved bits. */
+                if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
+                { /* likely */ }
+                else
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
+
+                /* Injecting a software interrupt, software exception or privileged software exception. */
+                if (   uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
+                    || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
+                    || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
+                {
+                    /* Instruction length must be in the range 0-15. */
+                    if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
+                    { /* likely */ }
+                    else
+                        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
+
+                    /* However, instruction length of 0 is allowed only when its CPU feature is present. */
+                    if (   pVmcs->u32EntryInstrLen != 0
+                        || IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
+                    { /* likely */ }
+                    else
+                        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
+                }
             }
         }
-    }
-
-    /* VM-entry MSR-load count and VM-entry MSR-load area address. */
-    if (pVmcs->u32EntryMsrLoadCount)
-    {
-        if (   !(pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
-            && !(pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
-    }
-
-    Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM));           /* We don't support SMM yet. */
-    Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON));    /* We don't support dual-monitor treatment yet. */
-
-    NOREF(pszInstr);
-    NOREF(pszFailure);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Checks VM-exit controls fields as part of VM-entry.
- * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pszInstr    The VMX instruction name (for logging purposes).
- */
-IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPUCC pVCpu, const char *pszInstr)
-{
-    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
-    const char * const pszFailure = "VMFail";
-
-    /* VM-exit controls. */
-    VMXCTLSMSR const ExitCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ExitCtls;
-    if (!(~pVmcs->u32ExitCtls & ExitCtls.n.allowed0))
-    { /* likely */ }
-    else
-        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
-
-    if (!(pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1))
-    { /* likely */ }
-    else
-        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
-
-    /* Save preemption timer without activating it. */
-    if (    (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
-        || !(pVmcs->u32ProcCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
-    { /* likely */ }
-    else
-        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
-
-    /* VM-exit MSR-store count and VM-exit MSR-store area address. */
-    if (pVmcs->u32ExitMsrStoreCount)
-    {
-        if (   !(pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
-            && !(pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
-    }
-
-    /* VM-exit MSR-load count and VM-exit MSR-load area address. */
-    if (pVmcs->u32ExitMsrLoadCount)
-    {
-        if (   !(pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
-            && !(pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
-    }
-
-    NOREF(pszInstr);
-    NOREF(pszFailure);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Checks VM-execution controls fields as part of VM-entry.
- * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pszInstr    The VMX instruction name (for logging purposes).
- *
- * @remarks This may update secondary-processor based VM-execution control fields
- *          in the current VMCS if necessary.
- */
-IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPUCC pVCpu, const char *pszInstr)
-{
-    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
-    const char * const pszFailure = "VMFail";
-
-    /* Pin-based VM-execution controls. */
-    {
-        VMXCTLSMSR const PinCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.PinCtls;
-        if (!(~pVmcs->u32PinCtls & PinCtls.n.allowed0))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
-
-        if (!(pVmcs->u32PinCtls & ~PinCtls.n.allowed1))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
-    }
-
-    /* Processor-based VM-execution controls. */
-    {
-        VMXCTLSMSR const ProcCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls;
-        if (!(~pVmcs->u32ProcCtls & ProcCtls.n.allowed0))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
-
-        if (!(pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
-    }
-
-    /* Secondary processor-based VM-execution controls. */
-    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
-    {
-        VMXCTLSMSR const ProcCtls2 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls2;
-        if (!(~pVmcs->u32ProcCtls2 & ProcCtls2.n.allowed0))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
-
-        if (!(pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
-    }
-    else
-        Assert(!pVmcs->u32ProcCtls2);
-
-    /* CR3-target count. */
-    if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
-    { /* likely */ }
-    else
-        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
-
-    /* I/O bitmaps physical addresses. */
-    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
-    {
-        RTGCPHYS const GCPhysIoBitmapA = pVmcs->u64AddrIoBitmapA.u;
-        if (   !(GCPhysIoBitmapA & X86_PAGE_4K_OFFSET_MASK)
-            && !(GCPhysIoBitmapA >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysIoBitmapA))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
-
-        RTGCPHYS const GCPhysIoBitmapB = pVmcs->u64AddrIoBitmapB.u;
-        if (   !(GCPhysIoBitmapB & X86_PAGE_4K_OFFSET_MASK)
-            && !(GCPhysIoBitmapB >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysIoBitmapB))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
-    }
-
-    /* MSR bitmap physical address. */
-    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
-    {
-        RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
-        if (   !(GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
-            && !(GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
-    }
-
-    /* TPR shadow related controls. */
-    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
-    {
-        /* Virtual-APIC page physical address. */
-        RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
-        if (   !(GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
-            && !(GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
-
-        /* TPR threshold bits 31:4 MBZ without virtual-interrupt delivery. */
-        if (   !(pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)
-            ||  (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
-
-        /* The rest done XXX document */
-    }
-    else
-    {
-        if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
-            && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
-            && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
-        { /* likely */ }
-        else
-        {
-            if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
-                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
-            if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
-                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
-            Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
-        }
-    }
-
-    /* NMI exiting and virtual-NMIs. */
-    if (    (pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
-        || !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
-    { /* likely */ }
-    else
-        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
-
-    /* Virtual-NMIs and NMI-window exiting. */
-    if (    (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
-        || !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
-    { /* likely */ }
-    else
-        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
-
-    /* Virtualize APIC accesses. */
-    if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
-    {
-        /* APIC-access physical address. */
-        RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
-        if (   !(GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
-            && !(GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
-
-        /*
-         * Disallow APIC-access page and virtual-APIC page from being the same address.
-         * Note! This is not an Intel requirement, but one imposed by our implementation.
-         */
-        /** @todo r=ramshankar: This is done primarily to simplify recursion scenarios while
-         *        redirecting accesses between the APIC-access page and the virtual-APIC
-         *        page. If any nested hypervisor requires this, we can implement it later. */
-        if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
-        {
-            RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
-            if (GCPhysVirtApic != GCPhysApicAccess)
+
+        /* VM-entry MSR-load count and VM-entry MSR-load area address. */
+        if (pVmcs->u32EntryMsrLoadCount)
+        {
+            if (   !(pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
+                && !(pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+                &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
             { /* likely */ }
             else
-                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
-        }
-    }
-
-    /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
-    if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
-        || !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
-    { /* likely */ }
-    else
-        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
-
-    /* Virtual-interrupt delivery requires external interrupt exiting. */
-    if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
-        ||  (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
-    { /* likely */ }
-    else
-        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
-
-    /* VPID. */
-    if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
-        || pVmcs->u16Vpid != 0)
-    { /* likely */ }
-    else
-        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
-
-    Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT));             /* We don't support posted interrupts yet. */
-    Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));                /* We don't support EPT yet. */
-    Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML));                /* We don't support PML yet. */
-    Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
-    Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC));             /* We don't support VM functions yet. */
-    Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE));             /* We don't support EPT-violation #VE yet. */
-    Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT));    /* We don't support Pause-loop exiting yet. */
-
-    /* VMCS shadowing. */
-    if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
-    {
-        /* VMREAD-bitmap physical address. */
-        RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
-        if (   !(GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
-            && !(GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
-
-        /* VMWRITE-bitmap physical address. */
-        RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmreadBitmap.u;
-        if (   !(GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
-            && !(GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
+        }
+
+        Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM));           /* We don't support SMM yet. */
+        Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON));    /* We don't support dual-monitor treatment yet. */
     }
 
@@ -7415 +7392 @@
     Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
 
-    int rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
+    int rc = iemVmxVmentryCheckCtls(pVCpu, pszInstr);
     if (RT_SUCCESS(rc))
     {
-        rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
+        rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
         if (RT_SUCCESS(rc))
         {
-            rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
+            /* Initialize read-only VMCS fields before VM-entry since we don't update all of them for every VM-exit. */
+            iemVmxVmentryInitReadOnlyFields(pVCpu);
+
+            /*
+             * Blocking of NMIs need to be restored if VM-entry fails due to invalid-guest state.
+             * So we save the VMCPU_FF_BLOCK_NMI force-flag here so we can restore it on
+             * VM-exit when required.
+             * See Intel spec. 26.7 "VM-entry Failures During or After Loading Guest State"
+             */
+            iemVmxVmentrySaveNmiBlockingFF(pVCpu);
+
+            rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
             if (RT_SUCCESS(rc))
             {
-                rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
+                rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
                 if (RT_SUCCESS(rc))
                 {
-                    /* Initialize read-only VMCS fields before VM-entry since we don't update all of them for every VM-exit. */
-                    iemVmxVmentryInitReadOnlyFields(pVCpu);
-
-                    /*
-                     * Blocking of NMIs need to be restored if VM-entry fails due to invalid-guest state.
-                     * So we save the VMCPU_FF_BLOCK_NMI force-flag here so we can restore it on
-                     * VM-exit when required.
-                     * See Intel spec. 26.7 "VM-entry Failures During or After Loading Guest State"
-                     */
-                    iemVmxVmentrySaveNmiBlockingFF(pVCpu);
-
-                    rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
+                    rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
                     if (RT_SUCCESS(rc))
                     {
-                        rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
-                        if (RT_SUCCESS(rc))
+                        Assert(rc != VINF_CPUM_R3_MSR_WRITE);
+
+                        /* VMLAUNCH instruction must update the VMCS launch state. */
+                        if (uInstrId == VMXINSTRID_VMLAUNCH)
+                            pVmcs->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;
+
+                        /* Perform the VMX transition (PGM updates). */
+                        VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
+                        if (rcStrict == VINF_SUCCESS)
+                        { /* likely */ }
+                        else if (RT_SUCCESS(rcStrict))
                         {
-                            rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
-                            if (RT_SUCCESS(rc))
-                            {
-                                Assert(rc != VINF_CPUM_R3_MSR_WRITE);
-
-                                /* VMLAUNCH instruction must update the VMCS launch state. */
-                                if (uInstrId == VMXINSTRID_VMLAUNCH)
-                                    pVmcs->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;
-
-                                /* Perform the VMX transition (PGM updates). */
-                                VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
-                                if (rcStrict == VINF_SUCCESS)
-                                { /* likely */ }
-                                else if (RT_SUCCESS(rcStrict))
-                                {
-                                    Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
-                                          VBOXSTRICTRC_VAL(rcStrict)));
-                                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
-                                }
-                                else
-                                {
-                                    Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
-                                    return rcStrict;
-                                }
-
-                                /* Paranoia. */
-                                Assert(rcStrict == VINF_SUCCESS);
-
-                                /* We've now entered nested-guest execution. */
-                                pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
-
-                                /*
-                                 * The priority of potential VM-exits during VM-entry is important.
-                                 * The priorities of VM-exits and events are listed from highest
-                                 * to lowest as follows:
-                                 *
-                                 * 1.  Event injection.
-                                 * 2.  Trap on task-switch (T flag set in TSS).
-                                 * 3.  TPR below threshold / APIC-write.
-                                 * 4.  SMI, INIT.
-                                 * 5.  MTF exit.
-                                 * 6.  Debug-trap exceptions (EFLAGS.TF), pending debug exceptions.
-                                 * 7.  VMX-preemption timer.
-                                 * 9.  NMI-window exit.
-                                 * 10. NMI injection.
-                                 * 11. Interrupt-window exit.
-                                 * 12. Virtual-interrupt injection.
-                                 * 13. Interrupt injection.
-                                 * 14. Process next instruction (fetch, decode, execute).
-                                 */
-
-                                /* Setup VMX-preemption timer. */
-                                iemVmxVmentrySetupPreemptTimer(pVCpu, pszInstr);
-
-                                /* Setup monitor-trap flag. */
-                                iemVmxVmentrySetupMtf(pVCpu, pszInstr);
-
-                                /* Setup NMI-window exiting. */
-                                iemVmxVmentrySetupNmiWindow(pVCpu, pszInstr);
-
-                                /* Setup interrupt-window exiting. */
-                                iemVmxVmentrySetupIntWindow(pVCpu, pszInstr);
-
-                                /*
-                                 * Inject any event that the nested hypervisor wants to inject.
-                                 * Note! We cannot immediately perform the event injection here as we may have
-                                 *       pending PGM operations to perform due to switching page tables and/or
-                                 *       mode.
-                                 */
-                                iemVmxVmentryInjectEvent(pVCpu, pszInstr);
+                            Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
+                                  VBOXSTRICTRC_VAL(rcStrict)));
+                            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
+                        }
+                        else
+                        {
+                            Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
+                            return rcStrict;
+                        }
+
+                        /* Paranoia. */
+                        Assert(rcStrict == VINF_SUCCESS);
+
+                        /* We've now entered nested-guest execution. */
+                        pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
+
+                        /*
+                         * The priority of potential VM-exits during VM-entry is important.
+                         * The priorities of VM-exits and events are listed from highest
+                         * to lowest as follows:
+                         *
+                         * 1.  Event injection.
+                         * 2.  Trap on task-switch (T flag set in TSS).
+                         * 3.  TPR below threshold / APIC-write.
+                         * 4.  SMI, INIT.
+                         * 5.  MTF exit.
+                         * 6.  Debug-trap exceptions (EFLAGS.TF), pending debug exceptions.
+                         * 7.  VMX-preemption timer.
+                         * 9.  NMI-window exit.
+                         * 10. NMI injection.
+                         * 11. Interrupt-window exit.
+                         * 12. Virtual-interrupt injection.
+                         * 13. Interrupt injection.
+                         * 14. Process next instruction (fetch, decode, execute).
+                         */
+
+                        /* Setup VMX-preemption timer. */
+                        iemVmxVmentrySetupPreemptTimer(pVCpu, pszInstr);
+
+                        /* Setup monitor-trap flag. */
+                        iemVmxVmentrySetupMtf(pVCpu, pszInstr);
+
+                        /* Setup NMI-window exiting. */
+                        iemVmxVmentrySetupNmiWindow(pVCpu, pszInstr);
+
+                        /* Setup interrupt-window exiting. */
+                        iemVmxVmentrySetupIntWindow(pVCpu, pszInstr);
+
+                        /*
+                         * Inject any event that the nested hypervisor wants to inject.
+                         * Note! We cannot immediately perform the event injection here as we may have
+                         *       pending PGM operations to perform due to switching page tables and/or
+                         *       mode.
+                         */
+                        iemVmxVmentryInjectEvent(pVCpu, pszInstr);
 
 # if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
-                                /* Reschedule to IEM-only execution of the nested-guest. */
-                                Log(("%s: Enabling IEM-only EM execution policy!\n", pszInstr));
-                                int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
-                                if (rcSched != VINF_SUCCESS)
-                                    iemSetPassUpStatus(pVCpu, rcSched);
+                        /* Reschedule to IEM-only execution of the nested-guest. */
+                        Log(("%s: Enabling IEM-only EM execution policy!\n", pszInstr));
+                        int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
+                        if (rcSched != VINF_SUCCESS)
+                            iemSetPassUpStatus(pVCpu, rcSched);
 # endif
 
-                                /* Finally, done. */
-                                Log3(("%s: cs:rip=%#04x:%#RX64 cr0=%#RX64 (%#RX64) cr4=%#RX64 (%#RX64) efer=%#RX64\n",
-                                      pszInstr, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0,
-                                      pVmcs->u64Cr0ReadShadow.u, pVCpu->cpum.GstCtx.cr4, pVmcs->u64Cr4ReadShadow.u,
-                                      pVCpu->cpum.GstCtx.msrEFER));
-                                return VINF_SUCCESS;
-                            }
-                            return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED,
-                                                pVmcs->u64RoExitQual.u);
-                        }
+                        /* Finally, done. */
+                        Log3(("%s: cs:rip=%#04x:%#RX64 cr0=%#RX64 (%#RX64) cr4=%#RX64 (%#RX64) efer=%#RX64\n",
+                              pszInstr, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0,
+                              pVmcs->u64Cr0ReadShadow.u, pVCpu->cpum.GstCtx.cr4, pVmcs->u64Cr4ReadShadow.u,
+                              pVCpu->cpum.GstCtx.msrEFER));
+                        return VINF_SUCCESS;
                     }
-                    return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED,
+                    return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED,
                                         pVmcs->u64RoExitQual.u);
                 }
-
-                iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
-                iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-                return VINF_SUCCESS;
             }
-        }
+            return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED,
+                                pVmcs->u64RoExitQual.u);
+        }
+
+        iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
     }
 