VirtualBox

Changeset 81200 in vbox


Timestamp:
Oct 10, 2019 5:08:13 AM
Author:
vboxsync
Message:

VMM/IEM: Nested VMX: bugref:9180 Postpone loading of VMCS-related data structures (PGM loads) from the checking phase to the loading phase. This gives us opportunities to optimize the checks in the future.
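
In outline, the change keeps only the address checks in the checking phase and defers the PGMPhysSimpleReadGCPhys() copies of the referenced structures (I/O bitmaps, MSR bitmap, virtual-APIC page, shadow VMCS) to a new iemVmxVmentryLoadGuestVmcsRefState() invoked from iemVmxVmentryLoadGuestState(). Below is a minimal, self-contained C sketch of that check/load split; ReadGuestPhys(), CheckMsrBitmapAddr() and LoadMsrBitmap() are illustrative stand-ins, not VirtualBox APIs.

    #include <stdint.h>
    #include <string.h>

    typedef uint64_t RTGCPHYS;              /* guest-physical address */
    #define MSR_BITMAP_SIZE 0x1000          /* 4K, like VMX_V_MSR_BITMAP_SIZE */

    /* Stand-in for PGMPhysSimpleReadGCPhys(): copies from a flat buffer
       modelling guest-physical memory. */
    static uint8_t g_abGuestPhys[0x10000];
    static int ReadGuestPhys(void *pvDst, RTGCPHYS GCPhys, size_t cbToRead)
    {
        if (   GCPhys > sizeof(g_abGuestPhys)
            || cbToRead > sizeof(g_abGuestPhys) - (size_t)GCPhys)
            return -1;                      /* physical read failed */
        memcpy(pvDst, &g_abGuestPhys[GCPhys], cbToRead);
        return 0;
    }

    /* Checking phase: validate the address taken from the VMCS without
       touching guest memory; this is all that remains in the checks. */
    static int CheckMsrBitmapAddr(RTGCPHYS GCPhysMsrBitmap)
    {
        return (GCPhysMsrBitmap & 0xfff) ? -1 /* misaligned */ : 0;
    }

    /* Loading phase: the potentially expensive read of guest memory is
       deferred to here, mirroring iemVmxVmentryLoadGuestVmcsRefState(). */
    static int LoadMsrBitmap(uint8_t *pbDst, RTGCPHYS GCPhysMsrBitmap)
    {
        return ReadGuestPhys(pbDst, GCPhysMsrBitmap, MSR_BITMAP_SIZE);
    }

Because the reads now run only after all the cheap register-state checks have passed, a future fast path can batch or skip them, which is the optimization opportunity the message refers to.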

File:
1 edited

  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r81002 r81200  
    5663 5663            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
    5664 5664        }
    5665 
    5666         /* Read the VMCS-link pointer from guest memory. */
    5667         Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
    5668         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
    5669                                          GCPhysShadowVmcs, VMX_V_SHADOW_VMCS_SIZE);
    5670         if (RT_SUCCESS(rc))
    5671         { /* likely */ }
    5672         else
    5673         {
    5674             iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
    5675             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
    5676         }
    5677 
    5678         /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
    5679         if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
    5680         { /* likely */ }
    5681         else
    5682         {
    5683             iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
    5684             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
    5685         }
    5686 
    5687         /* Verify the shadow bit is set if VMCS shadowing is enabled. */
    5688         if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
    5689             || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
    5690         { /* likely */ }
    5691         else
    5692         {
    5693             iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
    5694             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
    5695         }
    5696 
    5697         /* Finally update our cache of the guest physical address of the shadow VMCS. */
    5698         pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
    5699 5665    }
    5700 5666
     
    6277 6243        else
    6278 6244            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
    6279 
    6280         /* Read the IO bitmaps. */
    6281         /** @todo NSTVMX: Move this to be done later (while loading guest state) when
    6282          *        implementing fast path. */
    6283         Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap));
    6284         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap),
    6285                                          GCPhysIoBitmapA, VMX_V_IO_BITMAP_A_SIZE);
    6286         if (RT_SUCCESS(rc))
    6287         { /* likely */ }
    6288         else
    6289             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_IoBitmapAPtrReadPhys);
    6290 
    6291         uint8_t *pbIoBitmapB = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap) + VMX_V_IO_BITMAP_A_SIZE;
    6292         rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbIoBitmapB, GCPhysIoBitmapB, VMX_V_IO_BITMAP_B_SIZE);
    6293         if (RT_SUCCESS(rc))
    6294         { /* likely */ }
    6295         else
    6296             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_IoBitmapBPtrReadPhys);
    6297 6245    }
    6298 6246
     
    6307 6255        else
    6308 6256            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
    6309 
    6310         /* Read the MSR bitmap. */
    6311         /** @todo NSTVMX: Move this to be done later (while loading guest state) when
    6312          *        implementing fast path. */
    6313         Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
    6314         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap),
    6315                                          GCPhysMsrBitmap, VMX_V_MSR_BITMAP_SIZE);
    6316         if (RT_SUCCESS(rc))
    6317         { /* likely */ }
    6318         else
    6319             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
    6320 6257    }
    6321 6258
     
    6339 6276            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
    6340 6277
    6341         /* Verify TPR threshold and VTPR when both virtualize-APIC accesses and virtual-interrupt delivery aren't used. */
    6342         if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    6343             && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
    6344         {
    6345             /* Read the VTPR from the virtual-APIC page. */
    6346             uint8_t u8VTpr;
    6347             int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &u8VTpr, GCPhysVirtApic + XAPIC_OFF_TPR, sizeof(u8VTpr));
    6348             if (RT_SUCCESS(rc))
    6349             { /* likely */ }
    6350             else
    6351                 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
    6352 
    6353             /* Bits 3:0 of the TPR-threshold must not be greater than bits 7:4 of VTPR. */
    6354         if ((uint8_t)RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) <= (u8VTpr >> 4))
    6355             { /* likely */ }
    6356             else
    6357                 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
    6358         }
     6278        /* The rest is done while loading guest state; see iemVmxVmentryLoadGuestVmcsRefState(). */
    6359 6279    }
    6360 6280    else
     
    6416 6336                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
    6417 6337        }
    6418 
    6419         /*
    6420          * Register the handler for the APIC-access page.
    6421          *
    6422          * We don't deregister the APIC-access page handler during the VM-exit as a different
    6423          * nested-VCPU might be using the same guest-physical address for its APIC-access page.
    6424          *
    6425          * We leave the page registered until the first access that happens outside VMX non-root
    6426          * mode. Guest software is allowed to access structures such as the APIC-access page
    6427          * only when no logical processor with a current VMCS references it in VMX non-root mode,
    6428          * otherwise it can lead to unpredictable behavior including guest triple-faults.
    6429          *
    6430          * See Intel spec. 24.11.4 "Software Access to Related Structures".
    6431          */
    6432         if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
    6433         {
    6434             int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
    6435                                                 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
    6436                                                 NIL_RTR0PTR /* pvUserR0 */,  NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
    6437             if (RT_SUCCESS(rc))
    6438             { /* likely */ }
    6439             else
    6440                 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessHandlerReg);
    6441         }
    6442 6338    }
    6443 6339
     
    6491 6387        else
    6492 6388            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
    6493 
    6494         /* Read the VMREAD-bitmap. */
    6495         Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
    6496         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
    6497                                          GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
    6498         if (RT_SUCCESS(rc))
    6499         { /* likely */ }
    6500         else
    6501             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
    6502 
    6503         /* Read the VMWRITE-bitmap. */
    6504         Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
    6505         rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
    6506                                      GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
    6507         if (RT_SUCCESS(rc))
    6508         { /* likely */ }
    6509         else
    6510             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
    6511 6389    }
    6512 6390
     
    6844 6722
    6845 6723/**
     6724 * Loads the guest VMCS-referenced state (such as MSR bitmaps, I/O bitmaps, etc.).
     6725 *
     6726 * @param   pVCpu       The cross context virtual CPU structure.
     6727 * @param   pszInstr    The VMX instruction name (for logging purposes).
     6728 *
     6729 * @remarks This assumes various VMCS-related data structure pointers have already
     6730 *          been verified prior to calling this function.
     6731 */
     6732IEM_STATIC int iemVmxVmentryLoadGuestVmcsRefState(PVMCPUCC pVCpu, const char *pszInstr)
     6733{
     6734    const char *const pszFailure  = "VM-exit";
     6735    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     6736
     6737    /*
     6738     * Virtualize APIC accesses.
     6739     */
     6740    if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
     6741    {
     6742        /* APIC-access physical address. */
     6743        RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
     6744
     6745        /*
     6746         * Register the handler for the APIC-access page.
     6747         *
     6748         * We don't deregister the APIC-access page handler during the VM-exit as a different
     6749         * nested-VCPU might be using the same guest-physical address for its APIC-access page.
     6750         *
     6751         * We leave the page registered until the first access that happens outside VMX non-root
     6752         * mode. Guest software is allowed to access structures such as the APIC-access page
     6753         * only when no logical processor with a current VMCS references it in VMX non-root mode,
     6754         * otherwise it can lead to unpredictable behavior including guest triple-faults.
     6755         *
     6756         * See Intel spec. 24.11.4 "Software Access to Related Structures".
     6757         */
     6758        if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
     6759        {
     6760            int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
     6761                                                pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
     6762                                                NIL_RTR0PTR /* pvUserR0 */,  NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
     6763            if (RT_SUCCESS(rc))
     6764            { /* likely */ }
     6765            else
     6766                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessHandlerReg);
     6767        }
     6768    }
     6769
     6770    /*
     6771     * VMCS shadowing.
     6772     */
     6773    if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
     6774    {
     6775        /* Read the VMREAD-bitmap. */
     6776        RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
     6777        Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
     6778        int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
     6779                                         GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
     6780        if (RT_SUCCESS(rc))
     6781        { /* likely */ }
     6782        else
     6783            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
     6784
     6785        /* Read the VMWRITE-bitmap. */
     6786        RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
     6787        Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
     6788        rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
     6789                                     GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
     6790        if (RT_SUCCESS(rc))
     6791        { /* likely */ }
     6792        else
     6793            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
     6794    }
     6795
     6796    /*
     6797     * I/O bitmaps.
     6798     */
     6799    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
     6800    {
     6801        /* Read the IO bitmap A. */
     6802        RTGCPHYS const GCPhysIoBitmapA = pVmcs->u64AddrIoBitmapA.u;
     6803        Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap));
     6804        int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap),
     6805                                         GCPhysIoBitmapA, VMX_V_IO_BITMAP_A_SIZE);
     6806        if (RT_SUCCESS(rc))
     6807        { /* likely */ }
     6808        else
     6809            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_IoBitmapAPtrReadPhys);
     6810
     6811        /* Read the IO bitmap B. */
     6812        RTGCPHYS const GCPhysIoBitmapB = pVmcs->u64AddrIoBitmapB.u;
     6813        uint8_t *pbIoBitmapB = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap) + VMX_V_IO_BITMAP_A_SIZE;
     6814        rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbIoBitmapB, GCPhysIoBitmapB, VMX_V_IO_BITMAP_B_SIZE);
     6815        if (RT_SUCCESS(rc))
     6816        { /* likely */ }
     6817        else
     6818            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_IoBitmapBPtrReadPhys);
     6819    }
     6820
     6821    /*
     6822     * TPR shadow and Virtual-APIC page.
     6823     */
     6824    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
     6825    {
     6826        /* Verify TPR threshold and VTPR when both virtualize-APIC accesses and virtual-interrupt delivery aren't used. */
     6827        if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
     6828            && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
     6829        {
     6830            /* Read the VTPR from the virtual-APIC page. */
     6831            RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     6832            uint8_t u8VTpr;
     6833            int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &u8VTpr, GCPhysVirtApic + XAPIC_OFF_TPR, sizeof(u8VTpr));
     6834            if (RT_SUCCESS(rc))
     6835            { /* likely */ }
     6836            else
     6837                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
     6838
     6839            /* Bits 3:0 of the TPR-threshold must not be greater than bits 7:4 of VTPR. */
     6840            if ((uint8_t)RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) <= (u8VTpr >> 4))
     6841            { /* likely */ }
     6842            else
     6843                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
     6844        }
     6845    }
     6846
     6847    /*
     6848     * VMCS link pointer.
     6849     */
     6850    if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
     6851    {
     6852        /* Read the VMCS-link pointer from guest memory. */
     6853        RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
     6854        Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
     6855        int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
     6856                                         GCPhysShadowVmcs, VMX_V_SHADOW_VMCS_SIZE);
     6857        if (RT_SUCCESS(rc))
     6858        { /* likely */ }
     6859        else
     6860        {
     6861            iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
     6862            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
     6863        }
     6864
     6865        /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
     6866        if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
     6867        { /* likely */ }
     6868        else
     6869        {
     6870            iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
     6871            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
     6872        }
     6873
     6874        /* Verify the shadow bit is set if VMCS shadowing is enabled. */
     6875        if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
     6876            || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
     6877        { /* likely */ }
     6878        else
     6879        {
     6880            iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
     6881            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
     6882        }
     6883
     6884        /* Update our cache of the guest physical address of the shadow VMCS. */
     6885        pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
     6886    }
     6887
     6888    /*
     6889     * MSR bitmap.
     6890     */
     6891    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     6892    {
     6893        /* Read the MSR bitmap. */
     6894        RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
     6895        Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
     6896        int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap),
     6897                                         GCPhysMsrBitmap, VMX_V_MSR_BITMAP_SIZE);
     6898        if (RT_SUCCESS(rc))
     6899        { /* likely */ }
     6900        else
     6901            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
     6902    }
     6903
     6904    NOREF(pszFailure);
     6905    NOREF(pszInstr);
     6906    return VINF_SUCCESS;
     6907}
     6908
     6909
     6910/**
    6846 6911 * Loads the guest-state as part of VM-entry.
    6847 6912 *
     
    6855 6920IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPUCC pVCpu, const char *pszInstr)
    6856 6921{
     6922    /* Load guest control registers, MSRs (that are directly part of the VMCS). */
    6857 6923    iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
     6924
     6925    /* Load guest segment registers. */
    6858 6926    iemVmxVmentryLoadGuestSegRegs(pVCpu);
    6859 6927
     
    6871 6939    pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick      = 0;
    6872 6940
     6941    /* Load guest non-register state (such as interrupt shadows, NMI blocking, etc.). */
    6873 6942    iemVmxVmentryLoadGuestNonRegState(pVCpu);
     6943
     6944    /* Load VMX-related structures and state referenced by the VMCS. */
     6945    int rc = iemVmxVmentryLoadGuestVmcsRefState(pVCpu, pszInstr);
     6946    if (rc == VINF_SUCCESS)
     6947    { /* likely */ }
     6948    else
     6949        return rc;
    6874 6950
    6875 6951    NOREF(pszInstr);