VirtualBox

Changeset 74258 in vbox


Ignore:
Timestamp:
Sep 14, 2018 4:20:16 AM (7 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
125057
Message:

VMM/IEM, CPUM: Nested VMX: bugref:9180 vmlaunch/vmresume bits.

Location:
trunk
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • TabularUnified trunk/include/VBox/vmm/cpumctx.h

    r74155 r74258  
    602602                uint32_t                uVmwriteBitmapR3Padding;
    603603#endif
    604                 /** 0x348 - Padding. */
    605                 uint8_t             abPadding[0x3f0 - 0x348];
     604                /** 0x348 - The MSR auto-load/store area - R0 ptr. */
     605                R0PTRTYPE(PVMXAUTOMSR)  pAutoMsrAreaR0;
     606#if HC_ARCH_BITS == 32
     607                uint32_t                uAutoMsrAreaR0;
     608#endif
     609                /** 0x350 - The MSR auto-load/store area - R3 ptr. */
     610                R3PTRTYPE(PVMXAUTOMSR)  pAutoMsrAreaR3;
     611#if HC_ARCH_BITS == 32
     612                uint32_t                uAutoMsrAreaR3;
     613#endif
     614                /** 0x358 - Padding. */
     615                uint8_t             abPadding[0x3f0 - 0x358];
    606616            } vmx;
    607617        } CPUM_UNION_NM(s);
  • TabularUnified trunk/include/VBox/vmm/hm_vmx.h

    r74227 r74258  
    29222922 */
    29232923
    2924 /** CR0 bits set here must always be set when in VMX operation. */
    2925 #define VMX_V_CR0_FIXED0                                        (X86_CR0_PE | X86_CR0_NE | X86_CR0_PG)
    2926 /** VMX_V_CR0_FIXED0 when unrestricted-guest execution is supported for the guest. */
    2927 #define VMX_V_CR0_FIXED0_UX                                     (VMX_V_CR0_FIXED0 & ~(X86_CR0_PE | X86_CR0_PG))
    2928 /** CR4 bits set here must always be set when in VMX operation. */
    2929 #define VMX_V_CR4_FIXED0                                        (X86_CR4_VMXE)
    2930 
    2931 /** Virtual VMCS revision ID. Bump this arbitrarily chosen identifier if incompatible
    2932  *  changes to the layout of VMXVVMCS are done.  Bit 31 MBZ.  */
    2933 #define VMX_V_VMCS_REVISION_ID                                  UINT32_C(0x1d000001)
    2934 AssertCompile(!(VMX_V_VMCS_REVISION_ID & RT_BIT(31)));
    2935 
    2936 /** The size of the virtual VMCS region (we use the maximum allowed size to avoid
    2937  *  complications when teleportation may be implemented). */
    2938 #define VMX_V_VMCS_SIZE                                         X86_PAGE_4K_SIZE
    2939 /** The size of the virtual VMCS region (in pages). */
    2940 #define VMX_V_VMCS_PAGES                                        1
    2941 
    2942 /** The size of the Virtual-APIC page (in bytes).  */
    2943 #define VMX_V_VIRT_APIC_SIZE                                    X86_PAGE_4K_SIZE
    2944 /** The size of the Virtual-APIC page (in pages). */
    2945 #define VMX_V_VIRT_APIC_PAGES                                   1
    2946 
    2947 /** The size of the VMREAD/VMWRITE bitmap (in bytes). */
    2948 #define VMX_V_VMREAD_VMWRITE_BITMAP_SIZE                        X86_PAGE_4K_SIZE
    2949 /** The size of the VMREAD/VMWRITE-bitmap (in pages). */
    2950 #define VMX_V_VMREAD_VMWRITE_BITMAP_PAGES                       1
    2951 
    2952 /** The highest index value used for supported virtual VMCS field encoding. */
    2953 #define VMX_V_VMCS_MAX_INDEX                                    RT_BF_GET(VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH, VMX_BF_VMCS_ENC_INDEX)
    2954 
    29552924/** @name Virtual VMX MSR - Miscellaneous data.
    29562925 * @{ */
     
    29622931#define VMX_V_PREEMPT_TIMER_SHIFT                               5
    29632932/** Maximum number of MSRs in the auto-load/store MSR areas, (n+1) * 512. */
    2964 #define VMX_V_MAX_MSRS                                          0
     2933#define VMX_V_AUTOMSR_COUNT_MAX                                 0
    29652934/** SMM MSEG revision ID. */
    29662935#define VMX_V_MSEG_REV_ID                                       0
     
    29742943#define VMX_V_VMCS_STATE_LAUNCHED       RT_BIT(2)
    29752944/** @} */
     2945
     2946/** CR0 bits set here must always be set when in VMX operation. */
     2947#define VMX_V_CR0_FIXED0                                        (X86_CR0_PE | X86_CR0_NE | X86_CR0_PG)
     2948/** VMX_V_CR0_FIXED0 when unrestricted-guest execution is supported for the guest. */
     2949#define VMX_V_CR0_FIXED0_UX                                     (VMX_V_CR0_FIXED0 & ~(X86_CR0_PE | X86_CR0_PG))
     2950/** CR4 bits set here must always be set when in VMX operation. */
     2951#define VMX_V_CR4_FIXED0                                        (X86_CR4_VMXE)
     2952
     2953/** Virtual VMCS revision ID. Bump this arbitrarily chosen identifier if incompatible
     2954 *  changes to the layout of VMXVVMCS are done.  Bit 31 MBZ.  */
     2955#define VMX_V_VMCS_REVISION_ID                                  UINT32_C(0x1d000001)
     2956AssertCompile(!(VMX_V_VMCS_REVISION_ID & RT_BIT(31)));
     2957
     2958/** The size of the virtual VMCS region (we use the maximum allowed size to avoid
     2959 *  complications when teleportation may be implemented). */
     2960#define VMX_V_VMCS_SIZE                                         X86_PAGE_4K_SIZE
     2961/** The size of the virtual VMCS region (in pages). */
     2962#define VMX_V_VMCS_PAGES                                        1
     2963
     2964/** The size of the Virtual-APIC page (in bytes).  */
     2965#define VMX_V_VIRT_APIC_SIZE                                    X86_PAGE_4K_SIZE
     2966/** The size of the Virtual-APIC page (in pages). */
     2967#define VMX_V_VIRT_APIC_PAGES                                   1
     2968
     2969/** The size of the VMREAD/VMWRITE bitmap (in bytes). */
     2970#define VMX_V_VMREAD_VMWRITE_BITMAP_SIZE                        X86_PAGE_4K_SIZE
     2971/** The size of the VMREAD/VMWRITE-bitmap (in pages). */
     2972#define VMX_V_VMREAD_VMWRITE_BITMAP_PAGES                       1
     2973
     2974/** The size of the auto-load/store MSR area (in bytes). */
     2975#define VMX_V_AUTOMSR_AREA_SIZE                                 ((512 * (VMX_V_AUTOMSR_COUNT_MAX + 1)) * sizeof(VMXAUTOMSR))
     2976/* Assert that the size is page aligned or adjust the VMX_V_AUTOMSR_AREA_PAGES macro below. */
     2977AssertCompile(RT_ALIGN_Z(VMX_V_AUTOMSR_AREA_SIZE, X86_PAGE_4K_SIZE) == VMX_V_AUTOMSR_AREA_SIZE);
     2978/** The size of the auto-load/store MSR area (in pages). */
     2979#define VMX_V_AUTOMSR_AREA_PAGES                                ((VMX_V_AUTOMSR_AREA_SIZE) >> X86_PAGE_4K_SHIFT)
     2980
     2981/** The highest index value used for supported virtual VMCS field encoding. */
     2982#define VMX_V_VMCS_MAX_INDEX                                    RT_BF_GET(VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH, VMX_BF_VMCS_ENC_INDEX)
    29762983
    29772984/**
     
    37613768    kVmxVDiag_Vmentry_HostSysenterEspEip,
    37623769    kVmxVDiag_Vmentry_LongModeCS,
     3770    kVmxVDiag_Vmentry_MsrLoad,
     3771    kVmxVDiag_Vmentry_MsrLoadCount,
     3772    kVmxVDiag_Vmentry_MsrLoadPtrReadPhys,
     3773    kVmxVDiag_Vmentry_MsrLoadRing3,
     3774    kVmxVDiag_Vmentry_MsrLoadRsvd,
    37633775    kVmxVDiag_Vmentry_NmiWindowExit,
    37643776    kVmxVDiag_Vmentry_PinCtlsAllowed1,
  • TabularUnified trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp

    r74171 r74258  
    15191519        int rc = HMVmxGetHostMsr(pVCpu->CTX_SUFF(pVM), MSR_IA32_VMX_MISC, &uHostMsr);
    15201520        AssertMsgRC(rc, ("HMVmxGetHostMsr failed. rc=%Rrc\n", rc)); RT_NOREF_PV(rc);
    1521         uint8_t const cMaxMsrs       = RT_MIN(RT_BF_GET(uHostMsr, VMX_BF_MISC_MAX_MSRS), VMX_V_MAX_MSRS);
     1521        uint8_t const cMaxMsrs       = RT_MIN(RT_BF_GET(uHostMsr, VMX_BF_MISC_MAX_MSRS), VMX_V_AUTOMSR_COUNT_MAX);
    15221522        uint8_t const fActivityState = RT_BF_GET(uHostMsr, VMX_BF_MISC_ACTIVITY_STATES) & VMX_V_GUEST_ACTIVITY_STATE_MASK;
    15231523        uVmxMsr = RT_BF_MAKE(VMX_BF_MISC_PREEMPT_TIMER_TSC,       VMX_V_PREEMPT_TIMER_SHIFT            )
  • TabularUnified trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp

    r74227 r74258  
    308308    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostSysenterEspEip       , "HostSysenterEspEip"        ),
    309309    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_LongModeCS               , "LongModeCS"                ),
     310    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoad                  , "MsrLoad"                   ),
     311    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoadCount             , "MsrLoadCount"              ),
     312    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoadPtrReadPhys       , "MsrLoadPtrReadPhys"        ),
     313    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoadRing3             , "MsrLoadRing3"              ),
     314    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoadRsvd              , "MsrLoadRsvd"               ),
    310315    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_NmiWindowExit            , "NmiWindowExit"             ),
    311316    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_PinCtlsAllowed1          , "PinCtlsAllowed1"           ),
  • TabularUnified trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r74227 r74258  
    39873987
    39883988/**
     3989 * Loads the guest auto-load MSRs area as part of VM-entry.
     3990 *
     3991 * @param   pVCpu       The cross context virtual CPU structure.
     3992 * @param   pszInstr    The VMX instruction name (for logging purposes).
     3993 */
     3994IEM_STATIC int iemVmxVmentryLoadGuestAutoMsrs(PVMCPU pVCpu, const char *pszInstr)
     3995{
     3996    /*
     3997     * Load guest MSRs.
     3998     * See Intel spec. 26.4 "Loading MSRs".
     3999     */
     4000    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4001    const char *const pszFailure = "VM-exit";
     4002
     4003    /*
     4004     * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
     4005     * exceeded including possibly raising #MC exceptions during VMX transition. Our
     4006     * implementation shall fail VM-entry with a VMX_EXIT_ERR_MSR_LOAD VM-exit.
     4007     */
     4008    uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
     4009    uint32_t const cMaxSupportedMsrs  = VMX_MISC_MAX_MSRS(u64GuestVmxMiscMsr);
     4010    uint32_t const cMsrs              = pVmcs->u32EntryMsrLoadCount;
     4011    Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
     4012    if (cMsrs <= cMaxSupportedMsrs)
     4013    { /* likely */ }
     4014    else
     4015    {
     4016        pVmcs->u64ExitQual.u = VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR);
     4017        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
     4018    }
     4019
     4020    RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
     4021    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
     4022                                     GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
     4023    if (RT_SUCCESS(rc))
     4024    {
     4025        PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea); NOREF(pMsr);
     4026        for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
     4027        {
     4028            if (   !pMsr->u32Reserved
     4029                &&  pMsr->u32Msr != MSR_K8_FS_BASE
     4030                &&  pMsr->u32Msr != MSR_K8_GS_BASE
     4031                &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
     4032                &&  pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL)
     4033            {
     4034                rc = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
     4035                if (rc == VINF_SUCCESS)
     4036                    continue;
     4037
     4038                /*
     4039                 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-entry.
     4040                 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure
     4041                 * indicated further with a different diagnostic code. Later, we can try to implement handling of
     4042                 * the MSR in ring-0 if possible, or come up with a better, generic solution.
     4043                 */
     4044                pVmcs->u64ExitQual.u = idxMsr;
     4045                VMXVDIAG const enmDiag = rc == VINF_CPUM_R3_MSR_WRITE
     4046                                       ? kVmxVDiag_Vmentry_MsrLoadRing3
     4047                                       : kVmxVDiag_Vmentry_MsrLoad;
     4048                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
     4049            }
     4050            else
     4051            {
     4052                pVmcs->u64ExitQual.u = idxMsr;
     4053                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
     4054            }
     4055        }
     4056    }
     4057    else
     4058    {
     4059        Log(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
     4060        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_MsrLoadPtrReadPhys;
     4061        return rc;
     4062    }
     4063
     4064    NOREF(pszInstr);
     4065    NOREF(pszFailure);
     4066    return VINF_SUCCESS;
     4067}
     4068
     4069
     4070/**
    39894071 * Loads the guest-state as part of VM-entry.
    39904072 *
     
    40164098    /* Clear address-range monitoring. */
    40174099    EMMonitorWaitClear(pVCpu);
    4018 
    4019     /* Load MSRs. */
    40204100
    40214101    NOREF(pszInstr);
     
    41424222                        if (RT_SUCCESS(rc))
    41434223                        {
    4144                             iemVmxVmSucceed(pVCpu);
    4145                             iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    4146                             return VINF_SUCCESS;
     4224                            /* Load MSRs from the VM-entry auto-load MSR area. */
     4225                            rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
     4226                            if (RT_SUCCESS(rc))
     4227                            {
     4228                                Assert(rc != VINF_CPUM_R3_MSR_WRITE);
     4229
     4230                                iemVmxVmSucceed(pVCpu);
     4231                                iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     4232                                return VINF_SUCCESS;
     4233                            }
     4234
     4235                            /** @todo NSTVMX: VMExit with VMX_EXIT_ERR_MSR_LOAD and set
     4236                             *        VMX_BF_EXIT_REASON_ENTRY_FAILED. */
    41474237                        }
    41484238                    }
  • TabularUnified trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r74155 r74258  
    931931            pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3 = NULL;
    932932        }
     933        if (pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3)
     934        {
     935            SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3, VMX_V_AUTOMSR_AREA_PAGES);
     936            pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3 = NULL;
     937        }
    933938    }
    934939}
     
    952957         * Allocate the nested-guest current VMCS.
    953958         */
    954         SUPPAGE SupNstGstVmcsPage;
    955         RT_ZERO(SupNstGstVmcsPage);
    956         SupNstGstVmcsPage.Phys = NIL_RTHCPHYS;
    957959        Assert(VMX_V_VMCS_PAGES == 1);
    958960        Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3);
    959961        rc = SUPR3PageAllocEx(VMX_V_VMCS_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3,
    960                               &pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR0, &SupNstGstVmcsPage);
     962                              &pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR0, NULL /* paPages */);
    961963        if (RT_FAILURE(rc))
    962964        {
     
    10061008            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMWRITE-bitmap\n", pVCpu->idCpu,
    10071009                    VMX_V_VMREAD_VMWRITE_BITMAP_PAGES));
     1010            break;
     1011        }
     1012
     1013        /*
     1014         * Allocate the MSR auto-load/store area.
     1015         */
     1016        Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3);
     1017        rc = SUPR3PageAllocEx(VMX_V_AUTOMSR_AREA_PAGES, 0 /* fFlags */,
     1018                              (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3,
     1019                              &pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR0, NULL /* paPages */);
     1020        if (RT_FAILURE(rc))
     1021        {
     1022            Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3);
     1023            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's auto-load/store MSR area\n", pVCpu->idCpu,
     1024                    VMX_V_AUTOMSR_AREA_PAGES));
    10081025            break;
    10091026        }
  • TabularUnified trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r74155 r74258  
    155155    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR0);
    156156    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR3);
     157    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVirtApicPageR0);
     158    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVirtApicPageR3);
     159    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmreadBitmapR0);
     160    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmreadBitmapR3);
     161    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmwriteBitmapR0);
     162    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmwriteBitmapR3);
     163    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pAutoMsrAreaR0);
     164    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pAutoMsrAreaR3);
    157165    GEN_CHECK_OFF(CPUMCTX, hwvirt.fLocalForcedActions);
    158166    GEN_CHECK_OFF(CPUMCTX, hwvirt.fGif);
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette