VirtualBox

Changeset 74065 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Sep 4, 2018 12:38:25 PM (6 years ago)
Author:
vboxsync
Message:

VMM/IEM, HM: Nested VMX: bugref:9180 vmlaunch/vmresume bits.

Location:
trunk/src/VBox/VMM/VMMAll
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp

    r74061 r74065  
    127127    /* VMLAUNCH/VMRESUME. */
    128128    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrApicAccess           , "AddrApicAccess"          ),
     129    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrEntryMsrLoad         , "AddrEntryMsrLoad"        ),
     130    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrExitMsrLoad          , "AddrExitMsrLoad"         ),
     131    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrExitMsrStore         , "AddrExitMsrStore"        ),
    129132    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrIoBitmapA            , "AddrIoBitmapA"           ),
    130133    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrIoBitmapB            , "AddrIoBitmapB"           ),
     
    137140    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Cpl                      , "Cpl"                     ),
    138141    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Cr3TargetCount           , "Cr3TargetCount"          ),
     142    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryCtlsAllowed1        , "EntryCtlsAllowed1"       ),
     143    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryCtlsDisallowed0     , "EntryCtlsDisallowed0"    ),
    139144    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsAllowed1         , "ExitCtlsAllowed1"        ),
    140145    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsDisallowed0      , "ExitCtlsDisallowed0"     ),
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r74061 r74065  
    8686        /*     1 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
    8787        /*     2 */ RT_OFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
    88         /*     3 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmExitMsrStore),
    89         /*     4 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmExitMsrLoad),
    90         /*     5 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmEntryMsrLoad),
     88        /*     3 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
     89        /*     4 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
     90        /*     5 */ RT_OFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
    9191        /*     6 */ RT_OFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
    9292        /*     7 */ RT_OFFSETOF(VMXVVMCS, u64AddrPml),
     
    167167    {
    168168        /*     0 */ RT_OFFSETOF(VMXVVMCS, u32RoVmInstrError),
    169         /*     1 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitReason),
    170         /*     2 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitIntInfo),
    171         /*     3 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitErrCode),
     169        /*     1 */ RT_OFFSETOF(VMXVVMCS, u32RoExitReason),
     170        /*     2 */ RT_OFFSETOF(VMXVVMCS, u32RoExitIntInfo),
     171        /*     3 */ RT_OFFSETOF(VMXVVMCS, u32RoExitErrCode),
    172172        /*     4 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
    173173        /*     5 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
    174         /*     6 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitInstrLen),
    175         /*     7 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitInstrInfo),
     174        /*     6 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrLen),
     175        /*     7 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
    176176        /*  8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    177177        /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
     
    19581958
    19591959/**
     1960 * Checks VM-entry controls fields as part of VM-entry.
     1961 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
     1962 *
     1963 * @returns VBox status code.
     1964 * @param   pVCpu           The cross context virtual CPU structure.
     1965 * @param   pszInstr        The VMX instruction name (for logging purposes).
     1966 */
     1967IEM_STATIC VBOXSTRICTRC iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
     1968{
     1969    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     1970
     1971    /* VM-entry controls. */
     1972    VMXCTLSMSR EntryCtls;
     1973    EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
     1974    if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0)
     1975    {
     1976        Log(("%s: Invalid EntryCtls %#RX32 (disallowed0) -> VMFail\n", pszInstr, pVmcs->u32EntryCtls));
     1977        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_EntryCtlsDisallowed0;
     1978        return VERR_VMX_VMENTRY_FAILED;
     1979    }
     1980    if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
     1981    {
     1982        Log(("%s: Invalid EntryCtls %#RX32 (allowed1) -> VMFail\n", pszInstr, pVmcs->u32EntryCtls));
     1983        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_EntryCtlsAllowed1;
     1984        return VERR_VMX_VMENTRY_FAILED;
     1985    }
     1986
     1987    /** @todo NSTVMX: rest of entry ctls. */
     1988
     1989    /* VM-entry MSR-load count and VM-entry MSR-load area address. */
     1990    uint8_t const cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth;
     1991    if (pVmcs->u32EntryMsrLoadCount)
     1992    {
     1993        if (   (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
     1994            || (pVmcs->u64AddrEntryMsrLoad.u >> cMaxPhysAddrWidth)
     1995            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
     1996        {
     1997            Log(("%s: VM-entry MSR-load area address %#RX64 invalid -> VMFail\n", pszInstr, pVmcs->u64AddrEntryMsrLoad.u));
     1998            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrEntryMsrLoad;
     1999            return VERR_VMX_VMENTRY_FAILED;
     2000        }
     2001    }
     2002
     2003    Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM));           /* We don't support SMM yet. */
     2004    Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON));    /* We don't support dual-monitor treatment yet. */
     2005
     2006    NOREF(pszInstr);
     2007    return VINF_SUCCESS;
     2008}
     2009
     2010
     2011/**
    19602012 * Checks VM-exit controls fields as part of VM-entry.
    19612013 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
     
    19942046    }
    19952047
    1996     /** @todo NSTVMX: rest of exit ctls. */
     2048    /* VM-exit MSR-store count and VM-exit MSR-store area address. */
     2049    uint8_t const cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth;
     2050    if (pVmcs->u32ExitMsrStoreCount)
     2051    {
     2052        if (   (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
     2053            || (pVmcs->u64AddrExitMsrStore.u >> cMaxPhysAddrWidth)
     2054            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
     2055        {
     2056            Log(("%s: VM-exit MSR-store area address %#RX64 invalid -> VMFail\n", pszInstr, pVmcs->u64AddrExitMsrStore.u));
     2057            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrExitMsrStore;
     2058            return VERR_VMX_VMENTRY_FAILED;
     2059        }
     2060    }
     2061
     2062    /* VM-exit MSR-load count and VM-exit MSR-load area address. */
     2063    if (pVmcs->u32ExitMsrLoadCount)
     2064    {
     2065        if (   (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
     2066            || (pVmcs->u64AddrExitMsrLoad.u >> cMaxPhysAddrWidth)
     2067            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
     2068        {
     2069            Log(("%s: VM-exit MSR-load area address %#RX64 invalid -> VMFail\n", pszInstr, pVmcs->u64AddrExitMsrLoad.u));
     2070            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrExitMsrLoad;
     2071            return VERR_VMX_VMENTRY_FAILED;
     2072        }
     2073    }
    19972074
    19982075    NOREF(pszInstr);
     
    20832160    {
    20842161        if (   (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
    2085             || (pVmcs->u64AddrIoBitmapA.u >> cMaxPhysAddrWidth))
     2162            || (pVmcs->u64AddrIoBitmapA.u >> cMaxPhysAddrWidth)
     2163            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
    20862164        {
    20872165            Log(("%s: I/O Bitmap A physaddr invalid %#RX64 -> VMFail\n", pszInstr, pVmcs->u64AddrIoBitmapA.u));
     
    20912169
    20922170        if (   (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
    2093             || (pVmcs->u64AddrIoBitmapB.u >> cMaxPhysAddrWidth))
     2171            || (pVmcs->u64AddrIoBitmapB.u >> cMaxPhysAddrWidth)
     2172            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
    20942173        {
    20952174            Log(("%s: I/O Bitmap B physaddr invalid %#RX64 -> VMFail\n", pszInstr, pVmcs->u64AddrIoBitmapB.u));
     
    24132492
    24142493    /*
    2415      * Check VM-exit fields.
     2494     * Check VM-exit control fields.
    24162495     */
    24172496    rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
     2497    if (rc == VINF_SUCCESS)
     2498    { /* likely */ }
     2499    else
     2500    {
     2501        iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
     2502        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     2503        return VINF_SUCCESS;
     2504    }
     2505
     2506    /*
     2507     * Check VM-entry control fields.
     2508     */
     2509    rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
    24182510    if (rc == VINF_SUCCESS)
    24192511    { /* likely */ }
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette