Changeset 74065 in vbox for trunk/src/VBox/VMM
- Timestamp:
- Sep 4, 2018 12:38:25 PM (6 years ago)
- Location:
- trunk/src/VBox/VMM/VMMAll
- Files:
- 2 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
r74061 r74065 127 127 /* VMLAUNCH/VMRESUME. */ 128 128 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrApicAccess , "AddrApicAccess" ), 129 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrEntryMsrLoad , "AddrEntryMsrLoad" ), 130 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrExitMsrLoad , "AddrExitMsrLoad" ), 131 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrExitMsrStore , "AddrExitMsrStore" ), 129 132 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrIoBitmapA , "AddrIoBitmapA" ), 130 133 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrIoBitmapB , "AddrIoBitmapB" ), … … 137 140 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Cpl , "Cpl" ), 138 141 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Cr3TargetCount , "Cr3TargetCount" ), 142 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryCtlsAllowed1 , "EntryCtlsAllowed1" ), 143 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryCtlsDisallowed0 , "EntryCtlsDisallowed0" ), 139 144 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsAllowed1 , "ExitCtlsAllowed1" ), 140 145 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsDisallowed0 , "ExitCtlsDisallowed0" ), -
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
r74061 r74065 86 86 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapB), 87 87 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64AddrMsrBitmap), 88 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64Addr VmExitMsrStore),89 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64Addr VmExitMsrLoad),90 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64Addr VmEntryMsrLoad),88 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrStore), 89 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrLoad), 90 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad), 91 91 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64ExecVmcsPtr), 92 92 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64AddrPml), … … 167 167 { 168 168 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32RoVmInstrError), 169 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32Ro VmExitReason),170 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32Ro VmExitIntInfo),171 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32Ro VmExitErrCode),169 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32RoExitReason), 170 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32RoExitIntInfo), 171 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32RoExitErrCode), 172 172 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo), 173 173 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode), 174 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32Ro VmExitInstrLen),175 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32Ro VmExitInstrInfo),174 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrLen), 175 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrInfo), 176 176 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, 177 177 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, … … 1958 1958 1959 1959 /** 1960 * Checks VM-entry controls fields as part of VM-entry. 1961 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields". 1962 * 1963 * @returns VBox status code. 1964 * @param pVCpu The cross context virtual CPU structure. 1965 * @param pszInstr The VMX instruction name (for logging purposes). 
1966 */ 1967 IEM_STATIC VBOXSTRICTRC iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr) 1968 { 1969 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 1970 1971 /* VM-entry controls. */ 1972 VMXCTLSMSR EntryCtls; 1973 EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu); 1974 if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0) 1975 { 1976 Log(("%s: Invalid EntryCtls %#RX32 (disallowed0) -> VMFail\n", pszInstr, pVmcs->u32EntryCtls)); 1977 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_EntryCtlsDisallowed0; 1978 return VERR_VMX_VMENTRY_FAILED; 1979 } 1980 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1) 1981 { 1982 Log(("%s: Invalid EntryCtls %#RX32 (allowed1) -> VMFail\n", pszInstr, pVmcs->u32EntryCtls)); 1983 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_EntryCtlsAllowed1; 1984 return VERR_VMX_VMENTRY_FAILED; 1985 } 1986 1987 /** @todo NSTVMX: rest of entry ctls. */ 1988 1989 /* VM-entry MSR-load count and VM-entry MSR-load area address. */ 1990 uint8_t const cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth; 1991 if (pVmcs->u32EntryMsrLoadCount) 1992 { 1993 if ( (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK) 1994 || (pVmcs->u64AddrEntryMsrLoad.u >> cMaxPhysAddrWidth) 1995 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u)) 1996 { 1997 Log(("%s: VM-entry MSR-load area address %#RX64 invalid -> VMFail\n", pszInstr, pVmcs->u64AddrEntryMsrLoad.u)); 1998 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrEntryMsrLoad; 1999 return VERR_VMX_VMENTRY_FAILED; 2000 } 2001 } 2002 2003 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */ 2004 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. 
*/ 2005 2006 NOREF(pszInstr); 2007 return VINF_SUCCESS; 2008 } 2009 2010 2011 /** 1960 2012 * Checks VM-exit controls fields as part of VM-entry. 1961 2013 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields". … … 1994 2046 } 1995 2047 1996 /** @todo NSTVMX: rest of exit ctls. */ 2048 /* VM-exit MSR-store count and VM-exit MSR-store area address. */ 2049 uint8_t const cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth; 2050 if (pVmcs->u32ExitMsrStoreCount) 2051 { 2052 if ( (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK) 2053 || (pVmcs->u64AddrExitMsrStore.u >> cMaxPhysAddrWidth) 2054 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u)) 2055 { 2056 Log(("%s: VM-exit MSR-store area address %#RX64 invalid -> VMFail\n", pszInstr, pVmcs->u64AddrExitMsrStore.u)); 2057 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrExitMsrStore; 2058 return VERR_VMX_VMENTRY_FAILED; 2059 } 2060 } 2061 2062 /* VM-exit MSR-load count and VM-exit MSR-load area address. 
*/ 2063 if (pVmcs->u32ExitMsrLoadCount) 2064 { 2065 if ( (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK) 2066 || (pVmcs->u64AddrExitMsrLoad.u >> cMaxPhysAddrWidth) 2067 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u)) 2068 { 2069 Log(("%s: VM-exit MSR-load area address %#RX64 invalid -> VMFail\n", pszInstr, pVmcs->u64AddrExitMsrLoad.u)); 2070 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrExitMsrLoad; 2071 return VERR_VMX_VMENTRY_FAILED; 2072 } 2073 } 1997 2074 1998 2075 NOREF(pszInstr); … 2083 2160 { 2084 2161 if ( (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK) 2085 || (pVmcs->u64AddrIoBitmapA.u >> cMaxPhysAddrWidth)) 2162 || (pVmcs->u64AddrIoBitmapA.u >> cMaxPhysAddrWidth) 2163 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u)) 2086 2164 { 2087 2165 Log(("%s: I/O Bitmap A physaddr invalid %#RX64 -> VMFail\n", pszInstr, pVmcs->u64AddrIoBitmapA.u)); … 2091 2169 2092 2170 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK) 2093 || (pVmcs->u64AddrIoBitmapB.u >> cMaxPhysAddrWidth)) 2171 || (pVmcs->u64AddrIoBitmapB.u >> cMaxPhysAddrWidth) 2172 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u)) 2094 2173 { 2095 2174 Log(("%s: I/O Bitmap B physaddr invalid %#RX64 -> VMFail\n", pszInstr, pVmcs->u64AddrIoBitmapB.u)); … 2413 2492 2414 2493 /* 2415 * Check VM-exit fields.2494 * Check VM-exit control fields. 2416 2495 */ 2417 2496 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr); 2497 if (rc == VINF_SUCCESS) 2498 { /* likely */ } 2499 else 2500 { 2501 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS); 2502 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 2503 return VINF_SUCCESS; 2504 } 2505 2506 /* 2507 * Check VM-entry control fields. 2508 */ 2509 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr); 2418 2510 if (rc == VINF_SUCCESS) 2419 2511 { /* likely */ }
Note:
See TracChangeset
for help on using the changeset viewer.