Changeset 74113 in vbox for trunk/src/VBox/VMM
Timestamp:  Sep 6, 2018 11:49:14 AM
Location:   trunk/src/VBox/VMM
Files:      4 edited
Legend: unprefixed lines are unmodified context; lines prefixed with '+' were added, lines prefixed with '-' were removed.
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
r74102 → r74113

     uVmxMsr = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID,        VMX_V_VMCS_REVISION_ID      )
             | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE,      VMX_V_VMCS_SIZE             )
-            | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH, VMX_V_VMCS_PHYSADDR_4G_LIMIT)
+            | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH, !pGuestFeatures->fLongMode  )
             | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON,       0                           )
             | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE,  VMX_BASIC_MEM_TYPE_WB       )
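With this change, bit 48 of IA32_VMX_BASIC (the "physical addresses limited to 32 bits" indicator for VMXON/VMCS pointers) is reported as set exactly when the guest CPU profile lacks long mode, instead of being tied to a fixed constant. A minimal, self-contained C sketch of how a guest would interpret that bit; the names here are illustrative, not VirtualBox code:

/* Interpret bit 48 of IA32_VMX_BASIC: when set, VMXON, VMCS and the data
 * structures they reference must lie below 4 GB. */
#include <stdint.h>
#include <stdio.h>

#define VMX_BASIC_PHYSADDR_WIDTH_BIT  48

static unsigned vmxRegionAddrWidth(uint64_t uVmxBasic, unsigned cCpuPhysAddrWidth)
{
    if (uVmxBasic & ((uint64_t)1 << VMX_BASIC_PHYSADDR_WIDTH_BIT))
        return 32;                  /* no long mode -> 32-bit limit */
    return cCpuPhysAddrWidth;       /* otherwise the CPU's full width */
}

int main(void)
{
    uint64_t const uBasicNoLongMode = (uint64_t)1 << VMX_BASIC_PHYSADDR_WIDTH_BIT;
    printf("no long mode: %u bits\n", vmxRegionAddrWidth(uBasicNoLongMode, 39)); /* 32 */
    printf("long mode:    %u bits\n", vmxRegionAddrWidth(0, 39));                /* 39 */
    return 0;
}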
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
r74104 → r74113

     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostEferMsr            , "HostEferMsr"            ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostGuestLongMode      , "HostGuestLongMode"      ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostGuestLongModeNoCpu , "HostGuestLongModeNoCpu" ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostSysenterEspEip     , "HostSysenterEspEip"     ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostPatMsr             , "HostPatMsr"             ),
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
r74105 → r74113

 } while (0)
 # endif /* !IEM_WITH_CODE_TLB */
+
+/** The maximum physical address width in bits. */
+#define IEM_VMX_MAX_PHYSADDR_WIDTH(a_pVCpu)     (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->cVmxMaxPhysAddrWidth)

 /** Whether a shadow VMCS is present for the given VCPU. */
…
     /* VMCS physical-address width limits. */
-    Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
-    if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
+    if (GCPhysVmcs >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     {
         Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
…
     /* VMCS physical-address width limits. */
-    Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
-    if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
+    if (GCPhysVmcs >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     {
         Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
…
     /* VMXON physical-address width limits. */
-    Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
-    if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
+    if (GCPhysVmxon >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     {
         Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
…
 /**
- * Clears the high 32-bits of all natural-width fields in the given VMCS.
- *
- * @param   pVmcs   Pointer to the virtual VMCS.
- */
-IEM_STATIC void iemVmxVmcsFixNaturalWidthFields(PVMXVVMCS pVmcs)
-{
-    /* Natural-width Control fields. */
-    pVmcs->u64Cr0Mask.s.Hi        = 0;
-    pVmcs->u64Cr4Mask.s.Hi        = 0;
-    pVmcs->u64Cr0ReadShadow.s.Hi  = 0;
-    pVmcs->u64Cr4ReadShadow.s.Hi  = 0;
-    pVmcs->u64Cr3Target0.s.Hi     = 0;
-    pVmcs->u64Cr3Target1.s.Hi     = 0;
-    pVmcs->u64Cr3Target2.s.Hi     = 0;
-    pVmcs->u64Cr3Target3.s.Hi     = 0;
-
-    /* Natural-width Read-only data fields. */
-    pVmcs->u64ExitQual.s.Hi        = 0;
-    pVmcs->u64IoRcx.s.Hi           = 0;
-    pVmcs->u64IoRsi.s.Hi           = 0;
-    pVmcs->u64IoRdi.s.Hi           = 0;
-    pVmcs->u64IoRip.s.Hi           = 0;
-    pVmcs->u64GuestLinearAddr.s.Hi = 0;
-
-    /* Natural-width Guest-state Fields. */
-    pVmcs->u64GuestCr0.s.Hi            = 0;
-    pVmcs->u64GuestCr3.s.Hi            = 0;
-    pVmcs->u64GuestCr4.s.Hi            = 0;
-    pVmcs->u64GuestEsBase.s.Hi         = 0;
-    pVmcs->u64GuestCsBase.s.Hi         = 0;
-    pVmcs->u64GuestSsBase.s.Hi         = 0;
-    pVmcs->u64GuestDsBase.s.Hi         = 0;
-    pVmcs->u64GuestFsBase.s.Hi         = 0;
-    pVmcs->u64GuestGsBase.s.Hi         = 0;
-    pVmcs->u64GuestLdtrBase.s.Hi       = 0;
-    pVmcs->u64GuestTrBase.s.Hi         = 0;
-    pVmcs->u64GuestGdtrBase.s.Hi       = 0;
-    pVmcs->u64GuestIdtrBase.s.Hi       = 0;
-    pVmcs->u64GuestDr7.s.Hi            = 0;
-    pVmcs->u64GuestRsp.s.Hi            = 0;
-    pVmcs->u64GuestRip.s.Hi            = 0;
-    pVmcs->u64GuestRFlags.s.Hi         = 0;
-    pVmcs->u64GuestPendingDbgXcpt.s.Hi = 0;
-    pVmcs->u64GuestSysenterEsp.s.Hi    = 0;
-    pVmcs->u64GuestSysenterEip.s.Hi    = 0;
-
-    /* Natural-width Host-state fields. */
-    pVmcs->u64HostCr0.s.Hi          = 0;
-    pVmcs->u64HostCr3.s.Hi          = 0;
-    pVmcs->u64HostCr4.s.Hi          = 0;
-    pVmcs->u64HostFsBase.s.Hi       = 0;
-    pVmcs->u64HostGsBase.s.Hi       = 0;
-    pVmcs->u64HostTrBase.s.Hi       = 0;
-    pVmcs->u64HostGdtrBase.s.Hi     = 0;
-    pVmcs->u64HostIdtrBase.s.Hi     = 0;
-    pVmcs->u64HostSysenterEsp.s.Hi  = 0;
-    pVmcs->u64HostSysenterEip.s.Hi  = 0;
-    pVmcs->u64HostRsp.s.Hi          = 0;
-    pVmcs->u64HostRip.s.Hi          = 0;
-}
-
-
-/**
  * Checks host state as part of VM-entry.
 *
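For reference, a standalone sketch (plain C, not the IEM code itself) of the width test the new IEM_VMX_MAX_PHYSADDR_WIDTH macro feeds in the VMCLEAR/VMPTRLD/VMXON paths above, backed by the cVmxMaxPhysAddrWidth feature added in CPUMR3CpuId.cpp further down: an address passes only if shifting it right by the supported width leaves nothing behind.

/* Reject any physical address with bits set at or above the supported width. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool vmxPhysAddrWithinWidth(uint64_t GCPhys, unsigned cAddrWidth)
{
    return (GCPhys >> cAddrWidth) == 0;
}

int main(void)
{
    assert( vmxPhysAddrWithinWidth(UINT64_C(0x00000000fffff000), 32)); /* fits in 32 bits */
    assert(!vmxPhysAddrWithinWidth(UINT64_C(0x0000000100000000), 32)); /* crosses the 4 GB limit */
    assert( vmxPhysAddrWithinWidth(UINT64_C(0x0000000100000000), 36)); /* fine with a 36-bit width */
    return 0;
}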
…
     {
         /* CR3 reserved bits. */
-        uint8_t const cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth;
-        if (pVmcs->u64HostCr3.u >> cMaxPhysAddrWidth)
+        if (pVmcs->u64HostCr3.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
         {
             Log(("%s: Invalid host CR3 %#RX64 -> VMFail\n", pszInstr, pVmcs->u64HostCr3));
…
         return VERR_VMX_VMENTRY_FAILED;
     }
-    bool const fVirtHostLongMode       = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
+    bool const fVirtHostInLongMode     = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     bool const fNstGstLongModeActive   = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
     bool const fNstGstLongModeEnabled  = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
-    if (fVirtHostLongMode == fNstGstLongModeActive == fNstGstLongModeEnabled)
+    if (fVirtHostInLongMode == fNstGstLongModeActive == fNstGstLongModeEnabled)
     { /* likely */ }
     else
…
     /* SS cannot be 0 if 32-bit host. */
-    if (   fVirtHostLongMode
+    if (   fVirtHostInLongMode
         || pVmcs->HostSs)
     { /* likely */ }
…
             return VERR_VMX_VMENTRY_FAILED;
         }
-
-    /*
-     * Host address-space size for 64-bit CPUs.
-     * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
-     */
+    }
+
+    /*
+     * Host address-space size for 64-bit CPUs.
+     * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
+     */
+    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
+    {
+        bool const fGstInLongMode    = CPUMIsGuestInLongMode(pVCpu);
+        bool const fNstGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
+
+        /* Logical processor in IA-32e mode. */
+        if (fGstInLongMode)
+        {
+            if (fVirtHostInLongMode)
+            {
+                /* PAE must be set. */
+                if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
+                { /* likely */ }
+                else
+                {
+                    /* fail. */
+                }
+
+                /* RIP must be canonical. */
+                if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
+                { /* likely */ }
+                else
+                {
+                    /* fail. */
+                }
+            }
+            else
+            {
+                /* fail. */
+            }
+        }
+        else
+        {
+            /* Logical processor is outside IA-32e mode. */
+            if (   !fNstGstInLongMode
+                && !fVirtHostInLongMode)
+            {
+                /* PCIDE should not be set. */
+                if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
+                { /* likely */ }
+                else
+                {
+                }
+
+                /* Bits 63:32 of RIP MBZ. */
+                if (!pVmcs->u64HostRip.s.Hi)
+                { /* likely */ }
+                else
+                {
+                    /* fail */
+                }
+            }
+            else
+            {
+                Log(("%s: Host/guest cannot be in long mode when logical processor is not in long mode\n", pszInstr));
+                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostGuestLongMode;
+                return VERR_VMX_VMENTRY_FAILED;
+            }
+        }
+    }
     else
     {
         /* Host address-space size for 32-bit CPUs. */
-        bool const fNstGstLongMode   = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
-        if (   !fNstGstLongMode
-            && !fVirtHostLongMode)
+        bool const fNstGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
+        if (   !fNstGstInLongMode
+            && !fVirtHostInLongMode)
         { /* likely */ }
         else
         {
             Log(("%s: Host/guest cannot be in long mode on 32-bit CPUs\n", pszInstr));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostGuestLongMode;
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostGuestLongModeNoCpu;
             return VERR_VMX_VMENTRY_FAILED;
         }
     }
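The new 64-bit-host block above mirrors the host address-space-size checks the code's own comment points to in Intel SDM section 26.2.4. A condensed, self-contained sketch of that decision table; the helper and field names are hypothetical, not the VirtualBox implementation:

/* Simplified host address-space-size predicate per Intel SDM 26.2.4. */
#include <stdbool.h>
#include <stdint.h>

typedef struct HOSTSTATE
{
    bool     fCpuInLongMode;      /* logical processor currently in IA-32e mode */
    bool     fEntryIa32eGuest;    /* VM-entry control "IA-32e mode guest" */
    bool     fExitHostAddrSpace;  /* VM-exit control "host address-space size" */
    bool     fHostCr4Pae;
    bool     fHostCr4Pcide;
    uint64_t uHostRip;
} HOSTSTATE;

static bool isCanonical(uint64_t uAddr)
{
    /* Canonical for 48-bit linear addresses: bits 63:47 all zero or all one. */
    uint64_t const uTop = uAddr >> 47;
    return uTop == 0 || uTop == 0x1ffff;
}

static bool hostAddrSpaceChecksOk(HOSTSTATE const *p)
{
    if (p->fCpuInLongMode)
    {
        /* In IA-32e mode: host address-space size must be 1, host CR4.PAE set,
           and the host RIP canonical. */
        return p->fExitHostAddrSpace && p->fHostCr4Pae && isCanonical(p->uHostRip);
    }
    /* Outside IA-32e mode: both 64-bit controls must be 0, host CR4.PCIDE clear,
       and bits 63:32 of the host RIP zero. */
    return !p->fEntryIa32eGuest && !p->fExitHostAddrSpace && !p->fHostCr4Pcide
        && (p->uHostRip >> 32) == 0;
}

int main(void)
{
    HOSTSTATE const St = { true, true, true, true, false, UINT64_C(0xffffffff81000000) };
    return hostAddrSpaceChecksOk(&St) ? 0 : 1;
}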
…
     /* VM-entry MSR-load count and VM-entry MSR-load area address. */
-    uint8_t const cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth;
     if (pVmcs->u32EntryMsrLoadCount)
     {
         if (   (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
-            || (pVmcs->u64AddrEntryMsrLoad.u >> cMaxPhysAddrWidth)
+            || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
         {
…
     /* VM-exit MSR-store count and VM-exit MSR-store area address. */
-    uint8_t const cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth;
     if (pVmcs->u32ExitMsrStoreCount)
     {
         if (   (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
-            || (pVmcs->u64AddrExitMsrStore.u >> cMaxPhysAddrWidth)
+            || (pVmcs->u64AddrExitMsrStore.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
         {
…
     {
         if (   (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
-            || (pVmcs->u64AddrExitMsrLoad.u >> cMaxPhysAddrWidth)
+            || (pVmcs->u64AddrExitMsrLoad.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
         {
…
     /* IO bitmaps physical addresses. */
-    uint8_t const cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth;
-    Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
     if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
     {
         if (   (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
-            || (pVmcs->u64AddrIoBitmapA.u >> cMaxPhysAddrWidth)
+            || (pVmcs->u64AddrIoBitmapA.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
         {
…
         if (   (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
-            || (pVmcs->u64AddrIoBitmapB.u >> cMaxPhysAddrWidth)
+            || (pVmcs->u64AddrIoBitmapB.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
         {
…
     {
         if (   (pVmcs->u64AddrMsrBitmap.u & X86_PAGE_4K_OFFSET_MASK)
-            || (pVmcs->u64AddrMsrBitmap.u >> cMaxPhysAddrWidth)
+            || (pVmcs->u64AddrMsrBitmap.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrMsrBitmap.u))
         {
…
         RTGCPHYS GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
         if (   (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
-            || (GCPhysVirtApic >> cMaxPhysAddrWidth)
+            || (GCPhysVirtApic >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
         {
…
         RTGCPHYS GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
         if (   (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
-            || (GCPhysApicAccess >> cMaxPhysAddrWidth)
+            || (GCPhysApicAccess >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
         {
…
         RTGCPHYS GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
         if (   ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
-            || ( GCPhysVmreadBitmap >> cMaxPhysAddrWidth)
+            || ( GCPhysVmreadBitmap >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
         {
…
         RTGCPHYS GCPhysVmwriteBitmap = pVmcs->u64AddrVmreadBitmap.u;
         if (   ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
-            || ( GCPhysVmwriteBitmap >> cMaxPhysAddrWidth)
+            || ( GCPhysVmwriteBitmap >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
         {
…
         return rc;
     }
-
-    /*
-     * Clear the high 32-bits of all natural-width fields in the VMCS if the guest
-     * does not support long mode.
-     */
-    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
-        iemVmxVmcsFixNaturalWidthFields(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));

     /*
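Every region checked above follows the same three-part pattern: the pointer must honour the region's alignment, fit within the supported physical-address width, and be backed by normal RAM. A hypothetical helper, not part of this changeset, that captures the pattern with the PGM lookup stubbed out:

/* Sketch of the recurring "alignment + width + normal RAM" validation. */
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t RTGCPHYS;

static bool pgmPhysIsNormalStub(RTGCPHYS GCPhys)
{
    (void)GCPhys;
    return true;  /* the real code asks PGM whether the page is normal guest RAM */
}

static bool vmxIsRegionAddrValid(RTGCPHYS GCPhys, uint64_t fAlignMask, unsigned cAddrWidth)
{
    if (GCPhys & fAlignMask)            /* e.g. a 4K page-offset mask or a 16-byte MSR-area mask */
        return false;
    if (GCPhys >> cAddrWidth)           /* beyond the supported physical-address width */
        return false;
    return pgmPhysIsNormalStub(GCPhys); /* must be backed by normal memory */
}

int main(void)
{
    return vmxIsRegionAddrValid(0x1000, 0xfff, 32) ? 0 : 1; /* page-aligned, below 4 GB */
}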
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r74097 → r74113

     }

+    /* VMX (VMXON, VMCS region and related data structures') physical address width (depends on long-mode). */
+    pFeatures->cVmxMaxPhysAddrWidth = pFeatures->fLongMode ? pFeatures->cMaxPhysAddrWidth : 32;
+
     if (   pExtLeaf
         && pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD)
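This is where the width consumed by IEM_VMX_MAX_PHYSADDR_WIDTH comes from: the CPU's full physical-address width on long-mode-capable profiles, capped at 32 bits otherwise. A tiny sketch of the derivation, with illustrative names rather than the CPUM types:

#include <assert.h>
#include <stdint.h>

/* Derive the VMX region address width from the guest feature set. */
static uint8_t vmxMaxPhysAddrWidth(int fLongMode, uint8_t cMaxPhysAddrWidth)
{
    return fLongMode ? cMaxPhysAddrWidth : 32;
}

int main(void)
{
    assert(vmxMaxPhysAddrWidth(1, 39) == 39); /* long-mode guest keeps its full width */
    assert(vmxMaxPhysAddrWidth(0, 36) == 32); /* 32-bit-only guest is capped at 4 GB */
    return 0;
}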