Changeset 74187 in vbox
- Timestamp: Sep 11, 2018 8:27:27 AM
- Location: trunk
- Files: 3 edited
trunk/include/VBox/vmm/hm_vmx.h (r74183 → r74187)

 @@ -3579 +3579 @@
     kVmxVDiag_Vmentry_GuestPatMsr,
     kVmxVDiag_Vmentry_GuestPcide,
+    kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys,
+    kVmxVDiag_Vmentry_GuestPdpte0Rsvd,
+    kVmxVDiag_Vmentry_GuestPdpte1Rsvd,
+    kVmxVDiag_Vmentry_GuestPdpte2Rsvd,
+    kVmxVDiag_Vmentry_GuestPdpte3Rsvd,
     kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf,
     kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf,
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r74183 → r74187)

 @@ -181 +181 @@
     VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPatMsr            , "GuestPatMsr"           ),
     VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPcide             , "GuestPcide"            ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys  , "GuestPdpteCr3ReadPhys" ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte0Rsvd        , "GuestPdpte0Rsvd"       ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte1Rsvd        , "GuestPdpte1Rsvd"       ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte2Rsvd        , "GuestPdpte2Rsvd"       ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte3Rsvd        , "GuestPdpte3Rsvd"       ),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf  , "GuestPndDbgXcptBsNoTf" ),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf    , "GuestPndDbgXcptBsTf"   ),
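The new VMXV_DIAG_DESC entries give each PDPTE diagnostic a printable name, so a failed VM-entry check can be reported as a readable string rather than a raw enum value. As a rough illustration of that enum-to-string pattern (a standalone sketch, not VirtualBox code: the type DIAGKIND, the array g_apszDiagDesc and the values are invented here), a lookup works like this:

#include <stdio.h>

/* Sketch: a diagnostic enum and a description table kept in the same order. */
typedef enum { kDiag_GuestPdpteCr3ReadPhys = 0, kDiag_GuestPdpte0Rsvd, kDiag_End } DIAGKIND;

static const char * const g_apszDiagDesc[] =
{
    "GuestPdpteCr3ReadPhys",   /* reading the PDPTEs at guest CR3 failed */
    "GuestPdpte0Rsvd"          /* reserved bits set in PDPTE 0           */
};

int main(void)
{
    DIAGKIND enmDiag = kDiag_GuestPdpte0Rsvd;
    printf("VM-entry failed: %s\n", g_apszDiagDesc[enmDiag]);   /* prints "GuestPdpte0Rsvd" */
    return 0;
}

Such a table only works if the enumerator order matches the descriptor order, which is presumably why hm_vmx.h and HMVMXAll.cpp add the same five entries in the same order in this changeset.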
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r74185 → r74187)

 @@ -709 +709 @@

 /**
- * Gets a segment register from the VMCS given its index.
+ * Gets a guest segment register from the VMCS given its index.
  *
  * @returns VBox status code.

 @@ -2065 +2065 @@
         case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
         case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
-        default:          return kVmxVDiag_Vmentry_GuestSegBaseSs;
+        case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
+        IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
     }
 }

 @@ -2085 +2086 @@
         case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
         case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
-        default:
-            Assert(iSegReg == X86_SREG_SS);
-            return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
+        case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
+        IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
     }
 }

 @@ -2107 +2107 @@
         case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
         case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
-        default:
-            Assert(iSegReg == X86_SREG_SS);
-            return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
+        case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
+        IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
     }
 }

 @@ -2129 +2128 @@
         case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
         case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
-        default:
-            Assert(iSegReg == X86_SREG_SS);
-            return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
+        case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
+        IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
     }
 }

 @@ -2151 +2149 @@
         case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
         case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
-        default:
-            Assert(iSegReg == X86_SREG_SS);
-            return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
+        case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
+        IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
     }
 }

 @@ -2173 +2170 @@
         case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
         case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
-        default:
-            Assert(iSegReg == X86_SREG_SS);
-            return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
+        case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
+        IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
     }
 }

 @@ -2195 +2191 @@
         case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
         case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
-        default:
-            Assert(iSegReg == X86_SREG_SS);
-            return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
+        case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
+        IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
     }
 }

 @@ -2217 +2212 @@
         case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
         case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
-        default:
-            Assert(iSegReg == X86_SREG_SS);
-            return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
+        case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
+        IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
     }
 }

 @@ -2238 +2232 @@
         case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
         case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
-        default:
-            Assert(iSegReg == X86_SREG_SS);
-            return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
+        case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
+        IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
     }
 }

 @@ -2260 +2253 @@
         case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
         case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
-        default:
-            Assert(iSegReg == X86_SREG_SS);
-            return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
+        case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
+        IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
+    }
+}
+
+
+/**
+ * Gets the instruction diagnostic for CR3 referenced PDPTE reserved bits failure
+ * during VM-entry of a nested-guest.
+ *
+ * @param   iSegReg     The PDPTE entry index.
+ */
+IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
+{
+    Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
+    switch (iPdpte)
+    {
+        case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
+        case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
+        case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
+        case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
+        IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
     }
 }

 @@ -2872 +2884 @@
                               : pVmcs->u64GuestRFlags.s.Lo;
     if (   !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
-        && (uGuestRFlags &
+        && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
     { /* likely */ }
     else

 @@ -3128 +3140 @@

 /**
+ * Checks if the PDPTEs referenced by the nested-guest CR3 are valid as part of
+ * VM-entry.
+ *
+ * @returns @c true if all PDPTEs are valid, @c false otherwise.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pszInstr    The VMX instruction name (for logging purposes).
+ * @param   pVmcs       Pointer to the virtual VMCS.
+ */
+IEM_STATIC int iemVmxVmentryCheckGuestPdptesForCr3(PVMCPU pVCpu, const char *pszInstr, PVMXVVMCS pVmcs)
+{
+    /*
+     * Check PDPTEs.
+     * See Intel spec. 4.4.1 "PDPTE Registers".
+     */
+    uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
+    const char *const pszFailure = "VM-exit";
+
+    X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
+    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
+    if (RT_SUCCESS(rc))
+    {
+        for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
+        {
+            if (   !(aPdptes[iPdpte].u & X86_PDPE_P)
+                || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
+            { /* likely */ }
+            else
+            {
+                pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_PDPTE;
+                VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
+            }
+        }
+    }
+    else
+    {
+        pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_PDPTE;
+        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
+    }
+
+    NOREF(pszFailure);
+    return rc;
+}
+
+
+/**
+ * Checks guest PDPTEs as part of VM-entry.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pszInstr    The VMX instruction name (for logging purposes).
+ */
+IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPU pVCpu, const char *pszInstr)
+{
+    /*
+     * Guest PDPTEs.
+     * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries".
+     */
+    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
+
+    /* Check PDPTes if the VM-entry is to a guest using PAE paging. */
+    int rc;
+    if (   !fGstInLongMode
+        && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
+        && (pVmcs->u64GuestCr0.u & X86_CR0_PG))
+    {
+        /*
+         * We don't support nested-paging for nested-guests yet.
+         *
+         * Without nested-paging for nested-guests, PDPTEs in the VMCS are not used,
+         * rather we need to check the PDPTEs referenced by the guest CR3.
+         */
+        rc = iemVmxVmentryCheckGuestPdptesForCr3(pVCpu, pszInstr, pVmcs);
+    }
+    else
+        rc = VINF_SUCCESS;
+    return rc;
+}
+
+
+/**
  * Checks guest-state as part of VM-entry.
  *

 @@ -3161 +3254 @@

     rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
+    if (rc == VINF_SUCCESS)
+    { /* likely */ }
+    else
+        return rc;
+
+    rc = iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
     if (rc == VINF_SUCCESS)
     { /* likely */ }
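The heart of the change is iemVmxVmentryCheckGuestPdptesForCr3: when the nested guest is entered with PAE paging active (CR4.PAE and CR0.PG set, IA-32e mode guest not requested), the four PDPTEs at the 32-byte-aligned address in guest CR3 are read from guest physical memory, and every present entry must have no reserved (must-be-zero) bits set, per the referenced Intel SDM checks. The following standalone sketch shows only that core check; it is not the VirtualBox implementation, and RESERVED_MASK is an assumption covering just the architecturally fixed low reserved bits (2:1 and 8:5), ignoring the MAXPHYADDR-dependent high bits a full check would also treat as reserved.

#include <stdint.h>
#include <stdio.h>

#define PDPTE_P        UINT64_C(0x1)      /* present bit                             */
#define RESERVED_MASK  UINT64_C(0x1E6)    /* illustrative: must-be-zero bits 2:1, 8:5 */

/* Returns the index of the first invalid PDPTE, or -1 if all four pass. */
static int CheckPaePdptes(const uint64_t aPdptes[4])
{
    for (int i = 0; i < 4; i++)
        if (   (aPdptes[i] & PDPTE_P)          /* only present entries are validated   */
            && (aPdptes[i] & RESERVED_MASK))   /* any must-be-zero bit set -> invalid  */
            return i;
    return -1;
}

int main(void)
{
    uint64_t aPdptes[4] = { 0x12345001, 0x0 /* not present: ignored */, 0x23456001, 0x34567007 };
    printf("first bad PDPTE: %d\n", CheckPaePdptes(aPdptes));   /* 3: bits 2:1 set while present */
    return 0;
}

On failure the changeset additionally records VMX_ENTRY_FAIL_QUAL_PDPTE as the exit qualification and selects a per-index diagnostic via iemVmxGetDiagVmentryPdpteRsvd, so the offending entry (0-3) can be identified from the reported diagnostic.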