Changeset 74429 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Sep 24, 2018 5:08:48 AM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 2 edited
Legend:
- Unmodified: context lines (no marker)
- Added: lines marked with "+"
- Removed: lines marked with "-"
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r74421 -> r74429)

@@ -342 +342 @@
     VMXV_DIAG_DESC(kVmxVDiag_Vmexit_HostPdpte2Rsvd       , "HostPdpte2Rsvd"       ),
     VMXV_DIAG_DESC(kVmxVDiag_Vmexit_HostPdpte3Rsvd       , "HostPdpte3Rsvd"       ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrLoad              , "MsrLoad"              ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrLoadCount         , "MsrLoadCount"         ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrLoadPtrReadPhys   , "MsrLoadPtrReadPhys"   ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrLoadRing3         , "MsrLoadRing3"         ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrLoadRsvd          , "MsrLoadRsvd"          ),
     VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStore             , "MsrStore"             ),
     VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStoreCount        , "MsrStoreCount"        ),
-    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStorePtrReadPhys  , "MsrStorePtrReadPhys"  ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStorePtrWritePhys , "MsrStorePtrWritePhys" ),
     VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStoreRing3        , "MsrStoreRing3"        ),
     VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStoreRsvd         , "MsrStoreRsvd"         )
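The table above pairs each kVmxVDiag_* enumerator with a short human-readable name, so that failure paths can log a stable diagnostic string. Below is a minimal, self-contained sketch of that enum-to-string pattern; the names DEMODIAG, g_apszDemoDiagDesc and DemoDiagToString are invented for illustration and are not the actual VirtualBox definitions behind VMXV_DIAG_DESC.

    #include <stdio.h>

    /* Hypothetical subset of the diagnostic enum; the real enum lives in the VBox headers. */
    typedef enum { kDiag_MsrLoad = 0, kDiag_MsrLoadCount, kDiag_MsrStorePtrWritePhys, kDiag_End } DEMODIAG;

    /* Descriptor table kept in the same order as the enum, mirroring the style of the table above. */
    static const char * const g_apszDemoDiagDesc[] =
    {
        "MsrLoad", "MsrLoadCount", "MsrStorePtrWritePhys"
    };

    /* Bounds-checked enum-to-string lookup for log/failure messages. */
    static const char *DemoDiagToString(DEMODIAG enmDiag)
    {
        if ((unsigned)enmDiag < (unsigned)kDiag_End)
            return g_apszDemoDiagDesc[enmDiag];
        return "Unknown";
    }

    int main(void)
    {
        printf("%s\n", DemoDiagToString(kDiag_MsrLoadCount)); /* prints "MsrLoadCount" */
        return 0;
    }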
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r74423 -> r74429)

@@ -1284 +1284 @@
 
 /**
+ * Checks if the given auto-load/store MSR area count is valid for the
+ * implementation.
+ *
+ * @returns @c true if it's within the valid limit, @c false otherwise.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   uMsrCount   The MSR area count to check.
+ */
+DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PVMCPU pVCpu, uint32_t uMsrCount)
+{
+    uint64_t const u64VmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
+    uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
+    Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
+    if (uMsrCount <= cMaxSupportedMsrs)
+        return true;
+    return false;
+}
+
+
+/**
  * Flushes the current VMCS contents back to guest memory.
  *

@@ -1314 +1333 @@
  * VMREAD common (memory/register) instruction execution worker
  *
+ * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cbInstr     The instruction length.

@@ -1417 +1437 @@
  * VMREAD (64-bit register) instruction execution worker.
  *
+ * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cbInstr     The instruction length.

@@ -1442 +1463 @@
  * VMREAD (32-bit register) instruction execution worker.
  *
+ * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cbInstr     The instruction length.

@@ -1469 +1491 @@
  * VMREAD (memory) instruction execution worker.
  *
+ * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cbInstr     The instruction length.

@@ -1521 +1544 @@
  * VMWRITE instruction execution worker.
  *
+ * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cbInstr     The instruction length.

@@ -1668 +1692 @@
  * VMCLEAR instruction execution worker.
  *
+ * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cbInstr     The instruction length.

@@ -1778 +1803 @@
  * VMPTRST instruction execution worker.
  *
+ * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cbInstr     The instruction length.

@@ -1826 +1852 @@
  * VMPTRLD instruction execution worker.
  *
+ * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cbInstr     The instruction length.

@@ -1957 +1984 @@
  * VMXON instruction execution worker.
  *
+ * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cbInstr     The instruction length.

@@ -4108 +4136 @@
 
 /**
- * Loads the guest auto-load MSRs area as part of VM-entry.
+ * Loads the guest MSRs from the VM-entry auto-load MSRs as part of VM-entry.
  *
  * @returns VBox status code.

@@ -4124 +4152 @@
 
     /*
+     * The VM-entry MSR-load area address need not be a valid guest-physical address if the
+     * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
+     * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
+     */
+    uint32_t const cMsrs = pVmcs->u32EntryMsrLoadCount;
+    if (!cMsrs)
+        return VINF_SUCCESS;
+
+    /*
      * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
      * exceeded including possibly raising #MC exceptions during VMX transition. Our
      * implementation shall fail VM-entry with an VMX_EXIT_ERR_MSR_LOAD VM-exit.
      */
-    uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
-    uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64GuestVmxMiscMsr);
-    uint32_t const cMsrs = pVmcs->u32EntryMsrLoadCount;
-    Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
-    if (cMsrs <= cMaxSupportedMsrs)
+    bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
+    if (fIsMsrCountValid)
     { /* likely */ }
     else

@@ -4139 +4173 @@
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
     }
-
-    /*
-     * The VM-entry MSR-load area address need not be a valid guest-physical address if the
-     * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
-     * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
-     */
-    if (cMsrs == 0)
-        return VINF_SUCCESS;
 
     RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;

@@ -4160 +4186 @@
             && pMsr->u32Msr != MSR_K8_FS_BASE
             && pMsr->u32Msr != MSR_K8_GS_BASE
+            && pMsr->u32Msr != MSR_K6_EFER
             && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
             && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL)

@@ -4416 +4443 @@
  * VMLAUNCH/VMRESUME instruction execution worker.
  *
+ * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cbInstr     The instruction length.

@@ -4860 +4888 @@
 
     /*
+     * The VM-exit MSR-store area address need not be a valid guest-physical address if the
+     * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
+     * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
+     */
+    uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
+    if (!cMsrs)
+        return VINF_SUCCESS;
+
+    /*
      * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count
      * is exceeded including possibly raising #MC exceptions during VMX transition. Our
      * implementation causes a VMX-abort followed by a triple-fault.
      */
-    uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
-    uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64GuestVmxMiscMsr);
-    uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
-    Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
-    if (cMsrs <= cMaxSupportedMsrs)
+    bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
+    if (fIsMsrCountValid)
     { /* likely */ }
     else
         IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
 
-    /*
-     * The VM-exit MSR-store area address need not be a valid guest-physical address if the
-     * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
-     * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
-     */
-    if (cMsrs == 0)
-        return VINF_SUCCESS;
-
     PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
     Assert(pMsr);

@@ -4896 +4922 @@
              * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-exit.
              * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort
-             * recording the MSR index in a VirtualBox specific VMCS field and indicated further by our
+             * recording the MSR index in the auxiliary info. field and indicated further by our
              * own, specific diagnostic code. Later, we can try implement handling of the MSR in ring-0
              * if possible, or come up with a better, generic solution.

@@ -4921 +4947 @@
     {
         AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
-        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrReadPhys);
+        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
     }
 

@@ -5209 +5235 @@
 
 /**
+ * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pszInstr    The VMX instruction name (for logging purposes).
+ */
+IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
+{
+    /*
+     * Load host MSRs.
+     * See Intel spec. 27.6 "Loading MSRs".
+     */
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    const char *const pszFailure = "VMX-abort";
+
+    /*
+     * The VM-exit MSR-load area address need not be a valid guest-physical address if the
+     * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
+     * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
+     */
+    uint32_t const cMsrs = pVmcs->u32ExitMsrLoadCount;
+    if (!cMsrs)
+        return VINF_SUCCESS;
+
+    /*
+     * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count
+     * is exceeded including possibly raising #MC exceptions during VMX transition. Our
+     * implementation causes a VMX-abort followed by a triple-fault.
+     */
+    bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
+    if (fIsMsrCountValid)
+    { /* likely */ }
+    else
+        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
+
+    RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
+    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
+                                     GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
+    if (RT_SUCCESS(rc))
+    {
+        PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
+        Assert(pMsr);
+        for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
+        {
+            if (   !pMsr->u32Reserved
+                && pMsr->u32Msr != MSR_K8_FS_BASE
+                && pMsr->u32Msr != MSR_K8_GS_BASE
+                && pMsr->u32Msr != MSR_K6_EFER
+                && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
+                && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL)
+            {
+                VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
+                if (rcStrict == VINF_SUCCESS)
+                    continue;
+
+                /*
+                 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-exit.
+                 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort
+                 * recording the MSR index in the auxiliary info. field and indicated further by our
+                 * own, specific diagnostic code. Later, we can try implement handling of the MSR in ring-0
+                 * if possible, or come up with a better, generic solution.
+                 */
+                pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
+                VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
+                                       ? kVmxVDiag_Vmexit_MsrLoadRing3
+                                       : kVmxVDiag_Vmexit_MsrLoad;
+                IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
+            }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
+        }
+    }
+    else
+    {
+        AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
+        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
+    }
+
+    NOREF(uExitReason);
+    NOREF(pszFailure);
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Loads the host state as part of VM-exit.
  *
+ * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   uExitReason     The VM-exit reason (for logging purposes).

@@ -5276 +5388 @@
     }
 
-    /** @todo NSTVMX: rest of host state loading (loading MSRs). */
+    Assert(rcStrict == VINF_SUCCESS);
+
+    /* Load MSRs from the VM-exit auto-load MSR area. */
+    int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
+    if (RT_FAILURE(rc))
+    {
+        Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
+        return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
+    }
 
     return VINF_SUCCESS;

@@ -5285 +5405 @@
  * VMX VM-exit handler.
  *
+ * @returns Strict VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   uExitReason     The VM-exit reason.
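Both new count checks funnel through iemVmxIsAutoMsrCountValid, which bounds the guest-supplied count by VMX_MISC_MAX_MSRS(CPUMGetGuestIa32VmxMisc(pVCpu)). Per the Intel SDM (IA32_VMX_MISC, appendix A.6), bits 27:25 of that MSR encode a value N such that the recommended maximum number of entries in each VM-entry/VM-exit MSR list is 512 * (N + 1), and each list entry is a 16-byte record (MSR index, reserved bits, MSR data). The sketch below illustrates that arithmetic with invented names (AUTOMSRENTRY, MaxAutoMsrs, IsAutoMsrCountValid); it is a plausible equivalent for readers following the check, not the VirtualBox implementation of VMXAUTOMSR or VMX_MISC_MAX_MSRS.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for VBox's VMXAUTOMSR: one 16-byte entry of a VMX
       auto-load/store MSR list (Intel SDM 24.7.2 / 24.8.2). */
    typedef struct AUTOMSRENTRY
    {
        uint32_t u32Msr;        /* Bits  31:0  - MSR index. */
        uint32_t u32Reserved;   /* Bits  63:32 - reserved, must be zero. */
        uint64_t u64Value;      /* Bits 127:64 - MSR data to load or store. */
    } AUTOMSRENTRY;

    /* Hypothetical stand-in for VMX_MISC_MAX_MSRS: IA32_VMX_MISC bits 27:25 hold N,
       and the recommended maximum list length is 512 * (N + 1) entries. */
    static uint32_t MaxAutoMsrs(uint64_t uVmxMisc)
    {
        uint32_t const uField = (uint32_t)((uVmxMisc >> 25) & 0x7);
        return 512 * (uField + 1);
    }

    /* Same shape as the new iemVmxIsAutoMsrCountValid check: a guest-provided
       count is acceptable only if it does not exceed the advertised limit. */
    static bool IsAutoMsrCountValid(uint64_t uVmxMisc, uint32_t cMsrs)
    {
        return cMsrs <= MaxAutoMsrs(uVmxMisc);
    }

    int main(void)
    {
        uint64_t const uVmxMisc = 0; /* Bits 27:25 = 0 -> at most 512 entries per list. */
        printf("entry size=%u bytes, limit=%u, 512 ok=%d, 513 ok=%d\n",
               (unsigned)sizeof(AUTOMSRENTRY), MaxAutoMsrs(uVmxMisc),
               IsAutoMsrCountValid(uVmxMisc, 512), IsAutoMsrCountValid(uVmxMisc, 513));
        return 0;
    }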