Changeset 76200 in vbox for trunk/src/VBox/VMM/VMMAll
Timestamp: Dec 13, 2018 9:23:47 AM
Location:  trunk/src/VBox/VMM/VMMAll
Files:     3 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
r76147 → r76200

…
-/**
- * Get fixed IA32_FEATURE_CONTROL value for NEM and cpumMsrRd_Ia32FeatureControl.
- *
- * @returns Fixed IA32_FEATURE_CONTROL value.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32FeatureControl(PVMCPU pVCpu)
-{
-    /* Always report the MSR lock bit as set, in order to prevent guests from modifiying this MSR. */
-    uint64_t fFeatCtl = MSR_IA32_FEATURE_CONTROL_LOCK;
-
-    /* Report VMX features. */
-    if (pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fVmx)
-        fFeatCtl |= MSR_IA32_FEATURE_CONTROL_VMXON;
-
-    return fFeatCtl;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32FeatureControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32FeatureControl(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64FeatCtrl;
     return VINF_SUCCESS;
 }
…
-/**
- * Gets IA32_VMX_BASIC for IEM and cpumMsrRd_Ia32VmxBasic.
- *
- * @returns IA32_VMX_BASIC value.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxBasic(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        uVmxMsr = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID,        VMX_V_VMCS_REVISION_ID)
-                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE,      VMX_V_VMCS_SIZE)
-                | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH, !pGuestFeatures->fLongMode)
-                | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON,       0)
-                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE,  VMX_BASIC_MEM_TYPE_WB)
-                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_INS_OUTS,  pGuestFeatures->fVmxInsOutInfo)
-                | RT_BF_MAKE(VMX_BF_BASIC_TRUE_CTLS,      0);
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxBasic(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxBasic(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Basic;
     return VINF_SUCCESS;
 }

-/**
- * Gets IA32_VMX_PINBASED_CTLS for IEM and cpumMsrRd_Ia32VmxPinbasedCtls.
- *
- * @returns IA32_VMX_PINBASED_CTLS value.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxPinbasedCtls(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        uint32_t const fFeatures = (pGuestFeatures->fVmxExtIntExit   << VMX_BF_PIN_CTLS_EXT_INT_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxNmiExit      << VMX_BF_PIN_CTLS_NMI_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxVirtNmi      << VMX_BF_PIN_CTLS_VIRT_NMI_SHIFT)
-                                 | (pGuestFeatures->fVmxPreemptTimer << VMX_BF_PIN_CTLS_PREEMPT_TIMER_SHIFT)
-                                 | (pGuestFeatures->fVmxPostedInt    << VMX_BF_PIN_CTLS_POSTED_INT_SHIFT);
-        /* Set the default1 class bits. See Intel spec. A.3.1 "Pin-Based VM-Execution Controls". */
-        uint32_t const fAllowed0 = VMX_PIN_CTLS_DEFAULT1;
-        uint32_t const fAllowed1 = fFeatures | VMX_PIN_CTLS_DEFAULT1;
-        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n",
-                                                         fAllowed0, fAllowed1, fFeatures));
-        uVmxMsr = RT_MAKE_U64(fAllowed0, fAllowed1);
-        LogRel(("fVmxExtIntExit=%u fFeatures=%#RX32 uVmxMsr=%#RX64\n", !!pGuestFeatures->fVmxExtIntExit, fFeatures, uVmxMsr));
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxPinbasedCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.PinCtls.u;
     return VINF_SUCCESS;
 }

-/**
- * Gets IA32_VMX_PROCBASED_CTLS for IEM and cpumMsrRd_Ia32VmxProcbasedCtls.
- *
- * @returns IA32_VMX_PROCBASED_CTLS value.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxProcbasedCtls(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        uint32_t const fFeatures = (pGuestFeatures->fVmxIntWindowExit    << VMX_BF_PROC_CTLS_INT_WINDOW_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxTscOffsetting    << VMX_BF_PROC_CTLS_USE_TSC_OFFSETTING_SHIFT)
-                                 | (pGuestFeatures->fVmxHltExit          << VMX_BF_PROC_CTLS_HLT_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxInvlpgExit       << VMX_BF_PROC_CTLS_INVLPG_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxMwaitExit        << VMX_BF_PROC_CTLS_MWAIT_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxRdpmcExit        << VMX_BF_PROC_CTLS_RDPMC_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxRdtscExit        << VMX_BF_PROC_CTLS_RDTSC_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxCr3LoadExit      << VMX_BF_PROC_CTLS_CR3_LOAD_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxCr3StoreExit     << VMX_BF_PROC_CTLS_CR3_STORE_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxCr8LoadExit      << VMX_BF_PROC_CTLS_CR8_LOAD_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxCr8StoreExit     << VMX_BF_PROC_CTLS_CR8_STORE_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxUseTprShadow     << VMX_BF_PROC_CTLS_USE_TPR_SHADOW_SHIFT)
-                                 | (pGuestFeatures->fVmxNmiWindowExit    << VMX_BF_PROC_CTLS_NMI_WINDOW_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxMovDRxExit       << VMX_BF_PROC_CTLS_MOV_DR_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxUncondIoExit     << VMX_BF_PROC_CTLS_UNCOND_IO_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxUseIoBitmaps     << VMX_BF_PROC_CTLS_USE_IO_BITMAPS_SHIFT)
-                                 | (pGuestFeatures->fVmxMonitorTrapFlag  << VMX_BF_PROC_CTLS_MONITOR_TRAP_FLAG_SHIFT)
-                                 | (pGuestFeatures->fVmxUseMsrBitmaps    << VMX_BF_PROC_CTLS_USE_MSR_BITMAPS_SHIFT)
-                                 | (pGuestFeatures->fVmxMonitorExit      << VMX_BF_PROC_CTLS_MONITOR_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxPauseExit        << VMX_BF_PROC_CTLS_PAUSE_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxSecondaryExecCtls << VMX_BF_PROC_CTLS_USE_SECONDARY_CTLS_SHIFT);
-        /* Set the default1 class bits. See Intel spec. A.3.2 "Primary Processor-Based VM-Execution Controls". */
-        uint32_t const fAllowed0 = VMX_PROC_CTLS_DEFAULT1;
-        uint32_t const fAllowed1 = fFeatures | VMX_PROC_CTLS_DEFAULT1;
-        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
-                                                         fAllowed1, fFeatures));
-        uVmxMsr = RT_MAKE_U64(fAllowed0, fAllowed1);
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxProcbasedCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.ProcCtls.u;
     return VINF_SUCCESS;
 }

-/**
- * Gets IA32_VMX_EXIT_CTLS for IEM and cpumMsrRd_Ia32VmxProcbasedCtls.
- *
- * @returns IA32_VMX_EXIT_CTLS value.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxExitCtls(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        uint32_t const fFeatures = (pGuestFeatures->fVmxExitSaveDebugCtls << VMX_BF_EXIT_CTLS_SAVE_DEBUG_SHIFT)
-                                 | (pGuestFeatures->fVmxHostAddrSpaceSize << VMX_BF_EXIT_CTLS_HOST_ADDR_SPACE_SIZE_SHIFT)
-                                 | (pGuestFeatures->fVmxExitAckExtInt     << VMX_BF_EXIT_CTLS_ACK_EXT_INT_SHIFT)
-                                 | (pGuestFeatures->fVmxExitSavePatMsr    << VMX_BF_EXIT_CTLS_SAVE_PAT_MSR_SHIFT)
-                                 | (pGuestFeatures->fVmxExitLoadPatMsr    << VMX_BF_EXIT_CTLS_LOAD_PAT_MSR_SHIFT)
-                                 | (pGuestFeatures->fVmxExitSaveEferMsr   << VMX_BF_EXIT_CTLS_SAVE_EFER_MSR_SHIFT)
-                                 | (pGuestFeatures->fVmxExitLoadEferMsr   << VMX_BF_EXIT_CTLS_LOAD_EFER_MSR_SHIFT)
-                                 | (pGuestFeatures->fVmxSavePreemptTimer  << VMX_BF_EXIT_CTLS_SAVE_PREEMPT_TIMER_SHIFT);
-        /* Set the default1 class bits. See Intel spec. A.4 "VM-exit Controls". */
-        uint32_t const fAllowed0 = VMX_EXIT_CTLS_DEFAULT1;
-        uint32_t const fAllowed1 = fFeatures | VMX_EXIT_CTLS_DEFAULT1;
-        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
-                                                         fAllowed1, fFeatures));
-        uVmxMsr = RT_MAKE_U64(fAllowed0, fAllowed1);
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxExitCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxExitCtls(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.ExitCtls.u;
     return VINF_SUCCESS;
 }

-/**
- * Gets IA32_VMX_ENTRY_CTLS for IEM and cpumMsrRd_Ia32VmxEntryCtls.
- *
- * @returns IA32_VMX_ENTRY_CTLS value.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxEntryCtls(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        uint32_t const fFeatures = (pGuestFeatures->fVmxEntryLoadDebugCtls << VMX_BF_ENTRY_CTLS_LOAD_DEBUG_SHIFT)
-                                 | (pGuestFeatures->fVmxIa32eModeGuest     << VMX_BF_ENTRY_CTLS_IA32E_MODE_GUEST_SHIFT)
-                                 | (pGuestFeatures->fVmxEntryLoadEferMsr   << VMX_BF_ENTRY_CTLS_LOAD_EFER_MSR_SHIFT)
-                                 | (pGuestFeatures->fVmxEntryLoadPatMsr    << VMX_BF_ENTRY_CTLS_LOAD_PAT_MSR_SHIFT);
-        /* Set the default1 class bits. See Intel spec. A.5 "VM-entry Controls". */
-        uint32_t const fAllowed0 = VMX_ENTRY_CTLS_DEFAULT1;
-        uint32_t const fAllowed1 = fFeatures | VMX_ENTRY_CTLS_DEFAULT1;
-        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed0=%#RX32 fFeatures=%#RX32\n", fAllowed0,
-                                                         fAllowed1, fFeatures));
-        uVmxMsr = RT_MAKE_U64(fAllowed0, fAllowed1);
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxEntryCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.EntryCtls.u;
     return VINF_SUCCESS;
 }

-/**
- * Gets IA32_VMX_MISC for IEM and cpumMsrRd_Ia32VmxMisc.
- *
- * @returns IA32_VMX_MISC MSR.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxMisc(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        uint64_t uHostMsr;
-        int rc = HMVmxGetHostMsr(pVCpu->CTX_SUFF(pVM), MSR_IA32_VMX_MISC, &uHostMsr);
-        AssertMsgRC(rc, ("HMVmxGetHostMsr failed. rc=%Rrc\n", rc)); RT_NOREF_PV(rc);
-        uint8_t const cMaxMsrs       = RT_MIN(RT_BF_GET(uHostMsr, VMX_BF_MISC_MAX_MSRS), VMX_V_AUTOMSR_COUNT_MAX);
-        uint8_t const fActivityState = RT_BF_GET(uHostMsr, VMX_BF_MISC_ACTIVITY_STATES) & VMX_V_GUEST_ACTIVITY_STATE_MASK;
-        uVmxMsr = RT_BF_MAKE(VMX_BF_MISC_PREEMPT_TIMER_TSC,     VMX_V_PREEMPT_TIMER_SHIFT)
-                | RT_BF_MAKE(VMX_BF_MISC_EXIT_SAVE_EFER_LMA,    pGuestFeatures->fVmxExitSaveEferLma)
-                | RT_BF_MAKE(VMX_BF_MISC_ACTIVITY_STATES,       fActivityState)
-                | RT_BF_MAKE(VMX_BF_MISC_INTEL_PT,              pGuestFeatures->fVmxIntelPt)
-                | RT_BF_MAKE(VMX_BF_MISC_SMM_READ_SMBASE_MSR,   0)
-                | RT_BF_MAKE(VMX_BF_MISC_CR3_TARGET,            VMX_V_CR3_TARGET_COUNT)
-                | RT_BF_MAKE(VMX_BF_MISC_MAX_MSRS,              cMaxMsrs)
-                | RT_BF_MAKE(VMX_BF_MISC_VMXOFF_BLOCK_SMI,      0)
-                | RT_BF_MAKE(VMX_BF_MISC_VMWRITE_ALL,           pGuestFeatures->fVmxVmwriteAll)
-                | RT_BF_MAKE(VMX_BF_MISC_ENTRY_INJECT_SOFT_INT, pGuestFeatures->fVmxEntryInjectSoftInt)
-                | RT_BF_MAKE(VMX_BF_MISC_MSEG_ID,               VMX_V_MSEG_REV_ID);
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxMisc(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxMisc(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Misc;
     return VINF_SUCCESS;
 }

-/**
- * Gets IA32_VMX_CR0_FIXED0 for IEM and cpumMsrRd_Ia32VmxMisc.
- *
- * @returns IA32_VMX_CR0_FIXED0 value.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxCr0Fixed0(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    if (pGuestFeatures->fVmx)
-    {
-        uint64_t const uVmxMsr = pGuestFeatures->fVmxUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
-        return uVmxMsr;
-    }
-    return 0;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxCr0Fixed0(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Cr0Fixed0;
     return VINF_SUCCESS;
 }

-/**
- * Gets IA32_VMX_CR0_FIXED1 for IEM and cpumMsrRd_Ia32VmxMisc.
- *
- * @returns IA32_VMX_CR0_FIXED1 MSR.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxCr0Fixed1(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        int rc = HMVmxGetHostMsr(pVCpu->CTX_SUFF(pVM), MSR_IA32_VMX_CR0_FIXED1, &uVmxMsr);
-        AssertMsgRC(rc, ("HMVmxGetHostMsr failed. rc=%Rrc\n", rc)); RT_NOREF_PV(rc);
-        uVmxMsr |= VMX_V_CR0_FIXED0;    /* Make sure the CR0 MB1 bits are not clear. */
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxCr0Fixed1(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    Assert(idMsr == MSR_IA32_VMX_CR0_FIXED1);
-    *puValue = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Cr0Fixed1;
     return VINF_SUCCESS;
 }

-/**
- * Gets IA32_VMX_CR4_FIXED0 for IEM and cpumMsrRd_Ia32VmxCr4Fixed0.
- *
- * @returns IA32_VMX_CR4_FIXED0 value.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxCr4Fixed0(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t const uVmxMsr = pGuestFeatures->fVmx ? VMX_V_CR4_FIXED0 : 0;
-    return uVmxMsr;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxCr4Fixed0(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Cr4Fixed0;
     return VINF_SUCCESS;
 }

-/**
- * Gets IA32_VMX_CR4_FIXED1 for IEM and cpumMsrRd_Ia32VmxCr4Fixed1.
- *
- * @returns IA32_VMX_CR4_FIXED1 MSR.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxCr4Fixed1(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        int rc = HMVmxGetHostMsr(pVCpu->CTX_SUFF(pVM), MSR_IA32_VMX_CR4_FIXED1, &uVmxMsr);
-        AssertMsgRC(rc, ("HMVmxGetHostMsr failed. rc=%Rrc\n", rc)); RT_NOREF_PV(rc);
-        uVmxMsr |= VMX_V_CR4_FIXED0;    /* Make sure the CR4 MB1 bits are not clear. */
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxCr4Fixed1(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    Assert(idMsr == MSR_IA32_VMX_CR4_FIXED1);
-    *puValue = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Cr4Fixed1;
     return VINF_SUCCESS;
 }

-/**
- * Gets IA32_VMX_VMCS_ENUM for IEM and cpumMsrRd_Ia32VmxVmcsEnum.
- *
- * @returns IA32_VMX_VMCS_ENUM value.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxVmcsEnum(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-        uVmxMsr = VMX_V_VMCS_MAX_INDEX << VMX_BF_VMCS_ENUM_HIGHEST_IDX_SHIFT;
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxVmcsEnum(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxVmcsEnum(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmcsEnum;
     return VINF_SUCCESS;
 }

-/**
- * Gets MSR_IA32_VMX_PROCBASED_CTLS2 for IEM and cpumMsrRd_Ia32VmxProcBasedCtls2.
- *
- * @returns MSR_IA32_VMX_PROCBASED_CTLS2 value.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxProcbasedCtls2(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (   pGuestFeatures->fVmx
-        && pGuestFeatures->fVmxSecondaryExecCtls)
-    {
-        uint32_t const fFeatures = (pGuestFeatures->fVmxVirtApicAccess    << VMX_BF_PROC_CTLS2_VIRT_APIC_ACCESS_SHIFT)
-                                 | (pGuestFeatures->fVmxEpt               << VMX_BF_PROC_CTLS2_EPT_SHIFT)
-                                 | (pGuestFeatures->fVmxDescTableExit     << VMX_BF_PROC_CTLS2_DESC_TABLE_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxRdtscp            << VMX_BF_PROC_CTLS2_RDTSCP_SHIFT)
-                                 | (pGuestFeatures->fVmxVirtX2ApicMode    << VMX_BF_PROC_CTLS2_VIRT_X2APIC_MODE_SHIFT)
-                                 | (pGuestFeatures->fVmxVpid              << VMX_BF_PROC_CTLS2_VPID_SHIFT)
-                                 | (pGuestFeatures->fVmxWbinvdExit        << VMX_BF_PROC_CTLS2_WBINVD_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxUnrestrictedGuest << VMX_BF_PROC_CTLS2_UNRESTRICTED_GUEST_SHIFT)
-                                 | (pGuestFeatures->fVmxApicRegVirt       << VMX_BF_PROC_CTLS2_APIC_REG_VIRT_SHIFT)
-                                 | (pGuestFeatures->fVmxVirtIntDelivery   << VMX_BF_PROC_CTLS2_VIRT_INT_DELIVERY_SHIFT)
-                                 | (pGuestFeatures->fVmxPauseLoopExit     << VMX_BF_PROC_CTLS2_PAUSE_LOOP_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxRdrandExit        << VMX_BF_PROC_CTLS2_RDRAND_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxInvpcid           << VMX_BF_PROC_CTLS2_INVPCID_SHIFT)
-                                 | (pGuestFeatures->fVmxVmFunc            << VMX_BF_PROC_CTLS2_VMFUNC_SHIFT)
-                                 | (pGuestFeatures->fVmxVmcsShadowing     << VMX_BF_PROC_CTLS2_VMCS_SHADOWING_SHIFT)
-                                 | (pGuestFeatures->fVmxRdseedExit        << VMX_BF_PROC_CTLS2_RDSEED_EXIT_SHIFT)
-                                 | (pGuestFeatures->fVmxPml               << VMX_BF_PROC_CTLS2_PML_SHIFT)
-                                 | (pGuestFeatures->fVmxEptXcptVe         << VMX_BF_PROC_CTLS2_EPT_VE_SHIFT)
-                                 | (pGuestFeatures->fVmxXsavesXrstors     << VMX_BF_PROC_CTLS2_XSAVES_XRSTORS_SHIFT)
-                                 | (pGuestFeatures->fVmxUseTscScaling     << VMX_BF_PROC_CTLS2_TSC_SCALING_SHIFT);
-        /* No default1 class bits. A.3.3 "Secondary Processor-Based VM-Execution Controls". */
-        uint32_t const fAllowed0 = 0;
-        uint32_t const fAllowed1 = fFeatures;
-        uVmxMsr = RT_MAKE_U64(fAllowed0, fAllowed1);
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxProcBasedCtls2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.ProcCtls2.u;
     return VINF_SUCCESS;
 }
…
-/**
- * Gets IA32_VMX_VMFUNC for IEM and cpumMsrRd_Ia32VmxVmFunc.
- *
- * @returns IA32_VMX_VMFUNC value.
- * @param   pVCpu   The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxVmFunc(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (   pGuestFeatures->fVmx
-        && pGuestFeatures->fVmxVmFunc)
-        uVmxMsr = RT_BF_MAKE(VMX_BF_VMFUNC_EPTP_SWITCHING, 1);
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxVmFunc(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxVmFunc(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
     return VINF_SUCCESS;
 }
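All of the hunks above follow one pattern: the per-read computation of the virtual VMX capability MSRs (feature flags folded into allowed-0/allowed-1 pairs with RT_MAKE_U64, host probes via HMVmxGetHostMsr, and so on) disappears from the read callbacks, which now return values cached in pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs. A minimal self-contained sketch of that compute-once-at-init idea follows; the struct and function names (VMXCACHEDMSRS, initVmxCachedMsrs) are hypothetical and the constants are local stand-ins, since the changeset itself does not show where VirtualBox populates the cache:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the IPRT/VMM definitions used above. */
    #define FEAT_CTL_LOCK     UINT64_C(0x01)       /* IA32_FEATURE_CONTROL bit 0: lock          */
    #define FEAT_CTL_VMXON    UINT64_C(0x04)       /* bit 2: VMXON outside SMX operation        */
    #define PIN_CTLS_DEFAULT1 UINT32_C(0x00000016) /* default1 class bits, Intel spec A.3.1     */
    #define MAKE_U64(lo, hi)  ((uint64_t)(hi) << 32 | (lo))

    typedef struct VMXCACHEDMSRS
    {
        uint64_t u64FeatCtrl; /* IA32_FEATURE_CONTROL */
        uint64_t u64PinCtls;  /* IA32_VMX_PINBASED_CTLS: allowed-0 low dword, allowed-1 high dword */
        /* ... the remaining VMX capability MSRs ... */
    } VMXCACHEDMSRS;

    /* Compute once at VM init; every later MSR-read callback is a plain load. */
    static void initVmxCachedMsrs(VMXCACHEDMSRS *pMsrs, bool fVmx, uint32_t fPinFeatures)
    {
        /* Lock bit always reported as set so the guest cannot rewrite the MSR. */
        pMsrs->u64FeatCtrl = FEAT_CTL_LOCK | (fVmx ? FEAT_CTL_VMXON : 0);

        uint32_t const fAllowed0 = PIN_CTLS_DEFAULT1;                /* must-be-one bits */
        uint32_t const fAllowed1 = fPinFeatures | PIN_CTLS_DEFAULT1; /* may-be-one bits  */
        pMsrs->u64PinCtls = fVmx ? MAKE_U64(fAllowed0, fAllowed1) : 0;
    }

Besides avoiding redundant recomputation on every RDMSR, caching also drops the host MSR probes (and the stray LogRel in the removed pin-based helper) from the read path.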
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r75680 → r76200

…
     if (IEM_VMX_IS_ROOT_MODE(pVCpu))
     {
-        uint32_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
+        uint32_t const uCr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
         if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)
         {
…
         }

-        uint32_t const uCr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
+        uint32_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
         if (uNewCrX & ~uCr0Fixed1)
         {
…
     if (IEM_VMX_IS_ROOT_MODE(pVCpu))
     {
-        uint32_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
+        uint32_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
         if ((uNewCrX & uCr4Fixed0) != uCr4Fixed0)
         {
…
         }

-        uint32_t const uCr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
+        uint32_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
         if (uNewCrX & ~uCr4Fixed1)
         {
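Each hunk in this file swaps a helper call for a read of the cached CR0/CR4 fixed MSRs. The semantics the guards implement are architectural: FIXED0 holds the bits that must be 1 while in VMX operation, and FIXED1 holds the only bits that may be 1, so a candidate value is legal iff (uNewCrX & uFixed0) == uFixed0 and (uNewCrX & ~uFixed1) == 0. A self-contained sketch of that check (the helper name is hypothetical, not VirtualBox code):

    #include <stdbool.h>
    #include <stdint.h>

    /* A candidate control-register value is acceptable in VMX operation iff
     * every FIXED0 (must-be-one) bit is set and no bit outside FIXED1
     * (the may-be-one mask) is set. */
    static bool isCrValidInVmxOperation(uint64_t uNewCr, uint64_t uFixed0, uint64_t uFixed1)
    {
        if ((uNewCr & uFixed0) != uFixed0) /* a must-be-one bit is clear -> #GP */
            return false;
        if (uNewCr & ~uFixed1)             /* a must-be-zero bit is set  -> #GP */
            return false;
        return true;
    }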
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
r76198 → r76200

…
         case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
         {
-            uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
+            uint64_t const uVmFuncMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64VmFunc;
             return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
         }
…
 DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PVMCPU pVCpu, uint32_t uMsrCount)
 {
-    uint64_t const u64VmxMiscMsr     = CPUMGetGuestIa32VmxMisc(pVCpu);
+    uint64_t const u64VmxMiscMsr     = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
     uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
     Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
…
     {
         /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 MB1 bits are not modified. */
-        uint64_t const uCr0Fixed0  = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
+        uint64_t const uCr0Fixed0  = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
         uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ff8ffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Fixed0;
         uint64_t const uHostCr0    = pVmcs->u64HostCr0.u;
…
     {
         /* CR4 MB1 bits are not modified. */
-        uint64_t const fCr4IgnMask = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
+        uint64_t const fCr4IgnMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
         uint64_t const uHostCr4    = pVmcs->u64HostCr4.u;
         uint64_t const uGuestCr4   = pVCpu->cpum.GstCtx.cr4;
…
     {
         /* CR0 MB1 bits. */
-        uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
+        uint64_t u64Cr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
         Assert(!(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
         if (fUnrestrictedGuest)
…
         /* CR0 MBZ bits. */
-        uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
+        uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
         if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
…
     {
         /* CR4 MB1 bits. */
-        uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
+        uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
         if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);

         /* CR4 MBZ bits. */
-        uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
+        uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
         if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
…
     /*
      * Activity state.
      */
-    uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
+    uint64_t const u64GuestVmxMiscMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
     uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
     if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
…
     {
         /* CR0 MB1 bits. */
-        uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
+        uint64_t const u64Cr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
         if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);

         /* CR0 MBZ bits. */
-        uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
+        uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
         if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
…
     {
         /* CR4 MB1 bits. */
-        uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
+        uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
         if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);

         /* CR4 MBZ bits. */
-        uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
+        uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
         if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
…
     /* VM-entry controls. */
-    VMXCTLSMSR EntryCtls;
-    EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
+    VMXCTLSMSR const EntryCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.EntryCtls;
     if (~pVmcs->u32EntryCtls & EntryCtls.n.allowed0)
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
…
     /* VM-exit controls. */
-    VMXCTLSMSR ExitCtls;
-    ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu);
+    VMXCTLSMSR const ExitCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ExitCtls;
     if (~pVmcs->u32ExitCtls & ExitCtls.n.allowed0)
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
…
     /* Pin-based VM-execution controls. */
     {
-        VMXCTLSMSR PinCtls;
-        PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
+        VMXCTLSMSR const PinCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.PinCtls;
         if (~pVmcs->u32PinCtls & PinCtls.n.allowed0)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
…
     /* Processor-based VM-execution controls. */
     {
-        VMXCTLSMSR ProcCtls;
-        ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
+        VMXCTLSMSR const ProcCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls;
         if (~pVmcs->u32ProcCtls & ProcCtls.n.allowed0)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
…
     if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
     {
-        VMXCTLSMSR ProcCtls2;
-        ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
+        VMXCTLSMSR const ProcCtls2 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls2;
         if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.allowed0)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
…
     {
         /* CR0 MB1 bits. */
-        uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
+        uint64_t const uCr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
         if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
         {
…
         }

         /* CR0 MBZ bits. */
-        uint64_t const uCr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
+        uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
         if (pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1)
         {
…
     {
         /* CR4 MB1 bits. */
-        uint64_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
+        uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
         if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) != uCr4Fixed0)
         {
…
         }

         /* CR4 MBZ bits. */
-        uint64_t const uCr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
+        uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
         if (pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1)
         {
…
     /* Feature control MSR's LOCK and VMXON bits. */
-    uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
-    if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
+    uint64_t const uMsrFeatCtl = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64FeatCtrl;
+    if (   (uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
+        != (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
     {
         Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
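The final hunk is more than a mechanical substitution. The old VMXON guard raised #GP(0) only when the LOCK and VMXON bits were both clear, so a locked IA32_FEATURE_CONTROL with VMXON disabled (LOCK=1, VMXON=0) would wrongly let VMXON proceed; the new guard requires both bits to be set. A self-contained illustration of the difference (local stand-in constants following the architectural bit layout, not VirtualBox definitions):

    #include <assert.h>
    #include <stdint.h>

    #define FEAT_CTL_LOCK   UINT64_C(0x1)   /* IA32_FEATURE_CONTROL bit 0: lock        */
    #define FEAT_CTL_VMXON  UINT64_C(0x4)   /* bit 2: VMXON outside SMX operation      */

    int main(void)
    {
        uint64_t const fBoth = FEAT_CTL_LOCK | FEAT_CTL_VMXON;
        uint64_t const uMsr  = FEAT_CTL_LOCK;           /* locked, but VMXON not enabled */

        int const fOldAllows = (uMsr & fBoth) != 0;     /* old: any bit set passes       */
        int const fNewAllows = (uMsr & fBoth) == fBoth; /* new: both bits required       */

        assert(fOldAllows && !fNewAllows);  /* the old check let this state through */
        return 0;
    }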