Changeset 73293 in vbox for trunk/src/VBox/VMM/VMMR3
- Timestamp: Jul 21, 2018, 3:11:53 PM (6 years ago)
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR3/HM.cpp
r73292 r73293 1230 1230 { 1231 1231 LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.rcInit)); 1232 LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.Msrs.u64Feat ureCtrl));1232 LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.Msrs.u64FeatCtrl)); 1233 1233 switch (pVM->hm.s.rcInit) 1234 1234 { … … 1353 1353 1354 1354 /** 1355 * Finish VT-x initialization (after ring-0 init). 1356 * 1357 * @returns VBox status code. 1358 * @param pVM The cross context VM structure. 1359 */ 1360 static int hmR3InitFinalizeR0Intel(PVM pVM) 1361 { 1362 int rc; 1363 1364 Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported)); 1365 AssertLogRelReturn(pVM->hm.s.vmx.Msrs.u64FeatureCtrl != 0, VERR_HM_IPE_4); 1366 1367 uint64_t val; 1368 uint64_t zap; 1369 1370 LogRel(("HM: Using VT-x implementation 2.0\n")); 1371 LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.vmx.u64HostCr4)); 1372 LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.vmx.u64HostEfer)); 1373 LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtl)); 1374 1375 val = pVM->hm.s.vmx.Msrs.u64FeatureCtrl; 1376 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", val)); 1355 * Reports MSR_IA32_FEATURE_CONTROL MSR to the log. 1356 * 1357 * @param fFeatMsr The feature control MSR value. 
1358 */ 1359 static void hmR3VmxReportFeatCtlMsr(uint64_t fFeatMsr) 1360 { 1361 uint64_t const val = fFeatMsr; 1362 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", val)); 1377 1363 HMVMX_REPORT_MSR_CAP(val, "LOCK", MSR_IA32_FEATURE_CONTROL_LOCK); 1378 1364 HMVMX_REPORT_MSR_CAP(val, "SMX_VMXON", MSR_IA32_FEATURE_CONTROL_SMX_VMXON); … … 1389 1375 HMVMX_REPORT_MSR_CAP(val, "SGX_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SGX_GLOBAL_EN); 1390 1376 HMVMX_REPORT_MSR_CAP(val, "LMCE", MSR_IA32_FEATURE_CONTROL_LMCE); 1391 if (!( pVM->hm.s.vmx.Msrs.u64FeatureCtrl & MSR_IA32_FEATURE_CONTROL_LOCK))1377 if (!(val & MSR_IA32_FEATURE_CONTROL_LOCK)) 1392 1378 LogRel(("HM: MSR_IA32_FEATURE_CONTROL lock bit not set, possibly bad hardware!\n")); 1393 1394 LogRel(("HM: MSR_IA32_VMX_BASIC = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Basic)); 1395 LogRel(("HM: VMCS id = %#x\n", MSR_IA32_VMX_BASIC_VMCS_ID(pVM->hm.s.vmx.Msrs.u64Basic))); 1396 LogRel(("HM: VMCS size = %u bytes\n", MSR_IA32_VMX_BASIC_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64Basic))); 1397 LogRel(("HM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.Msrs.u64Basic) ? "< 4 GB" : "None")); 1398 LogRel(("HM: VMCS memory type = %s\n", hmR3VmxGetMemTypeDesc(pVM->hm.s.vmx.Msrs.u64Basic))); 1399 LogRel(("HM: Dual-monitor treatment support = %RTbool\n", MSR_IA32_VMX_BASIC_DUAL_MON(pVM->hm.s.vmx.Msrs.u64Basic))); 1400 LogRel(("HM: OUTS & INS instruction-info = %RTbool\n", MSR_IA32_VMX_BASIC_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64Basic))); 1401 LogRel(("HM: Supports true capability MSRs = %RTbool\n", MSR_IA32_VMX_BASIC_TRUE_CONTROLS(pVM->hm.s.vmx.Msrs.u64Basic))); 1402 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoops)); 1403 1404 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxPinCtls.u)); 1405 val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; 1406 zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; 1379 } 1380 1381 1382 /** 1383 * Reports MSR_IA32_VMX_BASIC MSR to the log. 
1384 * 1385 * @param uBasicMsr The VMX basic MSR value. 1386 */ 1387 static void hmR3VmxReportBasicMsr(uint64_t uBasicMsr) 1388 { 1389 LogRel(("HM: MSR_IA32_VMX_BASIC = %#RX64\n", uBasicMsr)); 1390 LogRel(("HM: VMCS id = %#x\n", MSR_IA32_VMX_BASIC_VMCS_ID(uBasicMsr))); 1391 LogRel(("HM: VMCS size = %u bytes\n", MSR_IA32_VMX_BASIC_VMCS_SIZE(uBasicMsr))); 1392 LogRel(("HM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_VMCS_PHYS_WIDTH(uBasicMsr) ? "< 4 GB" 1393 : "None")); 1394 LogRel(("HM: VMCS memory type = %s\n", hmR3VmxGetMemTypeDesc(uBasicMsr))); 1395 LogRel(("HM: Dual-monitor treatment support = %RTbool\n", MSR_IA32_VMX_BASIC_DUAL_MON(uBasicMsr))); 1396 LogRel(("HM: OUTS & INS instruction-info = %RTbool\n", MSR_IA32_VMX_BASIC_VMCS_INS_OUTS(uBasicMsr))); 1397 LogRel(("HM: Supports true capability MSRs = %RTbool\n", MSR_IA32_VMX_BASIC_TRUE_CONTROLS(uBasicMsr))); 1398 } 1399 1400 1401 /** 1402 * Reports MSR_IA32_PINBASED_CTLS to the log. 1403 * 1404 * @param pVmxMsr Pointer to the VMX MSR. 1405 */ 1406 static void hmR3VmxReportPinBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr) 1407 { 1408 uint64_t const val = pVmxMsr->n.allowed1; 1409 uint64_t const zap = pVmxMsr->n.disallowed0; 1410 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVmxMsr->u)); 1407 1411 HMVMX_REPORT_FEAT(val, zap, "EXT_INT_EXIT", VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT); 1408 1412 HMVMX_REPORT_FEAT(val, zap, "NMI_EXIT", VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT); … … 1410 1414 HMVMX_REPORT_FEAT(val, zap, "PREEMPT_TIMER", VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER); 1411 1415 HMVMX_REPORT_FEAT(val, zap, "POSTED_INTR", VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR); 1412 1413 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls.u)); 1414 val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1; 1415 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; 1416 } 1417 1418 1419 /** 1420 * Reports MSR_IA32_VMX_PROCBASED_CTLS MSR to the log. 1421 * 1422 * @param pVmxMsr Pointer to the VMX MSR. 
1423 */ 1424 static void hmR3VmxReportProcBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr) 1425 { 1426 uint64_t const val = pVmxMsr->n.allowed1; 1427 uint64_t const zap = pVmxMsr->n.disallowed0; 1428 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVmxMsr->u)); 1416 1429 HMVMX_REPORT_FEAT(val, zap, "INT_WINDOW_EXIT", VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT); 1417 1430 HMVMX_REPORT_FEAT(val, zap, "USE_TSC_OFFSETTING", VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING); … … 1435 1448 HMVMX_REPORT_FEAT(val, zap, "PAUSE_EXIT", VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT); 1436 1449 HMVMX_REPORT_FEAT(val, zap, "USE_SECONDARY_EXEC_CTRL", VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL); 1437 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 1438 { 1439 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.u)); 1440 val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1; 1441 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; 1442 HMVMX_REPORT_FEAT(val, zap, "VIRT_APIC", VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC); 1443 HMVMX_REPORT_FEAT(val, zap, "EPT", VMX_VMCS_CTRL_PROC_EXEC2_EPT); 1444 HMVMX_REPORT_FEAT(val, zap, "DESCRIPTOR_TABLE_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT); 1445 HMVMX_REPORT_FEAT(val, zap, "RDTSCP", VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP); 1446 HMVMX_REPORT_FEAT(val, zap, "VIRT_X2APIC", VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC); 1447 HMVMX_REPORT_FEAT(val, zap, "VPID", VMX_VMCS_CTRL_PROC_EXEC2_VPID); 1448 HMVMX_REPORT_FEAT(val, zap, "WBINVD_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT); 1449 HMVMX_REPORT_FEAT(val, zap, "UNRESTRICTED_GUEST", VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST); 1450 HMVMX_REPORT_FEAT(val, zap, "APIC_REG_VIRT", VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT); 1451 HMVMX_REPORT_FEAT(val, zap, "VIRT_INTR_DELIVERY", VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY); 1452 HMVMX_REPORT_FEAT(val, zap, "PAUSE_LOOP_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT); 1453 HMVMX_REPORT_FEAT(val, zap, 
"RDRAND_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT); 1454 HMVMX_REPORT_FEAT(val, zap, "INVPCID", VMX_VMCS_CTRL_PROC_EXEC2_INVPCID); 1455 HMVMX_REPORT_FEAT(val, zap, "VMFUNC", VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC); 1456 HMVMX_REPORT_FEAT(val, zap, "VMCS_SHADOWING", VMX_VMCS_CTRL_PROC_EXEC2_VMCS_SHADOWING); 1457 HMVMX_REPORT_FEAT(val, zap, "ENCLS_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_ENCLS_EXIT); 1458 HMVMX_REPORT_FEAT(val, zap, "RDSEED_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_RDSEED_EXIT); 1459 HMVMX_REPORT_FEAT(val, zap, "PML", VMX_VMCS_CTRL_PROC_EXEC2_PML); 1460 HMVMX_REPORT_FEAT(val, zap, "EPT_VE", VMX_VMCS_CTRL_PROC_EXEC2_EPT_VE); 1461 HMVMX_REPORT_FEAT(val, zap, "CONCEAL_FROM_PT", VMX_VMCS_CTRL_PROC_EXEC2_CONCEAL_FROM_PT); 1462 HMVMX_REPORT_FEAT(val, zap, "XSAVES_XRSTORS", VMX_VMCS_CTRL_PROC_EXEC2_XSAVES_XRSTORS); 1463 HMVMX_REPORT_FEAT(val, zap, "TSC_SCALING", VMX_VMCS_CTRL_PROC_EXEC2_TSC_SCALING); 1464 } 1465 1466 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxEntry.u)); 1467 val = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; 1468 zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; 1450 } 1451 1452 1453 /** 1454 * Reports MSR_IA32_VMX_PROCBASED_CTLS2 MSR to the log. 1455 * 1456 * @param pVmxMsr Pointer to the VMX MSR. 
1457 */ 1458 static void hmR3VmxReportProcBasedCtls2Msr(PCVMXCTLSMSR pVmxMsr) 1459 { 1460 uint64_t const val = pVmxMsr->n.allowed1; 1461 uint64_t const zap = pVmxMsr->n.disallowed0; 1462 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVmxMsr->u)); 1463 HMVMX_REPORT_FEAT(val, zap, "VIRT_APIC", VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC); 1464 HMVMX_REPORT_FEAT(val, zap, "EPT", VMX_VMCS_CTRL_PROC_EXEC2_EPT); 1465 HMVMX_REPORT_FEAT(val, zap, "DESCRIPTOR_TABLE_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT); 1466 HMVMX_REPORT_FEAT(val, zap, "RDTSCP", VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP); 1467 HMVMX_REPORT_FEAT(val, zap, "VIRT_X2APIC", VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC); 1468 HMVMX_REPORT_FEAT(val, zap, "VPID", VMX_VMCS_CTRL_PROC_EXEC2_VPID); 1469 HMVMX_REPORT_FEAT(val, zap, "WBINVD_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT); 1470 HMVMX_REPORT_FEAT(val, zap, "UNRESTRICTED_GUEST", VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST); 1471 HMVMX_REPORT_FEAT(val, zap, "APIC_REG_VIRT", VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT); 1472 HMVMX_REPORT_FEAT(val, zap, "VIRT_INTR_DELIVERY", VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY); 1473 HMVMX_REPORT_FEAT(val, zap, "PAUSE_LOOP_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT); 1474 HMVMX_REPORT_FEAT(val, zap, "RDRAND_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT); 1475 HMVMX_REPORT_FEAT(val, zap, "INVPCID", VMX_VMCS_CTRL_PROC_EXEC2_INVPCID); 1476 HMVMX_REPORT_FEAT(val, zap, "VMFUNC", VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC); 1477 HMVMX_REPORT_FEAT(val, zap, "VMCS_SHADOWING", VMX_VMCS_CTRL_PROC_EXEC2_VMCS_SHADOWING); 1478 HMVMX_REPORT_FEAT(val, zap, "ENCLS_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_ENCLS_EXIT); 1479 HMVMX_REPORT_FEAT(val, zap, "RDSEED_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_RDSEED_EXIT); 1480 HMVMX_REPORT_FEAT(val, zap, "PML", VMX_VMCS_CTRL_PROC_EXEC2_PML); 1481 HMVMX_REPORT_FEAT(val, zap, "EPT_VE", VMX_VMCS_CTRL_PROC_EXEC2_EPT_VE); 1482 HMVMX_REPORT_FEAT(val, zap, "CONCEAL_FROM_PT", VMX_VMCS_CTRL_PROC_EXEC2_CONCEAL_FROM_PT); 1483 
HMVMX_REPORT_FEAT(val, zap, "XSAVES_XRSTORS", VMX_VMCS_CTRL_PROC_EXEC2_XSAVES_XRSTORS); 1484 HMVMX_REPORT_FEAT(val, zap, "TSC_SCALING", VMX_VMCS_CTRL_PROC_EXEC2_TSC_SCALING); 1485 } 1486 1487 1488 /** 1489 * Reports MSR_IA32_VMX_ENTRY_CTLS to the log. 1490 * 1491 * @param pVmxMsr Pointer to the VMX MSR. 1492 */ 1493 static void hmR3VmxReportEntryCtlsMsr(PCVMXCTLSMSR pVmxMsr) 1494 { 1495 uint64_t const val = pVmxMsr->n.allowed1; 1496 uint64_t const zap = pVmxMsr->n.disallowed0; 1497 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %#RX64\n", pVmxMsr->u)); 1469 1498 HMVMX_REPORT_FEAT(val, zap, "LOAD_DEBUG", VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG); 1470 1499 HMVMX_REPORT_FEAT(val, zap, "IA32E_MODE_GUEST", VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST); … … 1474 1503 HMVMX_REPORT_FEAT(val, zap, "LOAD_GUEST_PAT_MSR", VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR); 1475 1504 HMVMX_REPORT_FEAT(val, zap, "LOAD_GUEST_EFER_MSR", VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR); 1476 1477 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxExit.u)); 1478 val = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; 1479 zap = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; 1505 } 1506 1507 1508 /** 1509 * Reports MSR_IA32_VMX_EXIT_CTLS to the log. 1510 * 1511 * @param pVmxMsr Pointer to the VMX MSR. 1512 */ 1513 static void hmR3VmxReportExitCtlsMsr(PCVMXCTLSMSR pVmxMsr) 1514 { 1515 uint64_t const val = pVmxMsr->n.allowed1; 1516 uint64_t const zap = pVmxMsr->n.disallowed0; 1517 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %#RX64\n", pVmxMsr->u)); 1480 1518 HMVMX_REPORT_FEAT(val, zap, "SAVE_DEBUG", VMX_VMCS_CTRL_EXIT_SAVE_DEBUG); 1481 1519 HMVMX_REPORT_FEAT(val, zap, "HOST_ADDR_SPACE_SIZE", VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE); … … 1487 1525 HMVMX_REPORT_FEAT(val, zap, "LOAD_HOST_EFER_MSR", VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR); 1488 1526 HMVMX_REPORT_FEAT(val, zap, "SAVE_VMX_PREEMPT_TIMER", VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER); 1489 1527 } 1528 1529 1530 /** 1531 * Reports MSR_IA32_VMX_EPT_VPID_CAP MSR to the log. 
1532 * 1533 * @param fCaps The VMX EPT/VPID capability MSR value. 1534 */ 1535 static void hmR3VmxReportEptVpidCapsMsr(uint64_t fCaps) 1536 { 1537 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", fCaps)); 1538 HMVMX_REPORT_MSR_CAP(fCaps, "RWX_X_ONLY", MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY); 1539 HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_4", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4); 1540 HMVMX_REPORT_MSR_CAP(fCaps, "EMT_UC", MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC); 1541 HMVMX_REPORT_MSR_CAP(fCaps, "EMT_WB", MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB); 1542 HMVMX_REPORT_MSR_CAP(fCaps, "PDE_2M", MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M); 1543 HMVMX_REPORT_MSR_CAP(fCaps, "PDPTE_1G", MSR_IA32_VMX_EPT_VPID_CAP_PDPTE_1G); 1544 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT); 1545 HMVMX_REPORT_MSR_CAP(fCaps, "EPT_ACCESS_DIRTY", MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY); 1546 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT); 1547 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS); 1548 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID); 1549 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_INDIV_ADDR", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR); 1550 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT); 1551 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS); 1552 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS); 1553 } 1554 1555 1556 /** 1557 * Reports MSR_IA32_VMX_MISC MSR to the log. 1558 * 1559 * @param fMisc The VMX misc. MSR value. 
1560 */ 1561 static void hmR3VmxReportMiscMsr(PVM pVM, uint64_t fMisc) 1562 { 1563 LogRel(("HM: MSR_IA32_VMX_MISC = %#RX64\n", fMisc)); 1564 if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(fMisc) == pVM->hm.s.vmx.cPreemptTimerShift) 1565 LogRel(("HM: PREEMPT_TSC_BIT = %#x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(fMisc))); 1566 else 1567 LogRel(("HM: PREEMPT_TSC_BIT = %#x - erratum detected, using %#x instead\n", 1568 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(fMisc), pVM->hm.s.vmx.cPreemptTimerShift)); 1569 LogRel(("HM: STORE_EFERLMA_VMEXIT = %RTbool\n", MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(fMisc))); 1570 uint8_t const fActivityState = MSR_IA32_VMX_MISC_ACTIVITY_STATES(fMisc); 1571 LogRel(("HM: ACTIVITY_STATES = %#x\n", fActivityState)); 1572 HMVMX_REPORT_MSR_CAP(fActivityState, " HLT", VMX_VMCS_GUEST_ACTIVITY_HLT); 1573 HMVMX_REPORT_MSR_CAP(fActivityState, " SHUTDOWN", VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN); 1574 HMVMX_REPORT_MSR_CAP(fActivityState, " SIPI_WAIT", VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT); 1575 LogRel(("HM: CR3_TARGET = %#x\n", MSR_IA32_VMX_MISC_CR3_TARGET(fMisc))); 1576 LogRel(("HM: MAX_MSR = %u\n", MSR_IA32_VMX_MISC_MAX_MSR(fMisc))); 1577 LogRel(("HM: RDMSR_SMBASE_MSR_SMM = %RTbool\n", MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(fMisc))); 1578 LogRel(("HM: SMM_MONITOR_CTL_B2 = %RTbool\n", MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(fMisc))); 1579 LogRel(("HM: VMWRITE_VMEXIT_INFO = %RTbool\n", MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(fMisc))); 1580 LogRel(("HM: MSEG_ID = %#x\n", MSR_IA32_VMX_MISC_MSEG_ID(fMisc))); 1581 } 1582 1583 1584 /** 1585 * Reports MSR_IA32_VMX_VMCS_ENUM MSR to the log. 1586 * 1587 * @param uVmcsEnum The VMX VMCS enum MSR value. 1588 */ 1589 static void hmR3VmxReportVmcsEnumMsr(uint64_t uVmcsEnum) 1590 { 1591 uint64_t const val = uVmcsEnum; 1592 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %#RX64\n", val)); 1593 LogRel(("HM: HIGHEST_INDEX = %#x\n", MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(val))); 1594 } 1595 1596 1597 /** 1598 * Reports MSR_IA32_VMX_VMFUNC MSR to the log. 
1599 * 1600 * @param uVmFunc The VMX VMFUNC MSR value. 1601 */ 1602 static void hmR3VmxReportVmfuncMsr(uint64_t uVmFunc) 1603 { 1604 LogRel(("HM: MSR_IA32_VMX_VMFUNC = %#RX64\n", uVmFunc)); 1605 HMVMX_REPORT_ALLOWED_FEAT(uVmFunc, "EPTP_SWITCHING", VMX_VMCS_CTRL_VMFUNC_EPTP_SWITCHING); 1606 } 1607 1608 1609 /** 1610 * Reports VMX CR0, CR4 fixed MSRs. 1611 * 1612 * @param pMsrs Pointer to the VMX MSRs. 1613 */ 1614 static void hmR3VmxReportCrFixedMsrs(PVMXMSRS pMsrs) 1615 { 1616 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %#RX64\n", pMsrs->u64Cr0Fixed0)); 1617 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %#RX64\n", pMsrs->u64Cr0Fixed1)); 1618 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %#RX64\n", pMsrs->u64Cr4Fixed0)); 1619 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %#RX64\n", pMsrs->u64Cr4Fixed1)); 1620 } 1621 1622 1623 /** 1624 * Finish VT-x initialization (after ring-0 init). 1625 * 1626 * @returns VBox status code. 1627 * @param pVM The cross context VM structure. 1628 */ 1629 static int hmR3InitFinalizeR0Intel(PVM pVM) 1630 { 1631 int rc; 1632 1633 Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported)); 1634 AssertLogRelReturn(pVM->hm.s.vmx.Msrs.u64FeatCtrl != 0, VERR_HM_IPE_4); 1635 1636 LogRel(("HM: Using VT-x implementation 2.0\n")); 1637 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoops)); 1638 LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.vmx.u64HostCr4)); 1639 LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.vmx.u64HostEfer)); 1640 LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtl)); 1641 1642 hmR3VmxReportFeatCtlMsr(pVM->hm.s.vmx.Msrs.u64FeatCtrl); 1643 hmR3VmxReportBasicMsr(pVM->hm.s.vmx.Msrs.u64Basic); 1644 1645 hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.vmx.Msrs.PinCtls); 1646 hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.vmx.Msrs.ProcCtls); 1647 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 1648 hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.vmx.Msrs.ProcCtls2); 1649 1650 
hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.vmx.Msrs.EntryCtls); 1651 hmR3VmxReportExitCtlsMsr(&pVM->hm.s.vmx.Msrs.ExitCtls); 1652 1653 if (MSR_IA32_VMX_BASIC_TRUE_CONTROLS(pVM->hm.s.vmx.Msrs.u64Basic)) 1654 { 1655 /* We don't do extensive dumping of the true capability MSRs as we don't use them yet. */ 1656 /** @todo Consider using true capability MSRs and dumping them extensively. */ 1657 LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.TruePinCtls)); 1658 LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.TrueProcCtls)); 1659 LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.TrueEntryCtls)); 1660 LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.TrueExitCtls)); 1661 } 1662 1663 hmR3VmxReportMiscMsr(pVM, pVM->hm.s.vmx.Msrs.u64Misc); 1664 hmR3VmxReportVmcsEnumMsr(pVM->hm.s.vmx.Msrs.u64VmcsEnum); 1490 1665 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps) 1491 { 1492 val = pVM->hm.s.vmx.Msrs.u64EptVpidCaps; 1493 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", val)); 1494 HMVMX_REPORT_MSR_CAP(val, "RWX_X_ONLY", MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY); 1495 HMVMX_REPORT_MSR_CAP(val, "PAGE_WALK_LENGTH_4", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4); 1496 HMVMX_REPORT_MSR_CAP(val, "EMT_UC", MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC); 1497 HMVMX_REPORT_MSR_CAP(val, "EMT_WB", MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB); 1498 HMVMX_REPORT_MSR_CAP(val, "PDE_2M", MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M); 1499 HMVMX_REPORT_MSR_CAP(val, "PDPTE_1G", MSR_IA32_VMX_EPT_VPID_CAP_PDPTE_1G); 1500 HMVMX_REPORT_MSR_CAP(val, "INVEPT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT); 1501 HMVMX_REPORT_MSR_CAP(val, "EPT_ACCESS_DIRTY", MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY); 1502 HMVMX_REPORT_MSR_CAP(val, "INVEPT_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT); 1503 HMVMX_REPORT_MSR_CAP(val, "INVEPT_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS); 1504 HMVMX_REPORT_MSR_CAP(val, "INVVPID", 
MSR_IA32_VMX_EPT_VPID_CAP_INVVPID); 1505 HMVMX_REPORT_MSR_CAP(val, "INVVPID_INDIV_ADDR", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR); 1506 HMVMX_REPORT_MSR_CAP(val, "INVVPID_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT); 1507 HMVMX_REPORT_MSR_CAP(val, "INVVPID_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS); 1508 HMVMX_REPORT_MSR_CAP(val, "INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS); 1509 } 1510 1511 val = pVM->hm.s.vmx.Msrs.u64Misc; 1512 LogRel(("HM: MSR_IA32_VMX_MISC = %#RX64\n", val)); 1513 if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val) == pVM->hm.s.vmx.cPreemptTimerShift) 1514 LogRel(("HM: PREEMPT_TSC_BIT = %#x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val))); 1515 else 1516 { 1517 LogRel(("HM: PREEMPT_TSC_BIT = %#x - erratum detected, using %#x instead\n", 1518 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val), pVM->hm.s.vmx.cPreemptTimerShift)); 1519 } 1520 1521 LogRel(("HM: STORE_EFERLMA_VMEXIT = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(val)))); 1522 LogRel(("HM: ACTIVITY_STATES = %#x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(val))); 1523 LogRel(("HM: CR3_TARGET = %#x\n", MSR_IA32_VMX_MISC_CR3_TARGET(val))); 1524 LogRel(("HM: MAX_MSR = %u\n", MSR_IA32_VMX_MISC_MAX_MSR(val))); 1525 LogRel(("HM: RDMSR_SMBASE_MSR_SMM = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(val)))); 1526 LogRel(("HM: SMM_MONITOR_CTL_B2 = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(val)))); 1527 LogRel(("HM: VMWRITE_VMEXIT_INFO = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(val)))); 1528 LogRel(("HM: MSEG_ID = %#x\n", MSR_IA32_VMX_MISC_MSEG_ID(val))); 1529 1530 /* Paranoia */ 1531 AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.Msrs.u64Misc) >= 512); 1532 1533 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr0Fixed0)); 1534 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr0Fixed1)); 1535 LogRel(("HM: 
MSR_IA32_VMX_CR4_FIXED0 = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr4Fixed0)); 1536 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr4Fixed1)); 1537 1538 val = pVM->hm.s.vmx.Msrs.u64VmcsEnum; 1539 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %#RX64\n", val)); 1540 LogRel(("HM: HIGHEST_INDEX = %#x\n", MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(val))); 1541 1542 val = pVM->hm.s.vmx.Msrs.u64Vmfunc; 1543 if (val) 1544 { 1545 LogRel(("HM: MSR_IA32_VMX_VMFUNC = %#RX64\n", val)); 1546 HMVMX_REPORT_ALLOWED_FEAT(val, "EPTP_SWITCHING", VMX_VMCS_CTRL_VMFUNC_EPTP_SWITCHING); 1547 } 1548 1549 LogRel(("HM: APIC-access page physaddr = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess)); 1550 1666 hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.vmx.Msrs.u64EptVpidCaps); 1667 if (pVM->hm.s.vmx.Msrs.u64Vmfunc) 1668 hmR3VmxReportVmfuncMsr(pVM->hm.s.vmx.Msrs.u64Vmfunc); 1669 hmR3VmxReportCrFixedMsrs(&pVM->hm.s.vmx.Msrs); 1670 1671 LogRel(("HM: APIC-access page physaddr = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess)); 1551 1672 for (VMCPUID i = 0; i < pVM->cCpus; i++) 1552 1673 { 1553 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));1554 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs));1674 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap)); 1675 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs)); 1555 1676 } 1556 1677 … … 1559 1680 */ 1560 1681 AssertLogRelReturn( !pVM->hm.s.fNestedPaging 1561 || (pVM->hm.s.vmx.Msrs. VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT),1682 || (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT), 1562 1683 VERR_HM_IPE_1); 1563 1684 AssertLogRelReturn( !pVM->hm.s.vmx.fUnrestrictedGuest 1564 || ( (pVM->hm.s.vmx.Msrs. 
VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST)1685 || ( (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST) 1565 1686 && pVM->hm.s.fNestedPaging), 1566 1687 VERR_HM_IPE_1); … … 1569 1690 * Enable VPID if configured and supported. 1570 1691 */ 1571 if (pVM->hm.s.vmx.Msrs. VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)1692 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID) 1572 1693 pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid; 1573 1694 … … 1576 1697 * Enable APIC register virtualization and virtual-interrupt delivery if supported. 1577 1698 */ 1578 if ( (pVM->hm.s.vmx.Msrs. VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT)1579 && (pVM->hm.s.vmx.Msrs. VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY))1699 if ( (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT) 1700 && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY)) 1580 1701 pVM->hm.s.fVirtApicRegs = true; 1581 1702 … … 1585 1706 /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI 1586 1707 * here. */ 1587 if ( (pVM->hm.s.vmx.Msrs. VmxPinCtls.n.allowed1& VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR)1588 && (pVM->hm.s.vmx.Msrs. VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT))1708 if ( (pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR) 1709 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)) 1589 1710 pVM->hm.s.fPostedIntrs = true; 1590 1711 #endif … … 1595 1716 * in Nehalems and secondary VM exec. controls should be supported in all of them, but nonetheless it's Intel... 1596 1717 */ 1597 if ( !(pVM->hm.s.vmx.Msrs. 
VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)1718 if ( !(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 1598 1719 && CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)) 1599 1720 { … … 1644 1765 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys); 1645 1766 AssertRCReturn(rc, rc); 1646 LogRel(("HM: Real Mode TSS guest physaddr = %#RGp\n", GCPhys));1767 LogRel(("HM: Real Mode TSS guest physaddr = %#RGp\n", GCPhys)); 1647 1768 1648 1769 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys); 1649 1770 AssertRCReturn(rc, rc); 1650 LogRel(("HM: Non-Paging Mode EPT CR3 = %#RGp\n", GCPhys));1771 LogRel(("HM: Non-Paging Mode EPT CR3 = %#RGp\n", GCPhys)); 1651 1772 } 1652 1773 } … … 1683 1804 } 1684 1805 1685 LogRel(("HM: Supports VMCS EFER fields = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEfer));1806 LogRel(("HM: Supports VMCS EFER fields = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEfer)); 1686 1807 LogRel(("HM: Enabled VMX\n")); 1687 1808 pVM->hm.s.vmx.fEnabled = true; … … 1719 1840 LogRel(("HM: Enabled nested paging\n")); 1720 1841 if (pVM->hm.s.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_SINGLE_CONTEXT) 1721 LogRel(("HM: EPT flush type = Single context\n"));1842 LogRel(("HM: EPT flush type = Single context\n")); 1722 1843 else if (pVM->hm.s.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_ALL_CONTEXTS) 1723 LogRel(("HM: EPT flush type = All contexts\n"));1844 LogRel(("HM: EPT flush type = All contexts\n")); 1724 1845 else if (pVM->hm.s.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_NOT_SUPPORTED) 1725 LogRel(("HM: EPT flush type = Not supported\n"));1846 LogRel(("HM: EPT flush type = Not supported\n")); 1726 1847 else 1727 LogRel(("HM: EPT flush type = %#x\n", pVM->hm.s.vmx.enmTlbFlushEpt));1848 LogRel(("HM: EPT flush type = %#x\n", pVM->hm.s.vmx.enmTlbFlushEpt)); 1728 1849 1729 1850 if (pVM->hm.s.vmx.fUnrestrictedGuest) … … 1752 1873 LogRel(("HM: Enabled VPID\n")); 1753 1874 if 
(pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_INDIV_ADDR) 1754 LogRel(("HM: VPID flush type = Individual addresses\n"));1875 LogRel(("HM: VPID flush type = Individual addresses\n")); 1755 1876 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT) 1756 LogRel(("HM: VPID flush type = Single context\n"));1877 LogRel(("HM: VPID flush type = Single context\n")); 1757 1878 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS) 1758 LogRel(("HM: VPID flush type = All contexts\n"));1879 LogRel(("HM: VPID flush type = All contexts\n")); 1759 1880 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS) 1760 LogRel(("HM: VPID flush type = Single context retain globals\n"));1881 LogRel(("HM: VPID flush type = Single context retain globals\n")); 1761 1882 else 1762 LogRel(("HM: VPID flush type = %#x\n", pVM->hm.s.vmx.enmTlbFlushVpid));1883 LogRel(("HM: VPID flush type = %#x\n", pVM->hm.s.vmx.enmTlbFlushVpid)); 1763 1884 } 1764 1885 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_NOT_SUPPORTED) … … 1792 1913 LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping)); 1793 1914 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoops)); 1794 LogRel(("HM: CPUID 0x80000001.u32AMDFeatureECX = %#RX32\n", pVM->hm.s.cpuid.u32AMDFeatureECX));1795 LogRel(("HM: CPUID 0x80000001.u32AMDFeatureEDX = %#RX32\n", pVM->hm.s.cpuid.u32AMDFeatureEDX));1796 1915 LogRel(("HM: AMD HWCR MSR = %#RX64\n", pVM->hm.s.svm.u64MsrHwcr)); 1797 1916 LogRel(("HM: AMD-V revision = %#x\n", pVM->hm.s.svm.u32Rev)); … … 3399 3518 if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM) 3400 3519 { 3401 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed %#RX32\n", pVM->hm.s.vmx.Msrs. VmxEntry.n.allowed1));3402 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %#RX32\n", pVM->hm.s.vmx.Msrs. 
VmxEntry.n.disallowed0));3520 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed %#RX32\n", pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1)); 3521 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %#RX32\n", pVM->hm.s.vmx.Msrs.EntryCtls.n.disallowed0)); 3403 3522 } 3404 3523 else if (iStatusCode == VERR_VMX_INVALID_VMXON_PTR)
Note: See TracChangeset for help on using the changeset viewer.