Changeset 73293 in vbox for trunk/src/VBox/VMM

Timestamp:
    Jul 21, 2018 3:11:53 PM
Location:
    trunk/src/VBox/VMM
Files:
    6 edited
trunk/src/VBox/VMM/VMMAll/HMAll.cpp (r73266 → r73293)

      if (   pVM->hm.s.vmx.fSupported
 -        && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
 +        && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
      {
          return true;
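Most of this changeset is a mechanical rename of the VMX capability-MSR fields (VmxProcCtls → ProcCtls, VmxPinCtls → PinCtls, and so on) plus the removal of the cached AMD CPUID leaf. For readers following checks like the one above, this is the capability-MSR convention the code relies on; the sketch below is illustrative only, with assumed type and field names rather than VirtualBox's actual definitions:

    #include <stdint.h>

    /* Each 64-bit VMX control capability MSR packs two masks: the low dword
     * lists controls that must be 1 (the "disallowed zero-settings"), the
     * high dword lists controls that may be 1 (the "allowed one-settings"). */
    typedef union VMXCAPMSR
    {
        struct
        {
            uint32_t disallowed0;   /* bits that must always be set */
            uint32_t allowed1;      /* bits that may be set */
        } n;
        uint64_t u;                 /* the raw MSR value */
    } VMXCAPMSR;

    /* A feature such as MSR bitmaps is only usable when its control bit
     * appears in the allowed-1 half, which is exactly the test above. */
    static int vmxIsFeatureUsable(VMXCAPMSR Msr, uint32_t fCtrl)
    {
        return (Msr.n.allowed1 & fCtrl) != 0;
    }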
trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r73292 → r73293)

      int32_t                        rcInit;

 -    /** CPUID 0x80000001 ecx:edx features */
 -    struct
 -    {
 -        uint32_t                   u32AMDFeatureECX;
 -        uint32_t                   u32AMDFeatureEDX;
 -    } cpuid;
 -
      /** If set, VT-x/AMD-V is enabled globally at init time, otherwise it's
       * enabled and disabled each time it's used to execute guest code. */
 …
   *
   * @returns VBox status code (will only fail if out of memory).
 - */
 -static int hmR0InitIntel(uint32_t u32FeaturesECX, uint32_t u32FeaturesEDX)
 + * @param   uFeatEcx    Standard cpuid:1 feature ECX leaf.
 + * @param   uFeatEdx    Standard cpuid:1 feature EDX leaf.
 + */
 +static int hmR0InitIntel(uint32_t uFeatEcx, uint32_t uFeatEdx)
  {
      /*
 …
       * We also assume all VT-x-enabled CPUs support fxsave/fxrstor.
       */
 -    if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
 -         && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
 -         && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
 -       )
 +    if (   (uFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
 +        && (uFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
 +        && (uFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
      {
          /* Read this MSR now as it may be useful for error reporting when initializing VT-x fails. */
 -        g_HmR0.vmx.Msrs.u64FeatureCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
 +        g_HmR0.vmx.Msrs.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
 …
          {
              /* Reread in case it was changed by SUPR0GetVmxUsability(). */
 -            g_HmR0.vmx.Msrs.u64FeatureCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
 +            g_HmR0.vmx.Msrs.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
 …
              g_HmR0.vmx.Msrs.u64Basic = ASMRdMsr(MSR_IA32_VMX_BASIC);
              /* KVM workaround: Intel SDM section 34.15.5 describes that MSR_IA32_SMM_MONITOR_CTL
 -             * depends on bit 49 of MSR_IA32_VMX_BASIC_INFO while table 35-2 says that this MSR
 -             * is available if either VMX or SMX is supported. */
 +             * depends on bit 49 of MSR_IA32_VMX_BASIC while table 35-2 says that this MSR is
 +             * available if either VMX or SMX is supported. */
              if (MSR_IA32_VMX_BASIC_DUAL_MON(g_HmR0.vmx.Msrs.u64Basic))
                  g_HmR0.vmx.u64HostSmmMonitorCtl = ASMRdMsr(MSR_IA32_SMM_MONITOR_CTL);
 -            g_HmR0.vmx.Msrs.VmxPinCtls.u  = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
 -            g_HmR0.vmx.Msrs.VmxProcCtls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
 -            g_HmR0.vmx.Msrs.VmxExit.u     = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
 -            g_HmR0.vmx.Msrs.VmxEntry.u    = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
 +            g_HmR0.vmx.Msrs.PinCtls.u   = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
 +            g_HmR0.vmx.Msrs.ProcCtls.u  = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
 +            g_HmR0.vmx.Msrs.ExitCtls.u  = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
 +            g_HmR0.vmx.Msrs.EntryCtls.u = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
              g_HmR0.vmx.Msrs.u64Misc      = ASMRdMsr(MSR_IA32_VMX_MISC);
              g_HmR0.vmx.Msrs.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
 …
              g_HmR0.vmx.Msrs.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
              g_HmR0.vmx.Msrs.u64VmcsEnum  = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
 +            if (MSR_IA32_VMX_BASIC_TRUE_CONTROLS(g_HmR0.vmx.Msrs.u64Basic))
 +            {
 +                g_HmR0.vmx.Msrs.TruePinCtls.u   = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
 +                g_HmR0.vmx.Msrs.TrueProcCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
 +                g_HmR0.vmx.Msrs.TrueEntryCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
 +                g_HmR0.vmx.Msrs.TrueExitCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
 +            }
 +
              /* VPID 16 bits ASID. */
              g_HmR0.uMaxAsid = 0x10000; /* exclusive */

 -            if (g_HmR0.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
 +            if (g_HmR0.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
              {
 -                g_HmR0.vmx.Msrs.VmxProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
 -                if (g_HmR0.vmx.Msrs.VmxProcCtls2.n.allowed1 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT | VMX_VMCS_CTRL_PROC_EXEC2_VPID))
 +                g_HmR0.vmx.Msrs.ProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
 +                if (g_HmR0.vmx.Msrs.ProcCtls2.n.allowed1 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT | VMX_VMCS_CTRL_PROC_EXEC2_VPID))
                      g_HmR0.vmx.Msrs.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);

 -                if (g_HmR0.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC)
 +                if (g_HmR0.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC)
                      g_HmR0.vmx.Msrs.u64Vmfunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
              }
 …
               * Timer Does Not Count Down at the Rate Specified" erratum.
               */
 -            if (g_HmR0.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER)
 +            if (g_HmR0.vmx.Msrs.PinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER)
              {
                  g_HmR0.vmx.fUsePreemptTimer = true;
 …
   *
   * @returns VBox status code.
 - */
 -static int hmR0InitAmd(uint32_t u32FeaturesEDX, uint32_t uMaxExtLeaf)
 + * @param   uFeatEdx        Standard cpuid:1 feature EDX leaf.
 + * @param   uExtFeatEcx     Extended cpuid:0x80000001 feature ECX leaf.
 + * @param   uMaxExtLeaf     Extended cpuid:0x80000000 feature maximum valid leaf.
 + */
 +static int hmR0InitAmd(uint32_t uFeatEdx, uint32_t uExtFeatEcx, uint32_t uMaxExtLeaf)
  {
      /*
 -     * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
 -     * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
 +     * Read all SVM MSRs if SVM is available.
 +     * We also require all SVM-enabled CPUs to support rdmsr/wrmsr and fxsave/fxrstor.
       */
      int rc;
 -    if (   (g_HmR0.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
 -        && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
 -        && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
 +    if (   (uExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
 +        && (uFeatEdx    & X86_CPUID_FEATURE_EDX_MSR)
 +        && (uFeatEdx    & X86_CPUID_FEATURE_EDX_FXSR)
          && ASMIsValidExtRange(uMaxExtLeaf)
 -        && uMaxExtLeaf >= 0x8000000a
 -       )
 +        && uMaxExtLeaf >= 0x8000000a)
      {
          /* Call the global AMD-V initialization routine. */
 …
      else
      {
 -        rc = VINF_SUCCESS; /* Don't fail if AMD-V is not supported. See @bugref{6785}. */
 +        /* Don't fail if AMD-V is not supported. See @bugref{6785}. */
 +        rc = VINF_SUCCESS;
          g_HmR0.rcInit = VERR_SVM_NO_SVM;
      }
 …
      {
          /* Standard features. */
 -        uint32_t uMaxLeaf, u32VendorEBX, u32VendorECX, u32VendorEDX;
 -        ASMCpuId(0, &uMaxLeaf, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
 +        uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
 +        ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
          if (ASMIsValidStdRange(uMaxLeaf))
          {
 -            uint32_t u32FeaturesECX, u32FeaturesEDX, u32Dummy;
 -            ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
 -
 -            /* Query AMD features. */
 -            uint32_t uMaxExtLeaf = ASMCpuId_EAX(0x80000000);
 -            if (ASMIsValidExtRange(uMaxExtLeaf))
 -                ASMCpuId(0x80000001, &u32Dummy, &u32Dummy,
 -                         &g_HmR0.cpuid.u32AMDFeatureECX,
 -                         &g_HmR0.cpuid.u32AMDFeatureEDX);
 -            else
 -                g_HmR0.cpuid.u32AMDFeatureECX = g_HmR0.cpuid.u32AMDFeatureEDX = 0;
 +            uint32_t uFeatEcx, uFeatEdx, uDummy;
 +            ASMCpuId(1, &uDummy, &uDummy, &uFeatEcx, &uFeatEdx);

              /* Go to CPU specific initialization code. */
 -            if (   ASMIsIntelCpuEx(u32VendorEBX, u32VendorECX, u32VendorEDX)
 -                || ASMIsViaCentaurCpuEx(u32VendorEBX, u32VendorECX, u32VendorEDX))
 +            if (   ASMIsIntelCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
 +                || ASMIsViaCentaurCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
              {
 -                rc = hmR0InitIntel(u32FeaturesECX, u32FeaturesEDX);
 +                rc = hmR0InitIntel(uFeatEcx, uFeatEdx);
                  if (RT_FAILURE(rc))
                      return rc;
              }
 -            else if (ASMIsAmdCpuEx(u32VendorEBX, u32VendorECX, u32VendorEDX))
 +            else if (ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
              {
 -                rc = hmR0InitAmd(u32FeaturesEDX, uMaxExtLeaf);
 +                /* Query extended features for SVM capability. */
 +                uint32_t uExtFeatEcx;
 +                uint32_t const uMaxExtLeaf = ASMCpuId_EAX(0x80000000);
 +                if (ASMIsValidExtRange(uMaxExtLeaf))
 +                    ASMCpuId(0x80000001, &uDummy, &uDummy, &uExtFeatEcx, &uDummy);
 +                else
 +                    uExtFeatEcx = 0;
 +
 +                rc = hmR0InitAmd(uFeatEdx, uExtFeatEcx, uMaxExtLeaf);
                  if (RT_FAILURE(rc))
                      return rc;
 …
      /*
       * Copy globals to the VM structure.
       */
      pVM->hm.s.vmx.fSupported = g_HmR0.vmx.fSupported;
      pVM->hm.s.svm.fSupported = g_HmR0.svm.fSupported;
 -
 -    pVM->hm.s.vmx.fUsePreemptTimer     &= g_HmR0.vmx.fUsePreemptTimer; /* Can be overridden by CFGM. See HMR3Init(). */
 -    pVM->hm.s.vmx.cPreemptTimerShift    = g_HmR0.vmx.cPreemptTimerShift;
 -    pVM->hm.s.vmx.u64HostCr4            = g_HmR0.vmx.u64HostCr4;
 -    pVM->hm.s.vmx.u64HostEfer           = g_HmR0.vmx.u64HostEfer;
 -    pVM->hm.s.vmx.u64HostSmmMonitorCtl  = g_HmR0.vmx.u64HostSmmMonitorCtl;
 -    pVM->hm.s.vmx.Msrs                  = g_HmR0.vmx.Msrs;
 -    pVM->hm.s.svm.u64MsrHwcr            = g_HmR0.svm.u64MsrHwcr;
 -    pVM->hm.s.svm.u32Rev                = g_HmR0.svm.u32Rev;
 -    pVM->hm.s.svm.u32Features           = g_HmR0.svm.u32Features;
 -    pVM->hm.s.cpuid.u32AMDFeatureECX    = g_HmR0.cpuid.u32AMDFeatureECX;
 -    pVM->hm.s.cpuid.u32AMDFeatureEDX    = g_HmR0.cpuid.u32AMDFeatureEDX;
 -    pVM->hm.s.rcInit                    = g_HmR0.rcInit;
 -    pVM->hm.s.uMaxAsid                  = g_HmR0.uMaxAsid;
 -
 -    if (!pVM->hm.s.cMaxResumeLoops) /* allow ring-3 overrides */
 +    Assert(!(pVM->hm.s.vmx.fSupported && pVM->hm.s.svm.fSupported));
 +    if (pVM->hm.s.vmx.fSupported)
 +    {
 +        pVM->hm.s.vmx.fUsePreemptTimer     &= g_HmR0.vmx.fUsePreemptTimer; /* Can be overridden by CFGM. See HMR3Init(). */
 +        pVM->hm.s.vmx.cPreemptTimerShift    = g_HmR0.vmx.cPreemptTimerShift;
 +        pVM->hm.s.vmx.u64HostCr4            = g_HmR0.vmx.u64HostCr4;
 +        pVM->hm.s.vmx.u64HostEfer           = g_HmR0.vmx.u64HostEfer;
 +        pVM->hm.s.vmx.u64HostSmmMonitorCtl  = g_HmR0.vmx.u64HostSmmMonitorCtl;
 +        pVM->hm.s.vmx.Msrs                  = g_HmR0.vmx.Msrs;
 +    }
 +    else if (pVM->hm.s.svm.fSupported)
 +    {
 +        pVM->hm.s.svm.u64MsrHwcr    = g_HmR0.svm.u64MsrHwcr;
 +        pVM->hm.s.svm.u32Rev        = g_HmR0.svm.u32Rev;
 +        pVM->hm.s.svm.u32Features   = g_HmR0.svm.u32Features;
 +    }
 +    pVM->hm.s.rcInit   = g_HmR0.rcInit;
 +    pVM->hm.s.uMaxAsid = g_HmR0.uMaxAsid;
 +
 +    /*
 +     * Set default maximum inner loops in ring-0 before returning to ring-3.
 +     * Can be overriden using CFGM.
 +     */
 +    if (!pVM->hm.s.cMaxResumeLoops)
      {
          pVM->hm.s.cMaxResumeLoops = 1024;
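New in this revision is the conditional read of the four TRUE capability MSRs, which exist only when bit 55 of MSR_IA32_VMX_BASIC is set; that is the condition MSR_IA32_VMX_BASIC_TRUE_CONTROLS tests. A minimal standalone sketch of the gating, with rdmsr64() as an assumed host accessor and MSR indices taken from the Intel SDM:

    #include <stdint.h>

    #define MY_MSR_IA32_VMX_BASIC               0x480
    #define MY_MSR_IA32_VMX_TRUE_PINBASED_CTLS  0x48d
    #define MY_MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x48e
    #define MY_MSR_IA32_VMX_TRUE_EXIT_CTLS      0x48f
    #define MY_MSR_IA32_VMX_TRUE_ENTRY_CTLS     0x490
    #define VMX_BASIC_TRUE_CTLS_BIT             55      /* SDM: bit 55 of IA32_VMX_BASIC */

    extern uint64_t rdmsr64(uint32_t idMsr);            /* assumed platform MSR read */

    /* Read the TRUE control MSRs into pauTrue[4] when advertised; leave
     * them untouched otherwise, exactly as the hunk above does. */
    static void readTrueCtlsMsrs(uint64_t pauTrue[4])
    {
        if (rdmsr64(MY_MSR_IA32_VMX_BASIC) & (UINT64_C(1) << VMX_BASIC_TRUE_CTLS_BIT))
        {
            pauTrue[0] = rdmsr64(MY_MSR_IA32_VMX_TRUE_PINBASED_CTLS);
            pauTrue[1] = rdmsr64(MY_MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
            pauTrue[2] = rdmsr64(MY_MSR_IA32_VMX_TRUE_EXIT_CTLS);
            pauTrue[3] = rdmsr64(MY_MSR_IA32_VMX_TRUE_ENTRY_CTLS);
        }
    }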
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r73287 → r73293)

       * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
       */
 -    if (   (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
 +    if (    pVM->cpum.ro.HostFeatures.fRdTscP
          && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
      {
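This hunk drops the last ring-0 consumer of the cached u32AMDFeatureEDX dword in favour of CPUM's precomputed host-feature flag. Both encode the same probe: CPUID leaf 0x80000001, EDX bit 27 (RDTSCP). A standalone version using the GCC/Clang intrinsic might look like this (illustrative, not VirtualBox code):

    #include <cpuid.h>
    #include <stdint.h>

    /* Returns non-zero when the host CPU supports the RDTSCP instruction. */
    static int hostHasRdTscP(void)
    {
        uint32_t uEax, uEbx, uEcx, uEdx;
        if (!__get_cpuid(0x80000001, &uEax, &uEbx, &uEcx, &uEdx))
            return 0;                /* extended leaf not supported */
        return (uEdx >> 27) & 1;     /* EDX bit 27 = RDTSCP */
    }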
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r73292 → r73293)

      hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);

 -    if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
 +    if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
          hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
 …
      /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
 -    if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
 +    if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
      {
          rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
 …
          /* Get the allocated virtual-APIC page from the APIC device for transparent TPR accesses. */
          if (   PDMHasApic(pVM)
 -            && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW))
 +            && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW))
          {
              rc = APICGetApicPageForCpu(pVCpu, &pVCpu->hm.s.vmx.HCPhysVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
 …
           * update HMAreMsrBitmapsAvailable().
           */
 -        if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
 +        if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
          {
              rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
 …
  {
      PVM pVM = pVCpu->CTX_SUFF(pVM);
 -    uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;   /* Bits set here must always be set. */
 -    uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;      /* Bits cleared here must always be cleared. */
 +    uint32_t       fVal = pVM->hm.s.vmx.Msrs.PinCtls.n.disallowed0;      /* Bits set here must always be set. */
 +    uint32_t const fZap = pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1;         /* Bits cleared here must always be cleared. */

      fVal |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT              /* External interrupts cause a VM-exit. */
           |  VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;                 /* Non-maskable interrupts (NMIs) cause a VM-exit. */

 -    if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
 +    if (pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
          fVal |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI;          /* Use virtual NMIs and virtual-NMI blocking features. */
 …
      if (pVM->hm.s.vmx.fUsePreemptTimer)
      {
 -        Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
 +        Assert(pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
          fVal |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
      }
 …
      if (pVM->hm.s.fPostedIntrs)
      {
 -        Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR);
 -        Assert(pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT);
 +        Assert(pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR);
 +        Assert(pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT);
          fVal |= VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR;
      }
 …
      {
          LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
 -                    pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, fVal, fZap));
 +                    pVM->hm.s.vmx.Msrs.PinCtls.n.disallowed0, fVal, fZap));
          pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
          return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
 …
  {
      PVM pVM = pVCpu->CTX_SUFF(pVM);
 -    uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
 -    uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
 +    uint32_t       fVal = pVM->hm.s.vmx.Msrs.ProcCtls2.n.disallowed0;    /* Bits set here must be set in the VMCS. */
 +    uint32_t const fZap = pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1;       /* Bits cleared here must be cleared in the VMCS. */

      /* WBINVD causes a VM-exit. */
 -    if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
 +    if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
          fVal |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;
 …
       * it to the guest. Without this, guest executing INVPCID would cause a #UD.
       */
 -    if (   (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
 +    if (   (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
          && pVM->cpum.ro.GuestFeatures.fInvpcid)
          fVal |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
 …
      {
          /* Enable APIC-register virtualization. */
 -        Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
 +        Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
          fVal |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT;

          /* Enable virtual-interrupt delivery. */
 -        Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
 +        Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
          fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY;
      }
 …
      /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
       *        done dynamically. */
 -    if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
 +    if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
      {
          Assert(pVM->hm.s.vmx.HCPhysApicAccess);
 …
      /* Enable RDTSCP. */
 -    if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
 +    if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
          fVal |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;

      /* Enable Pause-Loop exiting. */
 -    if (   pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
 +    if (   pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
          && pVM->hm.s.vmx.cPleGapTicks
          && pVM->hm.s.vmx.cPleWindowTicks)
 …
      {
          LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
 -                    pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, fVal, fZap));
 +                    pVM->hm.s.vmx.Msrs.ProcCtls2.n.disallowed0, fVal, fZap));
          pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
          return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
 …
  {
      PVM pVM = pVCpu->CTX_SUFF(pVM);
 -    uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;  /* Bits set here must be set in the VMCS. */
 -    uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
 +    uint32_t       fVal = pVM->hm.s.vmx.Msrs.ProcCtls.n.disallowed0;     /* Bits set here must be set in the VMCS. */
 +    uint32_t const fZap = pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1;        /* Bits cleared here must be cleared in the VMCS. */

      fVal |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT                 /* HLT causes a VM-exit. */
 …
      /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later, check if it's not -always- needed to be set or clear. */
 -    if (   !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
 -        ||  (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
 +    if (   !(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
 +        ||  (pVM->hm.s.vmx.Msrs.ProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
      {
          LogRelFunc(("Unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
 …
      /* Use TPR shadowing if supported by the CPU. */
      if (   PDMHasApic(pVM)
 -        && pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
 +        && pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
      {
          Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
 …
      /* Use MSR-bitmaps if supported by the CPU. */
 -    if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
 +    if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
      {
          fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
 …
      /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
 -    if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
 +    if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
          fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
 …
      {
          LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
 -                    pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, fVal, fZap));
 +                    pVM->hm.s.vmx.Msrs.ProcCtls.n.disallowed0, fVal, fZap));
          pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
          return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
 …
      Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
  #if HC_ARCH_BITS == 64
 -    if (   (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
 -        && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1  & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
 -        && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1  & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
 +    if (   (pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
 +        && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
 +        && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
      {
          pVM->hm.s.vmx.fSupportsVmcsEfer = true;
 …
          && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
      {
 -        /* Assert that host is PAE capable. */
 -        Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
 +        /* Assert that host is NX capable. */
 +        Assert(pVCpu->CTX_SUFF(pVM)->cpum.ro.HostFeatures.fNoExecute);
          return true;
      }
 …
  {
      PVM pVM = pVCpu->CTX_SUFF(pVM);
 -    uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0;     /* Bits set here must be set in the VMCS. */
 -    uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1;        /* Bits cleared here must be cleared in the VMCS. */
 +    uint32_t       fVal = pVM->hm.s.vmx.Msrs.EntryCtls.n.disallowed0;    /* Bits set here must be set in the VMCS. */
 +    uint32_t const fZap = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1;       /* Bits cleared here must be cleared in the VMCS. */

      /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */
 …
      {
          Log4Func(("Invalid VM-entry controls combo! Cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
 -                  pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, fVal, fZap));
 +                  pVM->hm.s.vmx.Msrs.EntryCtls.n.disallowed0, fVal, fZap));
          pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
          return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
 …
  {
      PVM pVM = pVCpu->CTX_SUFF(pVM);
 -    uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0;      /* Bits set here must be set in the VMCS. */
 -    uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1;         /* Bits cleared here must be cleared in the VMCS. */
 +    uint32_t       fVal = pVM->hm.s.vmx.Msrs.ExitCtls.n.disallowed0;     /* Bits set here must be set in the VMCS. */
 +    uint32_t const fZap = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1;        /* Bits cleared here must be cleared in the VMCS. */

      /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
 …
      /* Enable saving of the VMX preemption timer value on VM-exit. */
      if (    pVM->hm.s.vmx.fUsePreemptTimer
 -        && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
 +        && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
          fVal |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
 …
      {
          LogRelFunc(("Invalid VM-exit controls combo! cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
 -                    pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, fVal, fZap));
 +                    pVM->hm.s.vmx.Msrs.ExitCtls.n.disallowed0, fVal, fZap));
          pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
          return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
 …
      /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
      PVM pVM = pVCpu->CTX_SUFF(pVM);
 -    if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
 +    if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
      {
          uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
 …
      /* We need to intercept reads too, see @bugref{7386#c16}. */
 -    if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
 +    if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
          hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
      Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pCtx->msrEFER,
 …
  DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
  {
 -    if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
 +    if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
      {
          if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
 …
  DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
  {
 -    if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
 +    if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
      {
          if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
 …
       * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
       */
 -    Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
 +    Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
      fIntrState = 0;
 …
      /*
       * Sanitize the control stuff.
       */
 -    pDbgState->fCpe2Extra       &= pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;
 +    pDbgState->fCpe2Extra       &= pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1;
      if (pDbgState->fCpe2Extra)
          pDbgState->fCpe1Extra   |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
 -    pDbgState->fCpe1Extra       &= pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;
 -    pDbgState->fCpe1Unwanted    &= ~pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;
 +    pDbgState->fCpe1Extra       &= pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1;
 +    pDbgState->fCpe1Unwanted    &= ~pVM->hm.s.vmx.Msrs.ProcCtls.n.disallowed0;
      if (pVCpu->hm.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
      {
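All of these hunks share the fVal/fZap pattern visible above: fVal starts from the must-be-one mask and accumulates wanted controls, fZap is the may-be-one mask, and the final (fVal & fZap) != fVal test catches any requested control the CPU cannot set. A compact, self-contained restatement, with illustrative names rather than the VirtualBox functions:

    #include <stdint.h>

    /* Build a VMX control word from the capability MSR halves: fDisallowed0
     * lists bits the CPU forces to 1, fAllowed1 lists bits it permits. */
    static int vmxBuildCtls(uint32_t fDisallowed0, uint32_t fAllowed1,
                            uint32_t fWanted, uint32_t *pfCtls)
    {
        uint32_t       fVal = fDisallowed0;  /* bits that must always be set */
        uint32_t const fZap = fAllowed1;     /* bits that may be set */

        fVal |= fWanted;                     /* request the features we need */

        /* Any requested bit outside the allowed-1 mask makes the combination
         * unsupported; the real code logs it and fails with
         * VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO. */
        if ((fVal & fZap) != fVal)
            return -1;

        *pfCtls = fVal;
        return 0;
    }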
trunk/src/VBox/VMM/VMMR3/HM.cpp (r73292 → r73293)

      {
          LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.rcInit));
 -        LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.Msrs.u64FeatureCtrl));
 +        LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.Msrs.u64FeatCtrl));
          switch (pVM->hm.s.rcInit)
          {
 …
  /**
 - * Finish VT-x initialization (after ring-0 init).
 - *
 - * @returns VBox status code.
 - * @param   pVM The cross context VM structure.
 - */
 -static int hmR3InitFinalizeR0Intel(PVM pVM)
 -{
 -    int rc;
 -
 -    Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
 -    AssertLogRelReturn(pVM->hm.s.vmx.Msrs.u64FeatureCtrl != 0, VERR_HM_IPE_4);
 -
 -    uint64_t val;
 -    uint64_t zap;
 -
 -    LogRel(("HM: Using VT-x implementation 2.0\n"));
 -    LogRel(("HM: Host CR4                  = %#RX64\n", pVM->hm.s.vmx.u64HostCr4));
 -    LogRel(("HM: Host EFER                 = %#RX64\n", pVM->hm.s.vmx.u64HostEfer));
 -    LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL  = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtl));
 -
 -    val = pVM->hm.s.vmx.Msrs.u64FeatureCtrl;
 -    LogRel(("HM: MSR_IA32_FEATURE_CONTROL  = %#RX64\n", val));
 + * Reports MSR_IA32_FEATURE_CONTROL MSR to the log.
 + *
 + * @param   fFeatMsr    The feature control MSR value.
 + */
 +static void hmR3VmxReportFeatCtlMsr(uint64_t fFeatMsr)
 +{
 +    uint64_t const val = fFeatMsr;
 +    LogRel(("HM: MSR_IA32_FEATURE_CONTROL  = %#RX64\n", val));
      HMVMX_REPORT_MSR_CAP(val, "LOCK",          MSR_IA32_FEATURE_CONTROL_LOCK);
      HMVMX_REPORT_MSR_CAP(val, "SMX_VMXON",     MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
 …
      HMVMX_REPORT_MSR_CAP(val, "SGX_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SGX_GLOBAL_EN);
      HMVMX_REPORT_MSR_CAP(val, "LMCE",          MSR_IA32_FEATURE_CONTROL_LMCE);
 -    if (!(pVM->hm.s.vmx.Msrs.u64FeatureCtrl & MSR_IA32_FEATURE_CONTROL_LOCK))
 +    if (!(val & MSR_IA32_FEATURE_CONTROL_LOCK))
          LogRel(("HM: MSR_IA32_FEATURE_CONTROL lock bit not set, possibly bad hardware!\n"));
 -
 -    LogRel(("HM: MSR_IA32_VMX_BASIC        = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Basic));
 -    LogRel(("HM:   VMCS id                        = %#x\n",      MSR_IA32_VMX_BASIC_VMCS_ID(pVM->hm.s.vmx.Msrs.u64Basic)));
 -    LogRel(("HM:   VMCS size                      = %u bytes\n", MSR_IA32_VMX_BASIC_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64Basic)));
 -    LogRel(("HM:   VMCS physical address limit    = %s\n",       MSR_IA32_VMX_BASIC_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.Msrs.u64Basic) ? "< 4 GB" : "None"));
 -    LogRel(("HM:   VMCS memory type               = %s\n",       hmR3VmxGetMemTypeDesc(pVM->hm.s.vmx.Msrs.u64Basic)));
 -    LogRel(("HM:   Dual-monitor treatment support = %RTbool\n",  MSR_IA32_VMX_BASIC_DUAL_MON(pVM->hm.s.vmx.Msrs.u64Basic)));
 -    LogRel(("HM:   OUTS & INS instruction-info    = %RTbool\n",  MSR_IA32_VMX_BASIC_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64Basic)));
 -    LogRel(("HM:   Supports true capability MSRs  = %RTbool\n",  MSR_IA32_VMX_BASIC_TRUE_CONTROLS(pVM->hm.s.vmx.Msrs.u64Basic)));
 -    LogRel(("HM: Max resume loops          = %u\n", pVM->hm.s.cMaxResumeLoops));
 -
 -    LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxPinCtls.u));
 -    val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;
 -    zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;
 +}
 +
 +
 +/**
 + * Reports MSR_IA32_VMX_BASIC MSR to the log.
 + *
 + * @param   uBasicMsr    The VMX basic MSR value.
 + */
 +static void hmR3VmxReportBasicMsr(uint64_t uBasicMsr)
 +{
 +    LogRel(("HM: MSR_IA32_VMX_BASIC        = %#RX64\n", uBasicMsr));
 +    LogRel(("HM:   VMCS id                        = %#x\n",      MSR_IA32_VMX_BASIC_VMCS_ID(uBasicMsr)));
 +    LogRel(("HM:   VMCS size                      = %u bytes\n", MSR_IA32_VMX_BASIC_VMCS_SIZE(uBasicMsr)));
 +    LogRel(("HM:   VMCS physical address limit    = %s\n",       MSR_IA32_VMX_BASIC_VMCS_PHYS_WIDTH(uBasicMsr) ? "< 4 GB"
 +                                                                                                              : "None"));
 +    LogRel(("HM:   VMCS memory type               = %s\n",       hmR3VmxGetMemTypeDesc(uBasicMsr)));
 +    LogRel(("HM:   Dual-monitor treatment support = %RTbool\n",  MSR_IA32_VMX_BASIC_DUAL_MON(uBasicMsr)));
 +    LogRel(("HM:   OUTS & INS instruction-info    = %RTbool\n",  MSR_IA32_VMX_BASIC_VMCS_INS_OUTS(uBasicMsr)));
 +    LogRel(("HM:   Supports true capability MSRs  = %RTbool\n",  MSR_IA32_VMX_BASIC_TRUE_CONTROLS(uBasicMsr)));
 +}
 +
 +
 +/**
 + * Reports MSR_IA32_PINBASED_CTLS to the log.
 + *
 + * @param   pVmxMsr    Pointer to the VMX MSR.
 + */
 +static void hmR3VmxReportPinBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
 +{
 +    uint64_t const val = pVmxMsr->n.allowed1;
 +    uint64_t const zap = pVmxMsr->n.disallowed0;
 +    LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVmxMsr->u));
      HMVMX_REPORT_FEAT(val, zap, "EXT_INT_EXIT",  VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT);
      HMVMX_REPORT_FEAT(val, zap, "NMI_EXIT",      VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT);
 …
      HMVMX_REPORT_FEAT(val, zap, "PREEMPT_TIMER", VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
      HMVMX_REPORT_FEAT(val, zap, "POSTED_INTR",   VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR);
 -
 -    LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls.u));
 -    val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;
 -    zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;
 +}
 +
 +
 +/**
 + * Reports MSR_IA32_VMX_PROCBASED_CTLS MSR to the log.
 + *
 + * @param   pVmxMsr    Pointer to the VMX MSR.
 + */
 +static void hmR3VmxReportProcBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
 +{
 +    uint64_t const val = pVmxMsr->n.allowed1;
 +    uint64_t const zap = pVmxMsr->n.disallowed0;
 +    LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVmxMsr->u));
      HMVMX_REPORT_FEAT(val, zap, "INT_WINDOW_EXIT",         VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
      HMVMX_REPORT_FEAT(val, zap, "USE_TSC_OFFSETTING",      VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING);
 …
      HMVMX_REPORT_FEAT(val, zap, "PAUSE_EXIT",              VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT);
      HMVMX_REPORT_FEAT(val, zap, "USE_SECONDARY_EXEC_CTRL", VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL);
 -    if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
 -    {
 -        LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.u));
 -        val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;
 -        zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0;
 -        HMVMX_REPORT_FEAT(val, zap, "VIRT_APIC", VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC);
 …       (the same PROC_EXEC2 feature list as in the new function below, indented one level deeper)
 -        HMVMX_REPORT_FEAT(val, zap, "TSC_SCALING", VMX_VMCS_CTRL_PROC_EXEC2_TSC_SCALING);
 -    }
 -
 -    LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS   = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxEntry.u));
 -    val = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1;
 -    zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0;
 +}
 +
 +
 +/**
 + * Reports MSR_IA32_VMX_PROCBASED_CTLS2 MSR to the log.
 + *
 + * @param   pVmxMsr    Pointer to the VMX MSR.
 + */
 +static void hmR3VmxReportProcBasedCtls2Msr(PCVMXCTLSMSR pVmxMsr)
 +{
 +    uint64_t const val = pVmxMsr->n.allowed1;
 +    uint64_t const zap = pVmxMsr->n.disallowed0;
 +    LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVmxMsr->u));
 +    HMVMX_REPORT_FEAT(val, zap, "VIRT_APIC",             VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC);
 +    HMVMX_REPORT_FEAT(val, zap, "EPT",                   VMX_VMCS_CTRL_PROC_EXEC2_EPT);
 +    HMVMX_REPORT_FEAT(val, zap, "DESCRIPTOR_TABLE_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT);
 +    HMVMX_REPORT_FEAT(val, zap, "RDTSCP",                VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP);
 +    HMVMX_REPORT_FEAT(val, zap, "VIRT_X2APIC",           VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC);
 +    HMVMX_REPORT_FEAT(val, zap, "VPID",                  VMX_VMCS_CTRL_PROC_EXEC2_VPID);
 +    HMVMX_REPORT_FEAT(val, zap, "WBINVD_EXIT",           VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT);
 +    HMVMX_REPORT_FEAT(val, zap, "UNRESTRICTED_GUEST",    VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST);
 +    HMVMX_REPORT_FEAT(val, zap, "APIC_REG_VIRT",         VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
 +    HMVMX_REPORT_FEAT(val, zap, "VIRT_INTR_DELIVERY",    VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
 +    HMVMX_REPORT_FEAT(val, zap, "PAUSE_LOOP_EXIT",       VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT);
 +    HMVMX_REPORT_FEAT(val, zap, "RDRAND_EXIT",           VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT);
 +    HMVMX_REPORT_FEAT(val, zap, "INVPCID",               VMX_VMCS_CTRL_PROC_EXEC2_INVPCID);
 +    HMVMX_REPORT_FEAT(val, zap, "VMFUNC",                VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC);
 +    HMVMX_REPORT_FEAT(val, zap, "VMCS_SHADOWING",        VMX_VMCS_CTRL_PROC_EXEC2_VMCS_SHADOWING);
 +    HMVMX_REPORT_FEAT(val, zap, "ENCLS_EXIT",            VMX_VMCS_CTRL_PROC_EXEC2_ENCLS_EXIT);
 +    HMVMX_REPORT_FEAT(val, zap, "RDSEED_EXIT",           VMX_VMCS_CTRL_PROC_EXEC2_RDSEED_EXIT);
 +    HMVMX_REPORT_FEAT(val, zap, "PML",                   VMX_VMCS_CTRL_PROC_EXEC2_PML);
 +    HMVMX_REPORT_FEAT(val, zap, "EPT_VE",                VMX_VMCS_CTRL_PROC_EXEC2_EPT_VE);
 +    HMVMX_REPORT_FEAT(val, zap, "CONCEAL_FROM_PT",       VMX_VMCS_CTRL_PROC_EXEC2_CONCEAL_FROM_PT);
 +    HMVMX_REPORT_FEAT(val, zap, "XSAVES_XRSTORS",        VMX_VMCS_CTRL_PROC_EXEC2_XSAVES_XRSTORS);
 +    HMVMX_REPORT_FEAT(val, zap, "TSC_SCALING",           VMX_VMCS_CTRL_PROC_EXEC2_TSC_SCALING);
 +}
 +
 +
 +/**
 + * Reports MSR_IA32_VMX_ENTRY_CTLS to the log.
 + *
 + * @param   pVmxMsr    Pointer to the VMX MSR.
 + */
 +static void hmR3VmxReportEntryCtlsMsr(PCVMXCTLSMSR pVmxMsr)
 +{
 +    uint64_t const val = pVmxMsr->n.allowed1;
 +    uint64_t const zap = pVmxMsr->n.disallowed0;
 +    LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS   = %#RX64\n", pVmxMsr->u));
      HMVMX_REPORT_FEAT(val, zap, "LOAD_DEBUG",          VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG);
      HMVMX_REPORT_FEAT(val, zap, "IA32E_MODE_GUEST",    VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
 …
      HMVMX_REPORT_FEAT(val, zap, "LOAD_GUEST_PAT_MSR",  VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR);
      HMVMX_REPORT_FEAT(val, zap, "LOAD_GUEST_EFER_MSR", VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR);
 -
 -    LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS    = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxExit.u));
 -    val = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1;
 -    zap = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0;
 +}
 +
 +
 +/**
 + * Reports MSR_IA32_VMX_EXIT_CTLS to the log.
 + *
 + * @param   pVmxMsr    Pointer to the VMX MSR.
 + */
 +static void hmR3VmxReportExitCtlsMsr(PCVMXCTLSMSR pVmxMsr)
 +{
 +    uint64_t const val = pVmxMsr->n.allowed1;
 +    uint64_t const zap = pVmxMsr->n.disallowed0;
 +    LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS    = %#RX64\n", pVmxMsr->u));
      HMVMX_REPORT_FEAT(val, zap, "SAVE_DEBUG",             VMX_VMCS_CTRL_EXIT_SAVE_DEBUG);
      HMVMX_REPORT_FEAT(val, zap, "HOST_ADDR_SPACE_SIZE",   VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE);
 …
      HMVMX_REPORT_FEAT(val, zap, "LOAD_HOST_EFER_MSR",     VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR);
      HMVMX_REPORT_FEAT(val, zap, "SAVE_VMX_PREEMPT_TIMER", VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER);
 +}
 +
 +
 +/**
 + * Reports MSR_IA32_VMX_EPT_VPID_CAP MSR to the log.
 + *
 + * @param   fCaps    The VMX EPT/VPID capability MSR value.
 + */
 +static void hmR3VmxReportEptVpidCapsMsr(uint64_t fCaps)
 +{
 +    LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", fCaps));
 +    HMVMX_REPORT_MSR_CAP(fCaps, "RWX_X_ONLY",                            MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_4",                    MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "EMT_UC",                                MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "EMT_WB",                                MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "PDE_2M",                                MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "PDPTE_1G",                              MSR_IA32_VMX_EPT_VPID_CAP_PDPTE_1G);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT",                                MSR_IA32_VMX_EPT_VPID_CAP_INVEPT);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "EPT_ACCESS_DIRTY",                      MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_SINGLE_CONTEXT",                 MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_ALL_CONTEXTS",                   MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID",                               MSR_IA32_VMX_EPT_VPID_CAP_INVVPID);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_INDIV_ADDR",                    MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT",                MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_ALL_CONTEXTS",                  MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS);
 +    HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS);
 +}
 +
 +
 +/**
 + * Reports MSR_IA32_VMX_MISC MSR to the log.
 + *
 + * @param   fMisc    The VMX misc. MSR value.
 + */
 +static void hmR3VmxReportMiscMsr(PVM pVM, uint64_t fMisc)
 +{
 +    LogRel(("HM: MSR_IA32_VMX_MISC         = %#RX64\n", fMisc));
 +    if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(fMisc) == pVM->hm.s.vmx.cPreemptTimerShift)
 +        LogRel(("HM:   PREEMPT_TSC_BIT             = %#x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(fMisc)));
 +    else
 +        LogRel(("HM:   PREEMPT_TSC_BIT             = %#x - erratum detected, using %#x instead\n",
 +                MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(fMisc), pVM->hm.s.vmx.cPreemptTimerShift));
 +    LogRel(("HM:   STORE_EFERLMA_VMEXIT        = %RTbool\n", MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(fMisc)));
 +    uint8_t const fActivityState = MSR_IA32_VMX_MISC_ACTIVITY_STATES(fMisc);
 +    LogRel(("HM:   ACTIVITY_STATES             = %#x\n", fActivityState));
 +    HMVMX_REPORT_MSR_CAP(fActivityState, "  HLT",       VMX_VMCS_GUEST_ACTIVITY_HLT);
 +    HMVMX_REPORT_MSR_CAP(fActivityState, "  SHUTDOWN",  VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN);
 +    HMVMX_REPORT_MSR_CAP(fActivityState, "  SIPI_WAIT", VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT);
 +    LogRel(("HM:   CR3_TARGET                  = %#x\n",     MSR_IA32_VMX_MISC_CR3_TARGET(fMisc)));
 +    LogRel(("HM:   MAX_MSR                     = %u\n",      MSR_IA32_VMX_MISC_MAX_MSR(fMisc)));
 +    LogRel(("HM:   RDMSR_SMBASE_MSR_SMM        = %RTbool\n", MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(fMisc)));
 +    LogRel(("HM:   SMM_MONITOR_CTL_B2          = %RTbool\n", MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(fMisc)));
 +    LogRel(("HM:   VMWRITE_VMEXIT_INFO         = %RTbool\n", MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(fMisc)));
 +    LogRel(("HM:   MSEG_ID                     = %#x\n",     MSR_IA32_VMX_MISC_MSEG_ID(fMisc)));
 +}
 +
 +
 +/**
 + * Reports MSR_IA32_VMX_VMCS_ENUM MSR to the log.
 + *
 + * @param   uVmcsEnum    The VMX VMCS enum MSR value.
 + */
 +static void hmR3VmxReportVmcsEnumMsr(uint64_t uVmcsEnum)
 +{
 +    uint64_t const val = uVmcsEnum;
 +    LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM    = %#RX64\n", val));
 +    LogRel(("HM:   HIGHEST_INDEX               = %#x\n", MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(val)));
 +}
 +
 +
 +/**
 + * Reports MSR_IA32_VMX_VMFUNC MSR to the log.
 + *
 + * @param   uVmFunc    The VMX VMFUNC MSR value.
 + */
 +static void hmR3VmxReportVmfuncMsr(uint64_t uVmFunc)
 +{
 +    LogRel(("HM: MSR_IA32_VMX_VMFUNC       = %#RX64\n", uVmFunc));
 +    HMVMX_REPORT_ALLOWED_FEAT(uVmFunc, "EPTP_SWITCHING", VMX_VMCS_CTRL_VMFUNC_EPTP_SWITCHING);
 +}
 +
 +
 +/**
 + * Reports VMX CR0, CR4 fixed MSRs.
 + *
 + * @param   pMsrs    Pointer to the VMX MSRs.
 + */
 +static void hmR3VmxReportCrFixedMsrs(PVMXMSRS pMsrs)
 +{
 +    LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0   = %#RX64\n", pMsrs->u64Cr0Fixed0));
 +    LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1   = %#RX64\n", pMsrs->u64Cr0Fixed1));
 +    LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0   = %#RX64\n", pMsrs->u64Cr4Fixed0));
 +    LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1   = %#RX64\n", pMsrs->u64Cr4Fixed1));
 +}
 +
 +
 +/**
 + * Finish VT-x initialization (after ring-0 init).
 + *
 + * @returns VBox status code.
 + * @param   pVM The cross context VM structure.
 + */
 +static int hmR3InitFinalizeR0Intel(PVM pVM)
 +{
 +    int rc;
 +
 +    Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
 +    AssertLogRelReturn(pVM->hm.s.vmx.Msrs.u64FeatCtrl != 0, VERR_HM_IPE_4);
 +
 +    LogRel(("HM: Using VT-x implementation 2.0\n"));
 +    LogRel(("HM: Max resume loops          = %u\n",     pVM->hm.s.cMaxResumeLoops));
 +    LogRel(("HM: Host CR4                  = %#RX64\n", pVM->hm.s.vmx.u64HostCr4));
 +    LogRel(("HM: Host EFER                 = %#RX64\n", pVM->hm.s.vmx.u64HostEfer));
 +    LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL  = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtl));
 +
 +    hmR3VmxReportFeatCtlMsr(pVM->hm.s.vmx.Msrs.u64FeatCtrl);
 +    hmR3VmxReportBasicMsr(pVM->hm.s.vmx.Msrs.u64Basic);
 +
 +    hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.vmx.Msrs.PinCtls);
 +    hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.vmx.Msrs.ProcCtls);
 +    if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
 +        hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.vmx.Msrs.ProcCtls2);
 +
 +    hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.vmx.Msrs.EntryCtls);
 +    hmR3VmxReportExitCtlsMsr(&pVM->hm.s.vmx.Msrs.ExitCtls);
 +
 +    if (MSR_IA32_VMX_BASIC_TRUE_CONTROLS(pVM->hm.s.vmx.Msrs.u64Basic))
 +    {
 +        /* We don't do extensive dumping of the true capability MSRs as we don't use them yet. */
 +        /** @todo Consider using true capability MSRs and dumping them extensively. */
 +        LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS   = %#RX64\n", pVM->hm.s.vmx.Msrs.TruePinCtls));
 +        LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS  = %#RX64\n", pVM->hm.s.vmx.Msrs.TrueProcCtls));
 +        LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS      = %#RX64\n", pVM->hm.s.vmx.Msrs.TrueEntryCtls));
 +        LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS       = %#RX64\n", pVM->hm.s.vmx.Msrs.TrueExitCtls));
 +    }
 +
 +    hmR3VmxReportMiscMsr(pVM, pVM->hm.s.vmx.Msrs.u64Misc);
 +    hmR3VmxReportVmcsEnumMsr(pVM->hm.s.vmx.Msrs.u64VmcsEnum);
      if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps)
 -    {
 -        val = pVM->hm.s.vmx.Msrs.u64EptVpidCaps;
 -        LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", val));
 -        HMVMX_REPORT_MSR_CAP(val, "RWX_X_ONLY", MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
 …       (the same capability list as in hmR3VmxReportEptVpidCapsMsr above, indented one level deeper)
 -        HMVMX_REPORT_MSR_CAP(val, "INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS);
 -    }
 -
 -    val = pVM->hm.s.vmx.Msrs.u64Misc;
 -    LogRel(("HM: MSR_IA32_VMX_MISC         = %#RX64\n", val));
 -    if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val) == pVM->hm.s.vmx.cPreemptTimerShift)
 -        LogRel(("HM:   PREEMPT_TSC_BIT             = %#x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val)));
 -    else
 -    {
 -        LogRel(("HM:   PREEMPT_TSC_BIT             = %#x - erratum detected, using %#x instead\n",
 -                MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val), pVM->hm.s.vmx.cPreemptTimerShift));
 -    }
 -
 -    LogRel(("HM:   STORE_EFERLMA_VMEXIT        = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(val))));
 -    LogRel(("HM:   ACTIVITY_STATES             = %#x\n",     MSR_IA32_VMX_MISC_ACTIVITY_STATES(val)));
 -    LogRel(("HM:   CR3_TARGET                  = %#x\n",     MSR_IA32_VMX_MISC_CR3_TARGET(val)));
 -    LogRel(("HM:   MAX_MSR                     = %u\n",      MSR_IA32_VMX_MISC_MAX_MSR(val)));
 -    LogRel(("HM:   RDMSR_SMBASE_MSR_SMM        = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(val))));
 -    LogRel(("HM:   SMM_MONITOR_CTL_B2          = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(val))));
 -    LogRel(("HM:   VMWRITE_VMEXIT_INFO         = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(val))));
 -    LogRel(("HM:   MSEG_ID                     = %#x\n",     MSR_IA32_VMX_MISC_MSEG_ID(val)));
 -
 -    /* Paranoia */
 -    AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.Msrs.u64Misc) >= 512);
 -
 -    LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0   = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr0Fixed0));
 -    LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1   = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr0Fixed1));
 -    LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0   = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr4Fixed0));
 -    LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1   = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr4Fixed1));
 -
 -    val = pVM->hm.s.vmx.Msrs.u64VmcsEnum;
 -    LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM    = %#RX64\n", val));
 -    LogRel(("HM:   HIGHEST_INDEX               = %#x\n", MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(val)));
 -
 -    val = pVM->hm.s.vmx.Msrs.u64Vmfunc;
 -    if (val)
 -    {
 -        LogRel(("HM: MSR_IA32_VMX_VMFUNC       = %#RX64\n", val));
 -        HMVMX_REPORT_ALLOWED_FEAT(val, "EPTP_SWITCHING", VMX_VMCS_CTRL_VMFUNC_EPTP_SWITCHING);
 -    }
 -
 -    LogRel(("HM: APIC-access page physaddr = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
 -
 +        hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.vmx.Msrs.u64EptVpidCaps);
 +    if (pVM->hm.s.vmx.Msrs.u64Vmfunc)
 +        hmR3VmxReportVmfuncMsr(pVM->hm.s.vmx.Msrs.u64Vmfunc);
 +    hmR3VmxReportCrFixedMsrs(&pVM->hm.s.vmx.Msrs);
 +
 +    LogRel(("HM: APIC-access page physaddr   = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
      for (VMCPUID i = 0; i < pVM->cCpus; i++)
      {
 -        LogRel(("HM: VCPU%3d: MSR bitmap physaddr  = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
 -        LogRel(("HM: VCPU%3d: VMCS physaddr        = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs));
 +        LogRel(("HM: VCPU%3d: MSR bitmap physaddr    = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
 +        LogRel(("HM: VCPU%3d: VMCS physaddr          = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs));
      }
 …
      AssertLogRelReturn(   !pVM->hm.s.fNestedPaging
 -                       || (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT),
 +                       || (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT),
                         VERR_HM_IPE_1);
      AssertLogRelReturn(   !pVM->hm.s.vmx.fUnrestrictedGuest
 -                       || (   (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST)
 +                       || (   (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST)
                             && pVM->hm.s.fNestedPaging),
                         VERR_HM_IPE_1);
 …
       * Enable VPID if configured and supported.
       */
 -    if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
 +    if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
          pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid;
 …
       * Enable APIC register virtualization and virtual-interrupt delivery if supported.
       */
 -    if (   (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT)
 -        && (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY))
 +    if (   (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT)
 +        && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY))
          pVM->hm.s.fVirtApicRegs = true;
 …
      /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
       *        here. */
 -    if (   (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR)
 -        && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT))
 +    if (   (pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR)
 +        && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT))
          pVM->hm.s.fPostedIntrs = true;
  #endif
 …
       * in Nehalems and secondary VM exec. controls should be supported in all of them, but nonetheless it's Intel...
       */
 -    if (   !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
 +    if (   !(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
          && CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
      {
 …
          rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
          AssertRCReturn(rc, rc);
 -        LogRel(("HM: Real Mode TSS guest physaddr  = %#RGp\n", GCPhys));
 +        LogRel(("HM: Real Mode TSS guest physaddr    = %#RGp\n", GCPhys));

          rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
          AssertRCReturn(rc, rc);
 -        LogRel(("HM: Non-Paging Mode EPT CR3       = %#RGp\n", GCPhys));
 +        LogRel(("HM: Non-Paging Mode EPT CR3         = %#RGp\n", GCPhys));
      }
 …
 -    LogRel(("HM: Supports VMCS EFER fields     = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEfer));
 +    LogRel(("HM: Supports VMCS EFER fields       = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEfer));
      LogRel(("HM: Enabled VMX\n"));
      pVM->hm.s.vmx.fEnabled = true;
 …
          LogRel(("HM: Enabled nested paging\n"));
          if (pVM->hm.s.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
 -            LogRel(("HM:   EPT flush type            = Single context\n"));
 +            LogRel(("HM:   EPT flush type              = Single context\n"));
          else if (pVM->hm.s.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_ALL_CONTEXTS)
 -            LogRel(("HM:   EPT flush type            = All contexts\n"));
 +            LogRel(("HM:   EPT flush type              = All contexts\n"));
          else if (pVM->hm.s.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_NOT_SUPPORTED)
 -            LogRel(("HM:   EPT flush type            = Not supported\n"));
 +            LogRel(("HM:   EPT flush type              = Not supported\n"));
          else
 -            LogRel(("HM:   EPT flush type            = %#x\n", pVM->hm.s.vmx.enmTlbFlushEpt));
 +            LogRel(("HM:   EPT flush type              = %#x\n", pVM->hm.s.vmx.enmTlbFlushEpt));

          if (pVM->hm.s.vmx.fUnrestrictedGuest)
 …
          LogRel(("HM: Enabled VPID\n"));
          if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_INDIV_ADDR)
 -            LogRel(("HM:   VPID flush type           = Individual addresses\n"));
 +            LogRel(("HM:   VPID flush type             = Individual addresses\n"));
          else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
 -            LogRel(("HM:   VPID flush type           = Single context\n"));
 +            LogRel(("HM:   VPID flush type             = Single context\n"));
          else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
 -            LogRel(("HM:   VPID flush type           = All contexts\n"));
 +            LogRel(("HM:   VPID flush type             = All contexts\n"));
          else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
 -            LogRel(("HM:   VPID flush type           = Single context retain globals\n"));
 +            LogRel(("HM:   VPID flush type             = Single context retain globals\n"));
          else
 -            LogRel(("HM:   VPID flush type           = %#x\n", pVM->hm.s.vmx.enmTlbFlushVpid));
 +            LogRel(("HM:   VPID flush type             = %#x\n", pVM->hm.s.vmx.enmTlbFlushVpid));
      }
      else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_NOT_SUPPORTED)
 …
      LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
      LogRel(("HM: Max resume loops                  = %u\n", pVM->hm.s.cMaxResumeLoops));
 -    LogRel(("HM: CPUID 0x80000001.u32AMDFeatureECX = %#RX32\n", pVM->hm.s.cpuid.u32AMDFeatureECX));
 -    LogRel(("HM: CPUID 0x80000001.u32AMDFeatureEDX = %#RX32\n", pVM->hm.s.cpuid.u32AMDFeatureEDX));
      LogRel(("HM: AMD HWCR MSR                      = %#RX64\n", pVM->hm.s.svm.u64MsrHwcr));
      LogRel(("HM: AMD-V revision                    = %#x\n",    pVM->hm.s.svm.u32Rev));
 …
      if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
      {
 -        LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed    %#RX32\n", pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1));
 -        LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %#RX32\n", pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0));
 +        LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed    %#RX32\n", pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1));
 +        LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %#RX32\n", pVM->hm.s.vmx.Msrs.EntryCtls.n.disallowed0));
      }
      else if (iStatusCode == VERR_VMX_INVALID_VMXON_PTR)
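The HMVMX_REPORT_FEAT and HMVMX_REPORT_MSR_CAP macro bodies are not part of this changeset. As a plausible minimal reading of the pattern used throughout the new report functions: given val (the allowed-1 mask) and zap (the disallowed-0 mask), a control is either unsupported, optional, or forced to 1. Sketch with printf standing in for VirtualBox's LogRel, macro semantics assumed rather than taken from the source:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed reading of the reporting macro: log support status and note
     * controls the CPU forces on (present in the disallowed-0 mask). */
    #define HMVMX_REPORT_FEAT(a_fAllowed1, a_fDisallowed0, a_szName, a_fCtrl) \
        do { \
            if (!((a_fAllowed1) & (a_fCtrl))) \
                printf("HM:   %s not supported\n", (a_szName)); \
            else if ((a_fDisallowed0) & (a_fCtrl)) \
                printf("HM:   %s (must be set)\n", (a_szName)); \
            else \
                printf("HM:   %s\n", (a_szName)); \
        } while (0)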
trunk/src/VBox/VMM/include/HMInternal.h (r73287 → r73293)

      uint32_t                    cPatches;
      HMTPRPATCH                  aPatches[64];

 -    struct
 -    {
 -        uint32_t                u32AMDFeatureECX;
 -        uint32_t                u32AMDFeatureEDX;
 -    } cpuid;
 -
      /** Last recorded error code during HM ring-0 init. */