Changeset 56364 in vbox for trunk/src/VBox/VMM/VMMR0
Timestamp: Jun 11, 2015, 2:52:57 PM
File: 1 edited
  trunk/src/VBox/VMM/VMMR0/HMR0.cpp (modified)
trunk/src/VBox/VMM/VMMR0/HMR0.cpp

(Every hunk below makes the same mechanical change: the file-scope HM ring-0 globals container g_HvmR0 is renamed to g_HmR0. No functional change.)

--- trunk/src/VBox/VMM/VMMR0/HMR0.cpp   (r56360)
+++ trunk/src/VBox/VMM/VMMR0/HMR0.cpp   (r56364)
@@ -165 +165 @@
     /** Serialize initialization in HMR0EnableAllCpus. */
     RTONCE EnableAllCpusOnce;
-} g_HvmR0;
+} g_HmR0;

@@ -344 +344 @@
 {
     /** @todo move this into a separate function. */
-    g_HvmR0.vmx.Msrs.u64FeatureCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+    g_HmR0.vmx.Msrs.u64FeatureCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

     /*
@@ -350 +350 @@
      * (This is only supported by some Mac OS X kernels atm.)
      */
-    int rc = g_HvmR0.lLastError = SUPR0EnableVTx(true /* fEnable */);
+    int rc = g_HmR0.lLastError = SUPR0EnableVTx(true /* fEnable */);
-    g_HvmR0.vmx.fUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
+    g_HmR0.vmx.fUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
-    if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+    if (g_HmR0.vmx.fUsingSUPR0EnableVTx)
     {
         AssertLogRelMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
         if (RT_SUCCESS(rc))
         {
-            g_HvmR0.vmx.fSupported = true;
+            g_HmR0.vmx.fSupported = true;
             rc = SUPR0EnableVTx(false /* fEnable */);
             AssertLogRelRC(rc);
@@ -368 +368 @@
         HMR0FIRSTRC FirstRc;
         hmR0FirstRcInit(&FirstRc);
-        g_HvmR0.lLastError = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
+        g_HmR0.lLastError = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
-        if (RT_SUCCESS(g_HvmR0.lLastError))
+        if (RT_SUCCESS(g_HmR0.lLastError))
-            g_HvmR0.lLastError = hmR0FirstRcGetStatus(&FirstRc);
+            g_HmR0.lLastError = hmR0FirstRcGetStatus(&FirstRc);
     }
-    if (RT_SUCCESS(g_HvmR0.lLastError))
+    if (RT_SUCCESS(g_HmR0.lLastError))
     {
         /* Reread in case it was changed by hmR0InitIntelCpu(). */
-        g_HvmR0.vmx.Msrs.u64FeatureCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+        g_HmR0.vmx.Msrs.u64FeatureCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

        /*
         * Read all relevant registers and MSRs.
         */
-        g_HvmR0.vmx.u64HostCr4 = ASMGetCR4();
+        g_HmR0.vmx.u64HostCr4 = ASMGetCR4();
-        g_HvmR0.vmx.u64HostEfer = ASMRdMsr(MSR_K6_EFER);
+        g_HmR0.vmx.u64HostEfer = ASMRdMsr(MSR_K6_EFER);
-        g_HvmR0.vmx.Msrs.u64BasicInfo = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
+        g_HmR0.vmx.Msrs.u64BasicInfo = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
-        g_HvmR0.vmx.Msrs.VmxPinCtls.u = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
+        g_HmR0.vmx.Msrs.VmxPinCtls.u = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
-        g_HvmR0.vmx.Msrs.VmxProcCtls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
+        g_HmR0.vmx.Msrs.VmxProcCtls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
-        g_HvmR0.vmx.Msrs.VmxExit.u = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
+        g_HmR0.vmx.Msrs.VmxExit.u = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
-        g_HvmR0.vmx.Msrs.VmxEntry.u = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
+        g_HmR0.vmx.Msrs.VmxEntry.u = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
-        g_HvmR0.vmx.Msrs.u64Misc = ASMRdMsr(MSR_IA32_VMX_MISC);
+        g_HmR0.vmx.Msrs.u64Misc = ASMRdMsr(MSR_IA32_VMX_MISC);
-        g_HvmR0.vmx.Msrs.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
+        g_HmR0.vmx.Msrs.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
-        g_HvmR0.vmx.Msrs.u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
+        g_HmR0.vmx.Msrs.u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
-        g_HvmR0.vmx.Msrs.u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
+        g_HmR0.vmx.Msrs.u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
-        g_HvmR0.vmx.Msrs.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
+        g_HmR0.vmx.Msrs.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
-        g_HvmR0.vmx.Msrs.u64VmcsEnum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
+        g_HmR0.vmx.Msrs.u64VmcsEnum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
         /* VPID 16 bits ASID. */
-        g_HvmR0.uMaxAsid = 0x10000; /* exclusive */
+        g_HmR0.uMaxAsid = 0x10000; /* exclusive */

-        if (g_HvmR0.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
+        if (g_HmR0.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
         {
-            g_HvmR0.vmx.Msrs.VmxProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
+            g_HmR0.vmx.Msrs.VmxProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
-            if (g_HvmR0.vmx.Msrs.VmxProcCtls2.n.allowed1 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT | VMX_VMCS_CTRL_PROC_EXEC2_VPID))
+            if (g_HmR0.vmx.Msrs.VmxProcCtls2.n.allowed1 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT | VMX_VMCS_CTRL_PROC_EXEC2_VPID))
-                g_HvmR0.vmx.Msrs.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
+                g_HmR0.vmx.Msrs.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);

-            if (g_HvmR0.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC)
+            if (g_HmR0.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC)
-                g_HvmR0.vmx.Msrs.u64Vmfunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
+                g_HmR0.vmx.Msrs.u64Vmfunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
         }

-        if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+        if (!g_HmR0.vmx.fUsingSUPR0EnableVTx)
         {
             /*
@@ -424 +424 @@

             /* Set revision dword at the beginning of the structure. */
-            *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(g_HvmR0.vmx.Msrs.u64BasicInfo);
+            *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(g_HmR0.vmx.Msrs.u64BasicInfo);

             /* Make sure we don't get rescheduled to another cpu during this probe. */
@@ -432 +432 @@
              * Check CR4.VMXE
              */
-            g_HvmR0.vmx.u64HostCr4 = ASMGetCR4();
+            g_HmR0.vmx.u64HostCr4 = ASMGetCR4();
-            if (!(g_HvmR0.vmx.u64HostCr4 & X86_CR4_VMXE))
+            if (!(g_HmR0.vmx.u64HostCr4 & X86_CR4_VMXE))
             {
                 /* In theory this bit could be cleared behind our back. Which would cause
                    #UD faults when we try to execute the VMX instructions... */
-                ASMSetCR4(g_HvmR0.vmx.u64HostCr4 | X86_CR4_VMXE);
+                ASMSetCR4(g_HmR0.vmx.u64HostCr4 | X86_CR4_VMXE);
             }
@@ -448 +448 @@
             if (RT_SUCCESS(rc))
             {
-                g_HvmR0.vmx.fSupported = true;
+                g_HmR0.vmx.fSupported = true;
                 VMXDisable();
             }
@@ -464 +464 @@
                  * They should fix their code, but until they do we simply refuse to run.
                  */
-                g_HvmR0.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
+                g_HmR0.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
-                Assert(g_HvmR0.vmx.fSupported == false);
+                Assert(g_HmR0.vmx.fSupported == false);
             }

@@ -471 +471 @@
                if it wasn't so before (some software could incorrectly
                think it's in VMX mode). */
-            ASMSetCR4(g_HvmR0.vmx.u64HostCr4);
+            ASMSetCR4(g_HmR0.vmx.u64HostCr4);
             ASMSetFlags(fFlags);
@@ -477 +477 @@
         }

-        if (g_HvmR0.vmx.fSupported)
+        if (g_HmR0.vmx.fSupported)
         {
             rc = VMXR0GlobalInit();
             if (RT_FAILURE(rc))
-                g_HvmR0.lLastError = rc;
+                g_HmR0.lLastError = rc;

             /*
              * Install the VT-x methods.
              */
-            g_HvmR0.pfnEnterSession = VMXR0Enter;
+            g_HmR0.pfnEnterSession = VMXR0Enter;
-            g_HvmR0.pfnThreadCtxCallback = VMXR0ThreadCtxCallback;
+            g_HmR0.pfnThreadCtxCallback = VMXR0ThreadCtxCallback;
-            g_HvmR0.pfnSaveHostState = VMXR0SaveHostState;
+            g_HmR0.pfnSaveHostState = VMXR0SaveHostState;
-            g_HvmR0.pfnRunGuestCode = VMXR0RunGuestCode;
+            g_HmR0.pfnRunGuestCode = VMXR0RunGuestCode;
-            g_HvmR0.pfnEnableCpu = VMXR0EnableCpu;
+            g_HmR0.pfnEnableCpu = VMXR0EnableCpu;
-            g_HvmR0.pfnDisableCpu = VMXR0DisableCpu;
+            g_HmR0.pfnDisableCpu = VMXR0DisableCpu;
-            g_HvmR0.pfnInitVM = VMXR0InitVM;
+            g_HmR0.pfnInitVM = VMXR0InitVM;
-            g_HvmR0.pfnTermVM = VMXR0TermVM;
+            g_HmR0.pfnTermVM = VMXR0TermVM;
-            g_HvmR0.pfnSetupVM = VMXR0SetupVM;
+            g_HmR0.pfnSetupVM = VMXR0SetupVM;

             /*
@@ -500 +500 @@
              * Timer Does Not Count Down at the Rate Specified" erratum.
              */
-            if (g_HvmR0.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER)
+            if (g_HmR0.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER)
             {
-                g_HvmR0.vmx.fUsePreemptTimer = true;
+                g_HmR0.vmx.fUsePreemptTimer = true;
-                g_HvmR0.vmx.cPreemptTimerShift = MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(g_HvmR0.vmx.Msrs.u64Misc);
+                g_HmR0.vmx.cPreemptTimerShift = MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(g_HmR0.vmx.Msrs.u64Misc);
                 if (hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum())
-                    g_HvmR0.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
+                    g_HmR0.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
             }
         }
@@ -511 +511 @@
 #ifdef LOG_ENABLED
         else
-            SUPR0Printf("hmR0InitIntelCpu failed with rc=%d\n", g_HvmR0.lLastError);
+            SUPR0Printf("hmR0InitIntelCpu failed with rc=%d\n", g_HmR0.lLastError);
 #endif
     }
     else
-        g_HvmR0.lLastError = VERR_VMX_NO_VMX;
+        g_HmR0.lLastError = VERR_VMX_NO_VMX;
     return VINF_SUCCESS;
 }
@@ -532 +532 @@
      */
     int rc;
-    if (   (g_HvmR0.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
+    if (   (g_HmR0.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
         && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
         && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
@@ -543 +543 @@
         if (RT_FAILURE(rc))
         {
-            g_HvmR0.lLastError = rc;
+            g_HmR0.lLastError = rc;
             return rc;
         }
@@ -550 +550 @@
      * Install the AMD-V methods.
      */
-    g_HvmR0.pfnEnterSession = SVMR0Enter;
+    g_HmR0.pfnEnterSession = SVMR0Enter;
-    g_HvmR0.pfnThreadCtxCallback = SVMR0ThreadCtxCallback;
+    g_HmR0.pfnThreadCtxCallback = SVMR0ThreadCtxCallback;
-    g_HvmR0.pfnSaveHostState = SVMR0SaveHostState;
+    g_HmR0.pfnSaveHostState = SVMR0SaveHostState;
-    g_HvmR0.pfnRunGuestCode = SVMR0RunGuestCode;
+    g_HmR0.pfnRunGuestCode = SVMR0RunGuestCode;
-    g_HvmR0.pfnEnableCpu = SVMR0EnableCpu;
+    g_HmR0.pfnEnableCpu = SVMR0EnableCpu;
-    g_HvmR0.pfnDisableCpu = SVMR0DisableCpu;
+    g_HmR0.pfnDisableCpu = SVMR0DisableCpu;
-    g_HvmR0.pfnInitVM = SVMR0InitVM;
+    g_HmR0.pfnInitVM = SVMR0InitVM;
-    g_HvmR0.pfnTermVM = SVMR0TermVM;
+    g_HmR0.pfnTermVM = SVMR0TermVM;
-    g_HvmR0.pfnSetupVM = SVMR0SetupVM;
+    g_HmR0.pfnSetupVM = SVMR0SetupVM;

     /* Query AMD features. */
     uint32_t u32Dummy;
-    ASMCpuId(0x8000000a, &g_HvmR0.svm.u32Rev, &g_HvmR0.uMaxAsid, &u32Dummy, &g_HvmR0.svm.u32Features);
+    ASMCpuId(0x8000000a, &g_HmR0.svm.u32Rev, &g_HmR0.uMaxAsid, &u32Dummy, &g_HmR0.svm.u32Features);
@@ -581 +581 @@
     {
         /* Read the HWCR MSR for diagnostics. */
-        g_HvmR0.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
+        g_HmR0.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
-        g_HvmR0.svm.fSupported = true;
+        g_HmR0.svm.fSupported = true;
     }
     else
     {
-        g_HvmR0.lLastError = rc;
+        g_HmR0.lLastError = rc;
         if (rc == VERR_SVM_DISABLED || rc == VERR_SVM_IN_USE)
             rc = VINF_SUCCESS; /* Don't fail if AMD-V is disabled or in use. */
@@ -594 +594 @@
     {
         rc = VINF_SUCCESS; /* Don't fail if AMD-V is not supported. See @bugref{6785}. */
-        g_HvmR0.lLastError = VERR_SVM_NO_SVM;
+        g_HmR0.lLastError = VERR_SVM_NO_SVM;
     }
     return rc;
@@ -610 +610 @@
      * Initialize the globals.
      */
-    g_HvmR0.fEnabled = false;
+    g_HmR0.fEnabled = false;
     static RTONCE s_OnceInit = RTONCE_INITIALIZER;
-    g_HvmR0.EnableAllCpusOnce = s_OnceInit;
+    g_HmR0.EnableAllCpusOnce = s_OnceInit;
-    for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
+    for (unsigned i = 0; i < RT_ELEMENTS(g_HmR0.aCpuInfo); i++)
     {
-        g_HvmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
+        g_HmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
-        g_HvmR0.aCpuInfo[i].idCpu = NIL_RTCPUID;
+        g_HmR0.aCpuInfo[i].idCpu = NIL_RTCPUID;
     }

     /* Fill in all callbacks with placeholders. */
-    g_HvmR0.pfnEnterSession = hmR0DummyEnter;
+    g_HmR0.pfnEnterSession = hmR0DummyEnter;
-    g_HvmR0.pfnThreadCtxCallback = hmR0DummyThreadCtxCallback;
+    g_HmR0.pfnThreadCtxCallback = hmR0DummyThreadCtxCallback;
-    g_HvmR0.pfnSaveHostState = hmR0DummySaveHostState;
+    g_HmR0.pfnSaveHostState = hmR0DummySaveHostState;
-    g_HvmR0.pfnRunGuestCode = hmR0DummyRunGuestCode;
+    g_HmR0.pfnRunGuestCode = hmR0DummyRunGuestCode;
-    g_HvmR0.pfnEnableCpu = hmR0DummyEnableCpu;
+    g_HmR0.pfnEnableCpu = hmR0DummyEnableCpu;
-    g_HvmR0.pfnDisableCpu = hmR0DummyDisableCpu;
+    g_HmR0.pfnDisableCpu = hmR0DummyDisableCpu;
-    g_HvmR0.pfnInitVM = hmR0DummyInitVM;
+    g_HmR0.pfnInitVM = hmR0DummyInitVM;
-    g_HvmR0.pfnTermVM = hmR0DummyTermVM;
+    g_HmR0.pfnTermVM = hmR0DummyTermVM;
-    g_HvmR0.pfnSetupVM = hmR0DummySetupVM;
+    g_HmR0.pfnSetupVM = hmR0DummySetupVM;

     /* Default is global VT-x/AMD-V init. */
-    g_HvmR0.fGlobalInit = true;
+    g_HmR0.fGlobalInit = true;

     /*
      * Make sure aCpuInfo is big enough for all the CPUs on this system.
      */
-    if (RTMpGetArraySize() > RT_ELEMENTS(g_HvmR0.aCpuInfo))
+    if (RTMpGetArraySize() > RT_ELEMENTS(g_HmR0.aCpuInfo))
     {
-        LogRel(("HM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_HvmR0.aCpuInfo)));
+        LogRel(("HM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_HmR0.aCpuInfo)));
         return VERR_TOO_MANY_CPUS;
     }
@@ -660 +660 @@
         if (ASMIsValidExtRange(uMaxExtLeaf))
             ASMCpuId(0x80000001, &u32Dummy, &u32Dummy,
-                     &g_HvmR0.cpuid.u32AMDFeatureECX,
+                     &g_HmR0.cpuid.u32AMDFeatureECX,
-                     &g_HvmR0.cpuid.u32AMDFeatureEDX);
+                     &g_HmR0.cpuid.u32AMDFeatureEDX);
         else
-            g_HvmR0.cpuid.u32AMDFeatureECX = g_HvmR0.cpuid.u32AMDFeatureEDX = 0;
+            g_HmR0.cpuid.u32AMDFeatureECX = g_HmR0.cpuid.u32AMDFeatureEDX = 0;

         /* Go to CPU specific initialization code. */
@@ -680 +680 @@
             }
             else
-                g_HvmR0.lLastError = VERR_HM_UNKNOWN_CPU;
+                g_HmR0.lLastError = VERR_HM_UNKNOWN_CPU;
         }
         else
-            g_HvmR0.lLastError = VERR_HM_UNKNOWN_CPU;
+            g_HmR0.lLastError = VERR_HM_UNKNOWN_CPU;
     }
     else
-        g_HvmR0.lLastError = VERR_HM_NO_CPUID;
+        g_HmR0.lLastError = VERR_HM_NO_CPUID;

@@ -692 +692 @@
      * when brought offline/online or suspending/resuming.
      */
-    if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+    if (!g_HmR0.vmx.fUsingSUPR0EnableVTx)
     {
         rc = RTMpNotificationRegister(hmR0MpEventCallback, NULL);
@@ -715 +715 @@
 {
     int rc;
-    if (   g_HvmR0.vmx.fSupported
+    if (   g_HmR0.vmx.fSupported
-        && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+        && g_HmR0.vmx.fUsingSUPR0EnableVTx)
     {
         /*
          * Simple if the host OS manages VT-x.
          */
-        Assert(g_HvmR0.fGlobalInit);
+        Assert(g_HmR0.fGlobalInit);
         rc = SUPR0EnableVTx(false /* fEnable */);

-        for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo); iCpu++)
+        for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_HmR0.aCpuInfo); iCpu++)
         {
-            g_HvmR0.aCpuInfo[iCpu].fConfigured = false;
+            g_HmR0.aCpuInfo[iCpu].fConfigured = false;
-            Assert(g_HvmR0.aCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
+            Assert(g_HmR0.aCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
         }
     }
     else
     {
-        Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
+        Assert(!g_HmR0.vmx.fSupported || !g_HmR0.vmx.fUsingSUPR0EnableVTx);

         /* Doesn't really matter if this fails. */
@@ -741 +741 @@
          * Disable VT-x/AMD-V on all CPUs if we enabled it before.
          */
-        if (g_HvmR0.fGlobalInit)
+        if (g_HmR0.fGlobalInit)
        {
             HMR0FIRSTRC FirstRc;
@@ -754 +754 @@
      * Free the per-cpu pages used for VT-x and AMD-V.
      */
-    for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
+    for (unsigned i = 0; i < RT_ELEMENTS(g_HmR0.aCpuInfo); i++)
     {
-        if (g_HvmR0.aCpuInfo[i].hMemObj != NIL_RTR0MEMOBJ)
+        if (g_HmR0.aCpuInfo[i].hMemObj != NIL_RTR0MEMOBJ)
         {
-            RTR0MemObjFree(g_HvmR0.aCpuInfo[i].hMemObj, false);
+            RTR0MemObjFree(g_HmR0.aCpuInfo[i].hMemObj, false);
-            g_HvmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
+            g_HmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
         }
     }
@@ -768 +768 @@
      * should move into their respective modules. */
     /* Finally, call global VT-x/AMD-V termination. */
-    if (g_HvmR0.vmx.fSupported)
+    if (g_HmR0.vmx.fSupported)
         VMXR0GlobalTerm();
-    else if (g_HvmR0.svm.fSupported)
+    else if (g_HmR0.svm.fSupported)
         SVMR0GlobalTerm();

@@ -828 +828 @@
 static int hmR0EnableCpu(PVM pVM, RTCPUID idCpu)
 {
-    PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
+    PHMGLOBALCPUINFO pCpu = &g_HmR0.aCpuInfo[idCpu];

     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
-    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
+    Assert(idCpu < RT_ELEMENTS(g_HmR0.aCpuInfo));
     Assert(!pCpu->fConfigured);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
@@ -839 +839 @@

     int rc;
-    if (g_HvmR0.vmx.fSupported && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+    if (g_HmR0.vmx.fSupported && g_HmR0.vmx.fUsingSUPR0EnableVTx)
-        rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true, &g_HvmR0.vmx.Msrs);
+        rc = g_HmR0.pfnEnableCpu(pCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true, &g_HmR0.vmx.Msrs);
     else
     {
@@ -847 +847 @@
         RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0 /* iPage */);

-        if (g_HvmR0.vmx.fSupported)
+        if (g_HmR0.vmx.fSupported)
-            rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, &g_HvmR0.vmx.Msrs);
+            rc = g_HmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, &g_HmR0.vmx.Msrs);
         else
-            rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, NULL /* pvArg */);
+            rc = g_HmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, NULL /* pvArg */);
     }
     if (RT_SUCCESS(rc))
@@ -870 +870 @@
     PVM pVM = (PVM)pvUser1; /* can be NULL! */
     PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2;
-    AssertReturnVoid(g_HvmR0.fGlobalInit);
+    AssertReturnVoid(g_HmR0.fGlobalInit);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     hmR0FirstRcSetStatus(pFirstRc, hmR0EnableCpu(pVM, idCpu));
@@ -893 +893 @@
      * notification. Kind of unlikely though, so ignored for now.
      */
-    AssertReturn(!g_HvmR0.fEnabled, VERR_HM_ALREADY_ENABLED_IPE);
+    AssertReturn(!g_HmR0.fEnabled, VERR_HM_ALREADY_ENABLED_IPE);
-    ASMAtomicWriteBool(&g_HvmR0.fEnabled, true);
+    ASMAtomicWriteBool(&g_HmR0.fEnabled, true);

     /*
      * The global init variable is set by the first VM.
      */
-    g_HvmR0.fGlobalInit = pVM->hm.s.fGlobalInit;
+    g_HmR0.fGlobalInit = pVM->hm.s.fGlobalInit;

 #ifdef VBOX_STRICT
-    for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
+    for (unsigned i = 0; i < RT_ELEMENTS(g_HmR0.aCpuInfo); i++)
     {
-        Assert(g_HvmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
+        Assert(g_HmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
-        Assert(!g_HvmR0.aCpuInfo[i].fConfigured);
+        Assert(!g_HmR0.aCpuInfo[i].fConfigured);
-        Assert(!g_HvmR0.aCpuInfo[i].cTlbFlushes);
+        Assert(!g_HmR0.aCpuInfo[i].cTlbFlushes);
-        Assert(!g_HvmR0.aCpuInfo[i].uCurrentAsid);
+        Assert(!g_HmR0.aCpuInfo[i].uCurrentAsid);
     }
 #endif

     int rc;
-    if (   g_HvmR0.vmx.fSupported
+    if (   g_HmR0.vmx.fSupported
-        && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+        && g_HmR0.vmx.fUsingSUPR0EnableVTx)
     {
         /*
@@ -922 +922 @@
         {
             /* If the host provides a VT-x init API, then we'll rely on that for global init. */
-            g_HvmR0.fGlobalInit = pVM->hm.s.fGlobalInit = true;
+            g_HmR0.fGlobalInit = pVM->hm.s.fGlobalInit = true;
         }
         else
@@ -933 +933 @@
         */
        /* Allocate one page per cpu for the global VT-x and AMD-V pages */
-        for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
+        for (unsigned i = 0; i < RT_ELEMENTS(g_HmR0.aCpuInfo); i++)
         {
-            Assert(g_HvmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
+            Assert(g_HmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);

             if (RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(i)))
             {
-                rc = RTR0MemObjAllocCont(&g_HvmR0.aCpuInfo[i].hMemObj, PAGE_SIZE, false /* executable R0 mapping */);
+                rc = RTR0MemObjAllocCont(&g_HmR0.aCpuInfo[i].hMemObj, PAGE_SIZE, false /* executable R0 mapping */);
                 AssertLogRelRCReturn(rc, rc);

-                void *pvR0 = RTR0MemObjAddress(g_HvmR0.aCpuInfo[i].hMemObj); Assert(pvR0);
+                void *pvR0 = RTR0MemObjAddress(g_HmR0.aCpuInfo[i].hMemObj); Assert(pvR0);
                 ASMMemZeroPage(pvR0);
             }
@@ -951 +951 @@

     if (   RT_SUCCESS(rc)
-        && g_HvmR0.fGlobalInit)
+        && g_HmR0.fGlobalInit)
     {
         /* First time, so initialize each cpu/core. */
@@ -974 +974 @@
 {
     /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
-    if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
+    if (ASMAtomicReadBool(&g_HmR0.fSuspended))
         return VERR_HM_SUSPEND_PENDING;

-    return RTOnce(&g_HvmR0.EnableAllCpusOnce, hmR0EnableAllCpuOnce, pVM);
+    return RTOnce(&g_HmR0.EnableAllCpusOnce, hmR0EnableAllCpuOnce, pVM);
 }

@@ -991 +991 @@
 static int hmR0DisableCpu(RTCPUID idCpu)
 {
-    PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
+    PHMGLOBALCPUINFO pCpu = &g_HmR0.aCpuInfo[idCpu];

-    Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
+    Assert(!g_HmR0.vmx.fSupported || !g_HmR0.vmx.fUsingSUPR0EnableVTx);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
-    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
+    Assert(idCpu < RT_ELEMENTS(g_HmR0.aCpuInfo));
     Assert(!pCpu->fConfigured || pCpu->hMemObj != NIL_RTR0MEMOBJ);
     AssertRelease(idCpu == RTMpCpuId());
@@ -1009 +1009 @@
     RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);

-    rc = g_HvmR0.pfnDisableCpu(pCpu, pvCpuPage, HCPhysCpuPage);
+    rc = g_HmR0.pfnDisableCpu(pCpu, pvCpuPage, HCPhysCpuPage);
     AssertRCReturn(rc, rc);

@@ -1032 +1032 @@
 {
     PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2; NOREF(pvUser1);
-    AssertReturnVoid(g_HvmR0.fGlobalInit);
+    AssertReturnVoid(g_HmR0.fGlobalInit);
     hmR0FirstRcSetStatus(pFirstRc, hmR0DisableCpu(idCpu));

@@ -1063 +1063 @@
 {
     NOREF(pvData);
-    Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
+    Assert(!g_HmR0.vmx.fSupported || !g_HmR0.vmx.fUsingSUPR0EnableVTx);

     /*
@@ -1104 +1104 @@
 {
     NOREF(pvUser);
-    Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
+    Assert(!g_HmR0.vmx.fSupported || !g_HmR0.vmx.fUsingSUPR0EnableVTx);

 #ifdef LOG_ENABLED
@@ -1114 +1114 @@

     if (enmEvent == RTPOWEREVENT_SUSPEND)
-        ASMAtomicWriteBool(&g_HvmR0.fSuspended, true);
+        ASMAtomicWriteBool(&g_HmR0.fSuspended, true);

-    if (g_HvmR0.fEnabled)
+    if (g_HmR0.fEnabled)
     {
         int rc;
@@ -1124 +1124 @@
         if (enmEvent == RTPOWEREVENT_SUSPEND)
         {
-            if (g_HvmR0.fGlobalInit)
+            if (g_HmR0.fGlobalInit)
             {
                 /* Turn off VT-x or AMD-V on all CPUs. */
@@ -1136 +1136 @@
             /* Reinit the CPUs from scratch as the suspend state might have
               messed with the MSRs. (lousy BIOSes as usual) */
-            if (g_HvmR0.vmx.fSupported)
+            if (g_HmR0.vmx.fSupported)
                 rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
             else
@@ -1147 +1147 @@
             SUPR0Printf("hmR0PowerCallback hmR0InitXxxCpu failed with %Rc\n", rc);
 #endif
-            if (g_HvmR0.fGlobalInit)
+            if (g_HmR0.fGlobalInit)
             {
                 /* Turn VT-x or AMD-V back on on all CPUs. */
@@ -1158 +1158 @@

     if (enmEvent == RTPOWEREVENT_RESUME)
-        ASMAtomicWriteBool(&g_HvmR0.fSuspended, false);
+        ASMAtomicWriteBool(&g_HmR0.fSuspended, false);
 }

@@ -1183 +1183 @@

     /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
-    if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
+    if (ASMAtomicReadBool(&g_HmR0.fSuspended))
         return VERR_HM_SUSPEND_PENDING;

@@ -1189 +1189 @@
      * Copy globals to the VM structure.
      */
-    pVM->hm.s.vmx.fSupported = g_HvmR0.vmx.fSupported;
+    pVM->hm.s.vmx.fSupported = g_HmR0.vmx.fSupported;
-    pVM->hm.s.svm.fSupported = g_HvmR0.svm.fSupported;
+    pVM->hm.s.svm.fSupported = g_HmR0.svm.fSupported;

-    pVM->hm.s.vmx.fUsePreemptTimer &= g_HvmR0.vmx.fUsePreemptTimer; /* Can be overridden by CFGM. See HMR3Init(). */
+    pVM->hm.s.vmx.fUsePreemptTimer &= g_HmR0.vmx.fUsePreemptTimer; /* Can be overridden by CFGM. See HMR3Init(). */
-    pVM->hm.s.vmx.cPreemptTimerShift = g_HvmR0.vmx.cPreemptTimerShift;
+    pVM->hm.s.vmx.cPreemptTimerShift = g_HmR0.vmx.cPreemptTimerShift;
-    pVM->hm.s.vmx.u64HostCr4 = g_HvmR0.vmx.u64HostCr4;
+    pVM->hm.s.vmx.u64HostCr4 = g_HmR0.vmx.u64HostCr4;
-    pVM->hm.s.vmx.u64HostEfer = g_HvmR0.vmx.u64HostEfer;
+    pVM->hm.s.vmx.u64HostEfer = g_HmR0.vmx.u64HostEfer;
-    pVM->hm.s.vmx.Msrs = g_HvmR0.vmx.Msrs;
+    pVM->hm.s.vmx.Msrs = g_HmR0.vmx.Msrs;
-    pVM->hm.s.svm.u64MsrHwcr = g_HvmR0.svm.u64MsrHwcr;
+    pVM->hm.s.svm.u64MsrHwcr = g_HmR0.svm.u64MsrHwcr;
-    pVM->hm.s.svm.u32Rev = g_HvmR0.svm.u32Rev;
+    pVM->hm.s.svm.u32Rev = g_HmR0.svm.u32Rev;
-    pVM->hm.s.svm.u32Features = g_HvmR0.svm.u32Features;
+    pVM->hm.s.svm.u32Features = g_HmR0.svm.u32Features;
-    pVM->hm.s.cpuid.u32AMDFeatureECX = g_HvmR0.cpuid.u32AMDFeatureECX;
+    pVM->hm.s.cpuid.u32AMDFeatureECX = g_HmR0.cpuid.u32AMDFeatureECX;
-    pVM->hm.s.cpuid.u32AMDFeatureEDX = g_HvmR0.cpuid.u32AMDFeatureEDX;
+    pVM->hm.s.cpuid.u32AMDFeatureEDX = g_HmR0.cpuid.u32AMDFeatureEDX;
-    pVM->hm.s.lLastError = g_HvmR0.lLastError;
+    pVM->hm.s.lLastError = g_HmR0.lLastError;
-    pVM->hm.s.uMaxAsid = g_HvmR0.uMaxAsid;
+    pVM->hm.s.uMaxAsid = g_HmR0.uMaxAsid;

     if (!pVM->hm.s.cMaxResumeLoops) /* allow ring-3 overrides */
@@ -1231 +1231 @@
      * Call the hardware specific initialization method.
      */
-    return g_HvmR0.pfnInitVM(pVM);
+    return g_HmR0.pfnInitVM(pVM);
 }

@@ -1252 +1252 @@
      * mess with VT-x/AMD-V features on the CPU, currently all they do is free memory so this is safe.
      */
-    return g_HvmR0.pfnTermVM(pVM);
+    return g_HmR0.pfnTermVM(pVM);
 }

@@ -1270 +1270 @@

     /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
-    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
+    AssertReturn(!ASMAtomicReadBool(&g_HmR0.fSuspended), VERR_HM_SUSPEND_PENDING);

     /* On first entry we'll sync everything. */
@@ -1286 +1286 @@
     /* Enable VT-x or AMD-V if local init is required. */
     int rc;
-    if (!g_HvmR0.fGlobalInit)
+    if (!g_HmR0.fGlobalInit)
     {
-        Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
+        Assert(!g_HmR0.vmx.fSupported || !g_HmR0.vmx.fUsingSUPR0EnableVTx);
         rc = hmR0EnableCpu(pVM, idCpu);
         if (RT_FAILURE(rc))
@@ -1298 +1298 @@

     /* Setup VT-x or AMD-V. */
-    rc = g_HvmR0.pfnSetupVM(pVM);
+    rc = g_HmR0.pfnSetupVM(pVM);

     /* Disable VT-x or AMD-V if local init was done before. */
-    if (!g_HvmR0.fGlobalInit)
+    if (!g_HmR0.fGlobalInit)
     {
-        Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
+        Assert(!g_HmR0.vmx.fSupported || !g_HmR0.vmx.fUsingSUPR0EnableVTx);
         int rc2 = hmR0DisableCpu(idCpu);
         AssertRC(rc2);
@@ -1328 +1328 @@
     int rc = VINF_SUCCESS;
     RTCPUID idCpu = RTMpCpuId();
-    PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
+    PHMGLOBALCPUINFO pCpu = &g_HmR0.aCpuInfo[idCpu];
     AssertPtr(pCpu);

@@ -1356 +1356 @@
 {
     /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
-    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
+    AssertReturn(!ASMAtomicReadBool(&g_HmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

@@ -1369 +1369 @@

     RTCPUID idCpu = RTMpCpuId();
-    PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
+    PHMGLOBALCPUINFO pCpu = &g_HmR0.aCpuInfo[idCpu];
     Assert(pCpu);
     Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));

-    rc = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
+    rc = g_HmR0.pfnEnterSession(pVM, pVCpu, pCpu);
     AssertMsgRCReturn(rc, ("pfnEnterSession failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);

     /* Load the host-state as we may be resuming code after a longjmp and quite
       possibly now be scheduled on a different CPU. */
-    rc = g_HvmR0.pfnSaveHostState(pVM, pVCpu);
+    rc = g_HmR0.pfnSaveHostState(pVM, pVCpu);
     AssertMsgRCReturn(rc, ("pfnSaveHostState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);

@@ -1408 +1408 @@

     RTCPUID idCpu = RTMpCpuId();
-    PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
+    PHMGLOBALCPUINFO pCpu = &g_HmR0.aCpuInfo[idCpu];

-    if (   !g_HvmR0.fGlobalInit
+    if (   !g_HmR0.fGlobalInit
         && pCpu->fConfigured)
     {
@@ -1439 +1439 @@
     PVMCPU pVCpu = (PVMCPU)pvUser;
     Assert(pVCpu);
-    Assert(g_HvmR0.pfnThreadCtxCallback);
+    Assert(g_HmR0.pfnThreadCtxCallback);

-    g_HvmR0.pfnThreadCtxCallback(enmEvent, pVCpu, g_HvmR0.fGlobalInit);
+    g_HmR0.pfnThreadCtxCallback(enmEvent, pVCpu, g_HmR0.fGlobalInit);

@@ -1461 +1461 @@
     if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
     {
-        PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[RTMpCpuId()];
+        PHMGLOBALCPUINFO pCpu = &g_HmR0.aCpuInfo[RTMpCpuId()];
         Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
         Assert(pCpu->fConfigured);
-        AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
+        AssertReturn(!ASMAtomicReadBool(&g_HmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
     }
 #endif
@@ -1474 +1474 @@
 #endif

-    int rc = g_HvmR0.pfnRunGuestCode(pVM, pVCpu, CPUMQueryGuestCtxPtr(pVCpu));
+    int rc = g_HmR0.pfnRunGuestCode(pVM, pVCpu, CPUMQueryGuestCtxPtr(pVCpu));

 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
@@ -1550 +1550 @@
 VMMR0_INT_DECL(bool) HMR0SuspendPending(void)
 {
-    return ASMAtomicReadBool(&g_HvmR0.fSuspended);
+    return ASMAtomicReadBool(&g_HmR0.fSuspended);
 }

@@ -1564 +1564 @@
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     RTCPUID idCpu = RTMpCpuId();
-    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
+    Assert(idCpu < RT_ELEMENTS(g_HmR0.aCpuInfo));
-    return &g_HvmR0.aCpuInfo[idCpu];
+    return &g_HmR0.aCpuInfo[idCpu];
 }

@@ -1578 +1578 @@
 VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu)
 {
-    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
+    Assert(idCpu < RT_ELEMENTS(g_HmR0.aCpuInfo));
-    return &g_HvmR0.aCpuInfo[idCpu];
+    return &g_HmR0.aCpuInfo[idCpu];
 }

@@ -1646 +1646 @@

     /* No such issues with AMD-V */
-    if (!g_HvmR0.vmx.fSupported)
+    if (!g_HmR0.vmx.fSupported)
         return VINF_SUCCESS;

@@ -1668 +1668 @@
     /* When using SUPR0EnableVTx we must let the host suspend and resume VT-x,
       regardless of whether we're currently using VT-x or not. */
-    if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+    if (g_HmR0.vmx.fUsingSUPR0EnableVTx)
     {
         *pfVTxDisabled = SUPR0SuspendVTxOnCpu();
@@ -1678 +1678 @@

     /* Nothing to do if we haven't enabled VT-x. */
-    if (!g_HvmR0.fEnabled)
+    if (!g_HmR0.fEnabled)
         return VINF_SUCCESS;

     /* Local init implies the CPU is currently not in VMX root mode. */
-    if (!g_HvmR0.fGlobalInit)
+    if (!g_HmR0.fGlobalInit)
         return VINF_SUCCESS;

@@ -1710 +1710 @@
         return; /* nothing to do */

-    Assert(g_HvmR0.vmx.fSupported);
+    Assert(g_HmR0.vmx.fSupported);
-    if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+    if (g_HmR0.vmx.fUsingSUPR0EnableVTx)
         SUPR0ResumeVTxOnCpu(fVTxDisabled);
     else
     {
-        Assert(g_HvmR0.fEnabled);
+        Assert(g_HmR0.fEnabled);
-        Assert(g_HvmR0.fGlobalInit);
+        Assert(g_HmR0.fGlobalInit);

         PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
@@ -1723 +1723 @@
         void *pvCpuPage = RTR0MemObjAddress(pCpu->hMemObj);
         RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
-        VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, &g_HvmR0.vmx.Msrs);
+        VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, &g_HmR0.vmx.Msrs);

     }
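Context for readers skimming the hunks: g_HmR0 is a file-scope container whose pfnXxx members form a dispatch table. HMR0Init() first points every slot at a dummy placeholder, and the CPU detection code then installs either the VT-x (VMXR0*) or the AMD-V (SVMR0*) methods, as the hunks around lines 488, 552 and 622 show. What follows is a minimal standalone sketch of that pattern, illustrative only; all names in it (HMOPS, g_Ops, vtxEnableCpu, ...) are hypothetical stand-ins, not VirtualBox APIs.

    // Minimal illustrative sketch (not VirtualBox code) of the dispatch-table
    // pattern behind g_HmR0: every pfnXxx slot starts out pointing at a dummy,
    // and backend detection later overwrites it with the real method, so
    // callers never need to know whether VT-x or AMD-V won.
    #include <cstdio>

    struct HMOPS
    {
        int (*pfnEnableCpu)(unsigned idCpu);
        int (*pfnSetupVM)();
    };

    // Placeholders, analogous to the hmR0DummyXxx callbacks HMR0Init() installs:
    // safe to call before a backend has been selected.
    static int dummyEnableCpu(unsigned) { return -1; }
    static int dummySetupVM()           { return -1; }

    // A stand-in backend, analogous to VMXR0EnableCpu()/VMXR0SetupVM().
    static int vtxEnableCpu(unsigned idCpu) { std::printf("enable CPU %u\n", idCpu); return 0; }
    static int vtxSetupVM()                 { return 0; }

    // File-scope globals container, playing the role of g_HmR0 (nee g_HvmR0).
    static HMOPS g_Ops = { dummyEnableCpu, dummySetupVM };

    int main()
    {
        // Detection decided on the "VT-x" backend: overwrite the placeholders.
        g_Ops.pfnEnableCpu = vtxEnableCpu;
        g_Ops.pfnSetupVM   = vtxSetupVM;

        // Callers always dispatch through the table.
        g_Ops.pfnEnableCpu(0);
        return g_Ops.pfnSetupVM();
    }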
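Similarly, the RTONCE member renamed in the first hunk (EnableAllCpusOnce) is what lets HMR0EnableAllCpus() be called from every VM-initialization path while the per-CPU enabling runs only once; see the RTOnce() call in the hunk at line 974. Standard C++ has the same idiom in std::call_once; a rough sketch of the equivalence, again with hypothetical names and without RTOnce's status-code plumbing:

    #include <cstdio>
    #include <mutex>

    static std::once_flag g_EnableAllCpusOnce;  // role of g_HmR0.EnableAllCpusOnce

    static void enableAllCpus()
    {
        // In HMR0 this is hmR0EnableAllCpuOnce(): enable VT-x/AMD-V on every CPU.
        std::printf("enabling hardware virtualization on all CPUs (runs once)\n");
    }

    int main()
    {
        // Every VM-creation path goes through this; only the first call runs.
        std::call_once(g_EnableAllCpusOnce, enableAllCpus);
        std::call_once(g_EnableAllCpusOnce, enableAllCpus);  // no-op the second time
        return 0;
    }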