Changeset 87537 in vbox for trunk/src/VBox/VMM/VMMR0/HMR0.cpp
- Timestamp: Feb 2, 2021 3:01:24 PM
- File: 1 edited
Legend:
- Lines beginning with '-' were removed (present only in r87536), lines beginning with '+' were added (present only in r87537), and lines beginning with a space are unmodified context.
- Hunk headers of the form @@ -M +N @@ give the starting source line of the hunk in r87536 (M) and r87537 (N).
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r87536)
+++ trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r87537)
@@ -94 +94 @@
 *********************************************************************************************************************************/
 /** The active ring-0 HM operations (copied from one of the table at init). */
-static HMR0VTABLE g_HmR0Ops;
+static HMR0VTABLE       g_HmR0Ops;
+/** Indicates whether the host is suspending or not. We'll refuse a few
+ * actions when the host is being suspended to speed up the suspending and
+ * avoid trouble. */
+static bool volatile    g_fHmSuspended;
+/** If set, VT-x/AMD-V is enabled globally at init time, otherwise it's
+ * enabled and disabled each time it's used to execute guest code. */
+static bool             g_fHmGlobalInit;
 
 /** Maximum allowed ASID/VPID (inclusive).
@@ -100 +107 @@
  * Couldn't immediately find any docs on AMD-V, but suspect it is
  * exclusive there as well given how hmR0SvmFlushTaggedTlb() use it. */
-uint32_t g_uHmMaxAsid;
+uint32_t                g_uHmMaxAsid;
 
 
 /** Set if VT-x (VMX) is supported by the CPU. */
-bool g_fHmVmxSupported = false;
+bool                    g_fHmVmxSupported = false;
 /** Whether we're using the preemption timer or not. */
-bool g_fHmVmxUsePreemptTimer;
+bool                    g_fHmVmxUsePreemptTimer;
 /** The shift mask employed by the VMX-Preemption timer. */
-uint8_t g_cHmVmxPreemptTimerShift;
+uint8_t                 g_cHmVmxPreemptTimerShift;
+/** Whether we're using SUPR0EnableVTx or not. */
+static bool             g_fHmVmxUsingSUPR0EnableVTx = false;
+/** Set if we've called SUPR0EnableVTx(true) and should disable it during
+ * module termination. */
+static bool             g_fHmVmxCalledSUPR0EnableVTx = false;
 /** Host CR4 value (set by ring-0 VMX init) */
-uint64_t g_uHmVmxHostCr4;
+uint64_t                g_uHmVmxHostCr4;
 /** Host EFER value (set by ring-0 VMX init) */
-uint64_t g_uHmVmxHostMsrEfer;
+uint64_t                g_uHmVmxHostMsrEfer;
 /** Host SMM monitor control (used for logging/diagnostics) */
-uint64_t g_uHmVmxHostSmmMonitorCtl;
+uint64_t                g_uHmVmxHostSmmMonitorCtl;
+
 
 /** Set if AMD-V is supported by the CPU. */
-bool g_fHmSvmSupported = false;
+bool                    g_fHmSvmSupported = false;
 /** SVM revision. */
-uint32_t g_uHmSvmRev;
+uint32_t                g_uHmSvmRev;
 /** SVM feature bits from cpuid 0x8000000a */
-uint32_t g_uHmSvmFeatures;
+uint32_t                g_uHmSvmFeatures;
+
 
 /** MSRs. */
-SUPHWVIRTMSRS g_HmMsrs;
-
-
-/**
- * Global data.
- */
-static struct
-{
-    /** Per CPU globals. */
-    HMPHYSCPU aCpuInfo[RTCPUSET_MAX_CPUS];
-
-    /** Hardware-virtualization data. */
-    struct
-    {
-        union
-        {
-            /** VT-x data. */
-            struct
-            {
-                /** Last instruction error. */
-                uint32_t ulLastInstrError;
-                /** Whether we're using SUPR0EnableVTx or not. */
-                bool fUsingSUPR0EnableVTx;
-                /** Set if we've called SUPR0EnableVTx(true) and should disable it during
-                 * module termination. */
-                bool fCalledSUPR0EnableVTx;
-            } vmx;
-        } u;
-    } hwvirt;
-
-    /** Last recorded error code during HM ring-0 init. */
-    int32_t rcInit;
-
-    /** If set, VT-x/AMD-V is enabled globally at init time, otherwise it's
-     * enabled and disabled each time it's used to execute guest code. */
-    bool fGlobalInit;
-    /** Indicates whether the host is suspending or not. We'll refuse a few
-     * actions when the host is being suspended to speed up the suspending and
-     * avoid trouble. */
-    bool volatile fSuspended;
-
-    /** Whether we've already initialized all CPUs.
-     * @remarks We could check the EnableAllCpusOnce state, but this is
-     *          simpler and hopefully easier to understand. */
-    bool fEnabled;
-    /** Serialize initialization in HMR0EnableAllCpus. */
-    RTONCE EnableAllCpusOnce;
-} g_HmR0;
+SUPHWVIRTMSRS           g_HmMsrs;
+
+/** Last recorded error code during HM ring-0 init. */
+static int32_t          g_rcHmInit = VINF_SUCCESS;
+
+/** Per CPU globals. */
+static HMPHYSCPU        g_aHmCpuInfo[RTCPUSET_MAX_CPUS];
+
+/** Whether we've already initialized all CPUs.
+ * @remarks We could check the EnableAllCpusOnce state, but this is
+ *          simpler and hopefully easier to understand. */
+static bool             g_fHmEnabled = false;
+/** Serialize initialization in HMR0EnableAllCpus. */
+static RTONCE           g_HmEnableAllCpusOnce = RTONCE_INITIALIZER;
 
 
@@ -377 +357 @@
      */
     int rc;
-    g_HmR0.rcInit = rc = SUPR0EnableVTx(true /* fEnable */);
-    g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
-    if (g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
+    g_rcHmInit = rc = SUPR0EnableVTx(true /* fEnable */);
+    g_fHmVmxUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
+    if (g_fHmVmxUsingSUPR0EnableVTx)
     {
         AssertLogRelMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
@@ -394 +374 @@
         HMR0FIRSTRC FirstRc;
         hmR0FirstRcInit(&FirstRc);
-        g_HmR0.rcInit = rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
+        g_rcHmInit = rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
         if (RT_SUCCESS(rc))
-            g_HmR0.rcInit = rc = hmR0FirstRcGetStatus(&FirstRc);
+            g_rcHmInit = rc = hmR0FirstRcGetStatus(&FirstRc);
     }
 
@@ -424 +404 @@
      * to really verify if VT-x is usable.
      */
-    if (!g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
+    if (!g_fHmVmxUsingSUPR0EnableVTx)
     {
         /* Allocate a temporary VMXON region. */
@@ -471 +451 @@
          * They should fix their code, but until they do we simply refuse to run.
          */
-        g_HmR0.rcInit = VERR_VMX_IN_VMX_ROOT_MODE;
+        g_rcHmInit = VERR_VMX_IN_VMX_ROOT_MODE;
         Assert(g_fHmVmxSupported == false);
     }
@@ -512 +492 @@
     else
     {
-        g_HmR0.rcInit = rc;
+        g_rcHmInit = rc;
         g_fHmVmxSupported = false;
     }
@@ -519 +499 @@
 #ifdef LOG_ENABLED
     else
-        SUPR0Printf("hmR0InitIntelCpu failed with rc=%Rrc\n", g_HmR0.rcInit);
+        SUPR0Printf("hmR0InitIntelCpu failed with rc=%Rrc\n", g_rcHmInit);
 #endif
     return VINF_SUCCESS;
@@ -586 +566 @@
         else
         {
-            g_HmR0.rcInit = rc;
+            g_rcHmInit = rc;
             if (rc == VERR_SVM_DISABLED || rc == VERR_SVM_IN_USE)
                 rc = VINF_SUCCESS; /* Don't fail if AMD-V is disabled or in use. */
         }
     }
     else
-        g_HmR0.rcInit = rc;
+        g_rcHmInit = rc;
     return rc;
 }
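Context for the hunks above: hmR0InitIntelCpu() runs on every CPU via RTMpOnAll(), and the hmR0FirstRc* helpers fold the per-CPU outcomes into the single g_rcHmInit status. A minimal sketch of that first-error-wins pattern, using RTMpOnAll() and ASMAtomicCmpXchgS32() from IPRT; the DEMO* names are illustrative stand-ins, not the actual helpers defined elsewhere in HMR0.cpp:

    #include <iprt/asm.h>
    #include <iprt/mp.h>
    #include <iprt/errcore.h>

    /* Illustrative stand-in for HMR0FIRSTRC: records the first failure only. */
    typedef struct DEMOFIRSTRC
    {
        int32_t volatile rc;
        RTCPUID volatile idCpu;
    } DEMOFIRSTRC;

    /* Worker with the RTMpOnAll callback signature; runs on each online CPU. */
    static DECLCALLBACK(void) demoProbeCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    {
        DEMOFIRSTRC *pFirstRc = (DEMOFIRSTRC *)pvUser1;
        int rc = VINF_SUCCESS; /* ... probe the CPU: read MSRs, try VMXON, etc. ... */
        NOREF(pvUser2);

        /* Only the first failure is kept; later CPUs lose the CmpXchg race. */
        if (   RT_FAILURE(rc)
            && ASMAtomicCmpXchgS32(&pFirstRc->rc, rc, VINF_SUCCESS))
            pFirstRc->idCpu = idCpu;
    }

    static int demoProbeAllCpus(void)
    {
        DEMOFIRSTRC FirstRc = { VINF_SUCCESS, NIL_RTCPUID };
        int rc = RTMpOnAll(demoProbeCpu, &FirstRc, NULL);
        if (RT_SUCCESS(rc))
            rc = FirstRc.rc; /* the first per-CPU failure, if any */
        return rc;
    }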
@@ -607 +587 @@
      * Initialize the globals.
      */
-    g_HmR0.fEnabled = false;
-    static RTONCE s_OnceInit = RTONCE_INITIALIZER;
-    g_HmR0.EnableAllCpusOnce = s_OnceInit;
-    for (unsigned i = 0; i < RT_ELEMENTS(g_HmR0.aCpuInfo); i++)
-    {
-        g_HmR0.aCpuInfo[i].idCpu = NIL_RTCPUID;
-        g_HmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
-        g_HmR0.aCpuInfo[i].HCPhysMemObj = NIL_RTHCPHYS;
-        g_HmR0.aCpuInfo[i].pvMemObj = NULL;
+    g_fHmEnabled = false;
+    for (unsigned i = 0; i < RT_ELEMENTS(g_aHmCpuInfo); i++)
+    {
+        g_aHmCpuInfo[i].idCpu = NIL_RTCPUID;
+        g_aHmCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
+        g_aHmCpuInfo[i].HCPhysMemObj = NIL_RTHCPHYS;
+        g_aHmCpuInfo[i].pvMemObj = NULL;
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-        g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm = NIL_RTR0MEMOBJ;
-        g_HmR0.aCpuInfo[i].n.svm.HCPhysNstGstMsrpm = NIL_RTHCPHYS;
-        g_HmR0.aCpuInfo[i].n.svm.pvNstGstMsrpm = NULL;
+        g_aHmCpuInfo[i].n.svm.hNstGstMsrpm = NIL_RTR0MEMOBJ;
+        g_aHmCpuInfo[i].n.svm.HCPhysNstGstMsrpm = NIL_RTHCPHYS;
+        g_aHmCpuInfo[i].n.svm.pvNstGstMsrpm = NULL;
 #endif
     }
 
     /* Fill in all callbacks with placeholders. */
-    g_HmR0Ops          = g_HmR0OpsDummy;
+    g_HmR0Ops = g_HmR0OpsDummy;
 
     /* Default is global VT-x/AMD-V init. */
-    g_HmR0.fGlobalInit = true;
+    g_fHmGlobalInit = true;
 
     g_fHmVmxSupported = false;
@@ -636 +614 @@
      * Make sure aCpuInfo is big enough for all the CPUs on this system.
      */
-    if (RTMpGetArraySize() > RT_ELEMENTS(g_HmR0.aCpuInfo))
-    {
-        LogRel(("HM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_HmR0.aCpuInfo)));
+    if (RTMpGetArraySize() > RT_ELEMENTS(g_aHmCpuInfo))
+    {
+        LogRel(("HM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_aHmCpuInfo)));
         return VERR_TOO_MANY_CPUS;
     }
@@ -663 +641 @@
      * when brought offline/online or suspending/resuming.
      */
-    if (!g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
+    if (!g_fHmVmxUsingSUPR0EnableVTx)
     {
         rc = RTMpNotificationRegister(hmR0MpEventCallback, NULL);
@@ -681 +659 @@
             SVMR0GlobalTerm();
         g_HmR0Ops = g_HmR0OpsDummy;
-        g_HmR0.rcInit = rc;
+        g_rcHmInit = rc;
         g_fHmSvmSupported = false;
         g_fHmVmxSupported = false;
@@ -690 +668 @@
     else
     {
-        g_HmR0.rcInit = rc;
+        g_rcHmInit = rc;
         rc = VINF_SUCCESS; /* We return success here because module init shall not fail if HM fails to initialize. */
     }
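One side effect of flattening the structure is visible in the HMR0Init() hunk above: the old code had to re-arm the once-object at runtime by copying a freshly initialized static RTONCE over g_HmR0.EnableAllCpusOnce, whereas a standalone global can simply be defined with RTONCE_INITIALIZER. A minimal sketch of the RTOnce() pattern, with illustrative demo* names:

    #include <iprt/once.h>
    #include <iprt/errcore.h>

    /* Statically initialized; no runtime setup needed before first use. */
    static RTONCE g_DemoOnce = RTONCE_INITIALIZER;

    /* Executed exactly once, no matter how many threads race into RTOnce(). */
    static DECLCALLBACK(int32_t) demoOnceInit(void *pvUser)
    {
        NOREF(pvUser);
        /* ... one-time setup, e.g. enabling VT-x/AMD-V on all CPUs ... */
        return VINF_SUCCESS;
    }

    int demoEnsureInited(void *pvUser)
    {
        /* First caller runs demoOnceInit(); later callers wait and get its status. */
        return RTOnce(&g_DemoOnce, demoOnceInit, pvUser);
    }

This is exactly how HMR0EnableAllCpus() uses g_HmEnableAllCpusOnce with hmR0EnableAllCpuOnce later in this diff.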
@@ -706 +684 @@
     int rc;
     if (   g_fHmVmxSupported
-        && g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
+        && g_fHmVmxUsingSUPR0EnableVTx)
     {
         /*
          * Simple if the host OS manages VT-x.
          */
-        Assert(g_HmR0.fGlobalInit);
-
-        if (g_HmR0.hwvirt.u.vmx.fCalledSUPR0EnableVTx)
+        Assert(g_fHmGlobalInit);
+
+        if (g_fHmVmxCalledSUPR0EnableVTx)
         {
             rc = SUPR0EnableVTx(false /* fEnable */);
-            g_HmR0.hwvirt.u.vmx.fCalledSUPR0EnableVTx = false;
+            g_fHmVmxCalledSUPR0EnableVTx = false;
         }
         else
             rc = VINF_SUCCESS;
 
-        for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_HmR0.aCpuInfo); iCpu++)
+        for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_aHmCpuInfo); iCpu++)
         {
-            g_HmR0.aCpuInfo[iCpu].fConfigured = false;
-            Assert(g_HmR0.aCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
+            g_aHmCpuInfo[iCpu].fConfigured = false;
+            Assert(g_aHmCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
         }
     }
     else
     {
-        Assert(!g_fHmVmxSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
+        Assert(!g_fHmVmxSupported || !g_fHmVmxUsingSUPR0EnableVTx);
 
         /* Doesn't really matter if this fails. */
@@ -739 +717 @@
          * Disable VT-x/AMD-V on all CPUs if we enabled it before.
          */
-        if (g_HmR0.fGlobalInit)
+        if (g_fHmGlobalInit)
        {
             HMR0FIRSTRC FirstRc;
@@ -752 +730 @@
          * Free the per-cpu pages used for VT-x and AMD-V.
          */
-        for (unsigned i = 0; i < RT_ELEMENTS(g_HmR0.aCpuInfo); i++)
+        for (unsigned i = 0; i < RT_ELEMENTS(g_aHmCpuInfo); i++)
         {
-            if (g_HmR0.aCpuInfo[i].hMemObj != NIL_RTR0MEMOBJ)
+            if (g_aHmCpuInfo[i].hMemObj != NIL_RTR0MEMOBJ)
             {
-                RTR0MemObjFree(g_HmR0.aCpuInfo[i].hMemObj, false);
-                g_HmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
-                g_HmR0.aCpuInfo[i].HCPhysMemObj = NIL_RTHCPHYS;
-                g_HmR0.aCpuInfo[i].pvMemObj = NULL;
+                RTR0MemObjFree(g_aHmCpuInfo[i].hMemObj, false);
+                g_aHmCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
+                g_aHmCpuInfo[i].HCPhysMemObj = NIL_RTHCPHYS;
+                g_aHmCpuInfo[i].pvMemObj = NULL;
             }
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-            if (g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm != NIL_RTR0MEMOBJ)
+            if (g_aHmCpuInfo[i].n.svm.hNstGstMsrpm != NIL_RTR0MEMOBJ)
             {
-                RTR0MemObjFree(g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm, false);
-                g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm = NIL_RTR0MEMOBJ;
-                g_HmR0.aCpuInfo[i].n.svm.HCPhysNstGstMsrpm = NIL_RTHCPHYS;
-                g_HmR0.aCpuInfo[i].n.svm.pvNstGstMsrpm = NULL;
+                RTR0MemObjFree(g_aHmCpuInfo[i].n.svm.hNstGstMsrpm, false);
+                g_aHmCpuInfo[i].n.svm.hNstGstMsrpm = NIL_RTR0MEMOBJ;
+                g_aHmCpuInfo[i].n.svm.HCPhysNstGstMsrpm = NIL_RTHCPHYS;
+                g_aHmCpuInfo[i].n.svm.pvNstGstMsrpm = NULL;
             }
 #endif
@@ -797 +775 @@
 static int hmR0EnableCpu(PVMCC pVM, RTCPUID idCpu)
 {
-    PHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[idCpu];
+    PHMPHYSCPU pHostCpu = &g_aHmCpuInfo[idCpu];
 
     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
-    Assert(idCpu < RT_ELEMENTS(g_HmR0.aCpuInfo));
+    Assert(idCpu < RT_ELEMENTS(g_aHmCpuInfo));
     Assert(!pHostCpu->fConfigured);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
@@ -809 +787 @@
     int rc;
     if (   g_fHmVmxSupported
-        && g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
+        && g_fHmVmxUsingSUPR0EnableVTx)
         rc = g_HmR0Ops.pfnEnableCpu(pHostCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true, &g_HmMsrs);
     else
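The HMR0Term() and hmR0EnableCpu() hunks above dispatch through g_HmR0Ops, the table of function pointers that HMR0Init() points at g_HmR0OpsDummy until VT-x or AMD-V support has been detected. A cut-down, hypothetical sketch of that dispatch scheme; the real HMR0VTABLE has more members and different parameters:

    #include <iprt/types.h>
    #include <iprt/errcore.h>

    /* Hypothetical, reduced version of the ops-table idea. */
    typedef struct DEMOVTABLE
    {
        int (*pfnEnableCpu)(void *pvCpuPage);
        int (*pfnDisableCpu)(void *pvCpuPage);
    } DEMOVTABLE;

    /* Placeholder implementations keep the pointers always safely callable. */
    static int demoDummyEnable(void *pvCpuPage)  { NOREF(pvCpuPage); return VERR_NOT_SUPPORTED; }
    static int demoDummyDisable(void *pvCpuPage) { NOREF(pvCpuPage); return VERR_NOT_SUPPORTED; }

    static const DEMOVTABLE g_DemoOpsDummy = { demoDummyEnable, demoDummyDisable };
    static DEMOVTABLE       g_DemoOps;      /* the active ops, copied once at init */

    static void demoSelectOps(bool fVmxUsable)
    {
        g_DemoOps = g_DemoOpsDummy;         /* placeholders first, like g_HmR0OpsDummy */
        if (fVmxUsable)
        {
            /* g_DemoOps = g_DemoOpsVmx;       select the real backend here */
        }
    }

Copying the whole table once at init (rather than testing g_fHmVmxSupported on every call) keeps the hot paths branch-free.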
@@ -833 +811 @@
     PVMCC        pVM      = (PVMCC)pvUser1; /* can be NULL! */
     PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2;
-    AssertReturnVoid(g_HmR0.fGlobalInit);
+    AssertReturnVoid(g_fHmGlobalInit);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     hmR0FirstRcSetStatus(pFirstRc, hmR0EnableCpu(pVM, idCpu));
@@ -855 +833 @@
      * notification. Kind of unlikely though, so ignored for now.
      */
-    AssertReturn(!g_HmR0.fEnabled, VERR_HM_ALREADY_ENABLED_IPE);
-    ASMAtomicWriteBool(&g_HmR0.fEnabled, true);
+    AssertReturn(!g_fHmEnabled, VERR_HM_ALREADY_ENABLED_IPE);
+    ASMAtomicWriteBool(&g_fHmEnabled, true);
 
     /*
      * The global init variable is set by the first VM.
      */
-    g_HmR0.fGlobalInit = pVM->hm.s.fGlobalInit;
+    g_fHmGlobalInit = pVM->hm.s.fGlobalInit;
 
 #ifdef VBOX_STRICT
-    for (unsigned i = 0; i < RT_ELEMENTS(g_HmR0.aCpuInfo); i++)
-    {
-        Assert(g_HmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
-        Assert(g_HmR0.aCpuInfo[i].HCPhysMemObj == NIL_RTHCPHYS);
-        Assert(g_HmR0.aCpuInfo[i].pvMemObj == NULL);
-        Assert(!g_HmR0.aCpuInfo[i].fConfigured);
-        Assert(!g_HmR0.aCpuInfo[i].cTlbFlushes);
-        Assert(!g_HmR0.aCpuInfo[i].uCurrentAsid);
+    for (unsigned i = 0; i < RT_ELEMENTS(g_aHmCpuInfo); i++)
+    {
+        Assert(g_aHmCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
+        Assert(g_aHmCpuInfo[i].HCPhysMemObj == NIL_RTHCPHYS);
+        Assert(g_aHmCpuInfo[i].pvMemObj == NULL);
+        Assert(!g_aHmCpuInfo[i].fConfigured);
+        Assert(!g_aHmCpuInfo[i].cTlbFlushes);
+        Assert(!g_aHmCpuInfo[i].uCurrentAsid);
 # ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-        Assert(g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm == NIL_RTR0MEMOBJ);
-        Assert(g_HmR0.aCpuInfo[i].n.svm.HCPhysNstGstMsrpm == NIL_RTHCPHYS);
-        Assert(g_HmR0.aCpuInfo[i].n.svm.pvNstGstMsrpm == NULL);
+        Assert(g_aHmCpuInfo[i].n.svm.hNstGstMsrpm == NIL_RTR0MEMOBJ);
+        Assert(g_aHmCpuInfo[i].n.svm.HCPhysNstGstMsrpm == NIL_RTHCPHYS);
+        Assert(g_aHmCpuInfo[i].n.svm.pvNstGstMsrpm == NULL);
 # endif
     }
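The hunk above publishes g_fHmEnabled with ASMAtomicWriteBool(), and the suspend logic later in this diff gates the entry points on ASMAtomicReadBool(&g_fHmSuspended). A minimal sketch of that volatile-bool guard, with illustrative names:

    #include <iprt/asm.h>
    #include <VBox/err.h>

    /* Illustrative stand-in for g_fHmSuspended. */
    static bool volatile g_fDemoSuspended = false;

    /* Writer side (power callback): flip the flag around the suspend window. */
    static void demoSetSuspending(bool fSuspending)
    {
        ASMAtomicWriteBool(&g_fDemoSuspended, fSuspending);
    }

    /* Reader side (entry points): refuse to start new work while suspending. */
    static int demoEnter(void)
    {
        if (ASMAtomicReadBool(&g_fDemoSuspended))
            return VERR_HM_SUSPEND_PENDING;
        /* ... safe to proceed ... */
        return VINF_SUCCESS;
    }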
@@ -882 +860 @@
     int rc;
     if (   g_fHmVmxSupported
-        && g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
+        && g_fHmVmxUsingSUPR0EnableVTx)
     {
         /*
@@ -890 +868 @@
         if (RT_SUCCESS(rc))
         {
-            g_HmR0.hwvirt.u.vmx.fCalledSUPR0EnableVTx = true;
+            g_fHmVmxCalledSUPR0EnableVTx = true;
             /* If the host provides a VT-x init API, then we'll rely on that for global init. */
-            g_HmR0.fGlobalInit = pVM->hm.s.fGlobalInit = true;
+            g_fHmGlobalInit = pVM->hm.s.fGlobalInit = true;
         }
         else
@@ -903 +881 @@
          */
         /* Allocate one page per cpu for the global VT-x and AMD-V pages */
-        for (unsigned i = 0; i < RT_ELEMENTS(g_HmR0.aCpuInfo); i++)
+        for (unsigned i = 0; i < RT_ELEMENTS(g_aHmCpuInfo); i++)
         {
-            Assert(g_HmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
+            Assert(g_aHmCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-            Assert(g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm == NIL_RTR0MEMOBJ);
+            Assert(g_aHmCpuInfo[i].n.svm.hNstGstMsrpm == NIL_RTR0MEMOBJ);
 #endif
             if (RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(i)))
             {
                 /** @todo NUMA */
-                rc = RTR0MemObjAllocCont(&g_HmR0.aCpuInfo[i].hMemObj, PAGE_SIZE, false /* executable R0 mapping */);
+                rc = RTR0MemObjAllocCont(&g_aHmCpuInfo[i].hMemObj, PAGE_SIZE, false /* executable R0 mapping */);
                 AssertLogRelRCReturn(rc, rc);
 
-                g_HmR0.aCpuInfo[i].HCPhysMemObj = RTR0MemObjGetPagePhysAddr(g_HmR0.aCpuInfo[i].hMemObj, 0);
-                Assert(g_HmR0.aCpuInfo[i].HCPhysMemObj != NIL_RTHCPHYS);
-                Assert(!(g_HmR0.aCpuInfo[i].HCPhysMemObj & PAGE_OFFSET_MASK));
-
-                g_HmR0.aCpuInfo[i].pvMemObj = RTR0MemObjAddress(g_HmR0.aCpuInfo[i].hMemObj);
-                AssertPtr(g_HmR0.aCpuInfo[i].pvMemObj);
-                ASMMemZeroPage(g_HmR0.aCpuInfo[i].pvMemObj);
+                g_aHmCpuInfo[i].HCPhysMemObj = RTR0MemObjGetPagePhysAddr(g_aHmCpuInfo[i].hMemObj, 0);
+                Assert(g_aHmCpuInfo[i].HCPhysMemObj != NIL_RTHCPHYS);
+                Assert(!(g_aHmCpuInfo[i].HCPhysMemObj & PAGE_OFFSET_MASK));
+
+                g_aHmCpuInfo[i].pvMemObj = RTR0MemObjAddress(g_aHmCpuInfo[i].hMemObj);
+                AssertPtr(g_aHmCpuInfo[i].pvMemObj);
+                ASMMemZeroPage(g_aHmCpuInfo[i].pvMemObj);
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-                rc = RTR0MemObjAllocCont(&g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT,
+                rc = RTR0MemObjAllocCont(&g_aHmCpuInfo[i].n.svm.hNstGstMsrpm, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT,
                                          false /* executable R0 mapping */);
                 AssertLogRelRCReturn(rc, rc);
 
-                g_HmR0.aCpuInfo[i].n.svm.HCPhysNstGstMsrpm = RTR0MemObjGetPagePhysAddr(g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm, 0);
-                Assert(g_HmR0.aCpuInfo[i].n.svm.HCPhysNstGstMsrpm != NIL_RTHCPHYS);
-                Assert(!(g_HmR0.aCpuInfo[i].n.svm.HCPhysNstGstMsrpm & PAGE_OFFSET_MASK));
-
-                g_HmR0.aCpuInfo[i].n.svm.pvNstGstMsrpm = RTR0MemObjAddress(g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm);
-                AssertPtr(g_HmR0.aCpuInfo[i].n.svm.pvNstGstMsrpm);
-                ASMMemFill32(g_HmR0.aCpuInfo[i].n.svm.pvNstGstMsrpm, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
+                g_aHmCpuInfo[i].n.svm.HCPhysNstGstMsrpm = RTR0MemObjGetPagePhysAddr(g_aHmCpuInfo[i].n.svm.hNstGstMsrpm, 0);
+                Assert(g_aHmCpuInfo[i].n.svm.HCPhysNstGstMsrpm != NIL_RTHCPHYS);
+                Assert(!(g_aHmCpuInfo[i].n.svm.HCPhysNstGstMsrpm & PAGE_OFFSET_MASK));
+
+                g_aHmCpuInfo[i].n.svm.pvNstGstMsrpm = RTR0MemObjAddress(g_aHmCpuInfo[i].n.svm.hNstGstMsrpm);
+                AssertPtr(g_aHmCpuInfo[i].n.svm.pvNstGstMsrpm);
+                ASMMemFill32(g_aHmCpuInfo[i].n.svm.pvNstGstMsrpm, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
 #endif
             }
@@ -943 +921 @@
 
     if (   RT_SUCCESS(rc)
-        && g_HmR0.fGlobalInit)
+        && g_fHmGlobalInit)
     {
         /* First time, so initialize each cpu/core. */
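The allocation loop above is the usual ring-0 recipe for a page that must be reachable both virtually and physically: RTR0MemObjAllocCont() allocates physically contiguous memory, RTR0MemObjGetPagePhysAddr() yields the physical address handed to the hardware (VMXON region, host VMCB area), RTR0MemObjAddress() the mapping to initialize, and RTR0MemObjFree() releases it. A condensed sketch with hypothetical helper names:

    #include <iprt/memobj.h>
    #include <iprt/param.h>
    #include <iprt/asm.h>
    #include <iprt/errcore.h>

    /* Hypothetical helper: one physically contiguous, zeroed page (e.g. a VMXON page). */
    static int demoAllocHwPage(PRTR0MEMOBJ phMemObj, RTHCPHYS *pHCPhys, void **ppv)
    {
        int rc = RTR0MemObjAllocCont(phMemObj, PAGE_SIZE, false /* executable R0 mapping */);
        if (RT_FAILURE(rc))
            return rc;

        *pHCPhys = RTR0MemObjGetPagePhysAddr(*phMemObj, 0 /* iPage */); /* for the hardware */
        *ppv     = RTR0MemObjAddress(*phMemObj);                        /* for the CPU */
        ASMMemZeroPage(*ppv);
        return VINF_SUCCESS;
    }

    /* Hypothetical counterpart matching the teardown loop in HMR0Term(). */
    static void demoFreeHwPage(PRTR0MEMOBJ phMemObj)
    {
        if (*phMemObj != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(*phMemObj, false /* fFreeMappings */);
            *phMemObj = NIL_RTR0MEMOBJ;
        }
    }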
@@ -966 +944 @@
 {
     /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
-    if (ASMAtomicReadBool(&g_HmR0.fSuspended))
+    if (ASMAtomicReadBool(&g_fHmSuspended))
         return VERR_HM_SUSPEND_PENDING;
 
-    return RTOnce(&g_HmR0.EnableAllCpusOnce, hmR0EnableAllCpuOnce, pVM);
+    return RTOnce(&g_HmEnableAllCpusOnce, hmR0EnableAllCpuOnce, pVM);
 }
 
@@ -983 +961 @@
 static int hmR0DisableCpu(RTCPUID idCpu)
 {
-    PHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[idCpu];
-
-    Assert(!g_fHmVmxSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
+    PHMPHYSCPU pHostCpu = &g_aHmCpuInfo[idCpu];
+
+    Assert(!g_fHmVmxSupported || !g_fHmVmxUsingSUPR0EnableVTx);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
-    Assert(idCpu < RT_ELEMENTS(g_HmR0.aCpuInfo));
+    Assert(idCpu < RT_ELEMENTS(g_aHmCpuInfo));
     Assert(!pHostCpu->fConfigured || pHostCpu->hMemObj != NIL_RTR0MEMOBJ);
     AssertRelease(idCpu == RTMpCpuId());
@@ -1023 +1001 @@
 {
     PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2; NOREF(pvUser1);
-    AssertReturnVoid(g_HmR0.fGlobalInit);
+    AssertReturnVoid(g_fHmGlobalInit);
     hmR0FirstRcSetStatus(pFirstRc, hmR0DisableCpu(idCpu));
 }
@@ -1054 +1032 @@
 {
     NOREF(pvData);
-    Assert(!g_fHmVmxSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
+    Assert(!g_fHmVmxSupported || !g_fHmVmxUsingSUPR0EnableVTx);
 
     /*
@@ -1095 +1073 @@
 {
     NOREF(pvUser);
-    Assert(!g_fHmVmxSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
+    Assert(!g_fHmVmxSupported || !g_fHmVmxUsingSUPR0EnableVTx);
 
 #ifdef LOG_ENABLED
@@ -1105 +1083 @@
 
     if (enmEvent == RTPOWEREVENT_SUSPEND)
-        ASMAtomicWriteBool(&g_HmR0.fSuspended, true);
-
-    if (g_HmR0.fEnabled)
+        ASMAtomicWriteBool(&g_fHmSuspended, true);
+
+    if (g_fHmEnabled)
     {
         int rc;
@@ -1115 +1093 @@
         if (enmEvent == RTPOWEREVENT_SUSPEND)
         {
-            if (g_HmR0.fGlobalInit)
+            if (g_fHmGlobalInit)
             {
                 /* Turn off VT-x or AMD-V on all CPUs. */
@@ -1138 +1116 @@
                 SUPR0Printf("hmR0PowerCallback hmR0InitXxxCpu failed with %Rc\n", rc);
 #endif
-            if (g_HmR0.fGlobalInit)
+            if (g_fHmGlobalInit)
             {
                 /* Turn VT-x or AMD-V back on on all CPUs. */
@@ -1149 +1127 @@
 
     if (enmEvent == RTPOWEREVENT_RESUME)
-        ASMAtomicWriteBool(&g_HmR0.fSuspended, false);
+        ASMAtomicWriteBool(&g_fHmSuspended, false);
 }
 
@@ -1170 +1148 @@
 
     /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
-    if (ASMAtomicReadBool(&g_HmR0.fSuspended))
+    if (ASMAtomicReadBool(&g_fHmSuspended))
         return VERR_HM_SUSPEND_PENDING;
 
@@ -1225 +1203 @@
         /* If you need to tweak host MSRs for testing SVM R0 code, do it here. */
     }
-    pVM->hm.s.rcInit = g_HmR0.rcInit;
+    pVM->hm.s.rcInit = g_rcHmInit;
     pVM->hm.s.uMaxAsidForLog = g_uHmMaxAsid;
 
@@ -1339 +1317 @@
 
     /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
-    AssertReturn(!ASMAtomicReadBool(&g_HmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
+    AssertReturn(!ASMAtomicReadBool(&g_fHmSuspended), VERR_HM_SUSPEND_PENDING);
 
     /* On first entry we'll sync everything. */
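The hmR0PowerCallback() hunks above bracket the suspend window: set the suspended flag and turn hardware virtualization off on every CPU before the host sleeps, then re-enable and clear the flag on resume. A compressed sketch of that sequencing, assuming IPRT's power-notification callback type; the demo* workers are placeholders, and registration would presumably go through RTPowerNotificationRegister(demoPowerCallback, NULL), which this changeset does not itself show:

    #include <iprt/asm.h>
    #include <iprt/mp.h>
    #include <iprt/power.h>

    static bool volatile g_fDemoSuspended = false;

    /* Placeholder per-CPU workers (RTMpOnAll callback signature). */
    static DECLCALLBACK(void) demoDisableCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    {
        NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2); /* ... VMXOFF / clear EFER.SVME ... */
    }

    static DECLCALLBACK(void) demoEnableCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    {
        NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2); /* ... VMXON / set EFER.SVME ... */
    }

    static DECLCALLBACK(void) demoPowerCallback(RTPOWEREVENT enmEvent, void *pvUser)
    {
        NOREF(pvUser);
        if (enmEvent == RTPOWEREVENT_SUSPEND)
        {
            /* Block new entries first, then turn the hardware off everywhere. */
            ASMAtomicWriteBool(&g_fDemoSuspended, true);
            RTMpOnAll(demoDisableCpu, NULL, NULL);
        }
        else if (enmEvent == RTPOWEREVENT_RESUME)
        {
            /* Firmware may have reset state across the sleep; re-enable, then unblock. */
            RTMpOnAll(demoEnableCpu, NULL, NULL);
            ASMAtomicWriteBool(&g_fDemoSuspended, false);
        }
    }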
@@ -1354 +1332 @@
     /* Enable VT-x or AMD-V if local init is required. */
     int rc;
-    if (!g_HmR0.fGlobalInit)
-    {
-        Assert(!g_fHmVmxSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
+    if (!g_fHmGlobalInit)
+    {
+        Assert(!g_fHmVmxSupported || !g_fHmVmxUsingSUPR0EnableVTx);
         rc = hmR0EnableCpu(pVM, idCpu);
         if (RT_FAILURE(rc))
@@ -1369 +1347 @@
 
     /* Disable VT-x or AMD-V if local init was done before. */
-    if (!g_HmR0.fGlobalInit)
-    {
-        Assert(!g_fHmVmxSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
+    if (!g_fHmGlobalInit)
+    {
+        Assert(!g_fHmVmxSupported || !g_fHmVmxUsingSUPR0EnableVTx);
         int rc2 = hmR0DisableCpu(idCpu);
         AssertRC(rc2);
@@ -1413 +1391 @@
     int rc = VINF_SUCCESS;
     RTCPUID const idCpu = RTMpCpuId();
-    PHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[idCpu];
+    PHMPHYSCPU pHostCpu = &g_aHmCpuInfo[idCpu];
     AssertPtr(pHostCpu);
@@ -1446 +1424 @@
 {
     /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
-    AssertReturn(!ASMAtomicReadBool(&g_HmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
+    AssertReturn(!ASMAtomicReadBool(&g_fHmSuspended), VERR_HM_SUSPEND_PENDING);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
@@ -1498 +1476 @@
 
     RTCPUID const idCpu = RTMpCpuId();
-    PCHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[idCpu];
-
-    if (   !g_HmR0.fGlobalInit
+    PCHMPHYSCPU pHostCpu = &g_aHmCpuInfo[idCpu];
+
+    if (   !g_fHmGlobalInit
         && pHostCpu->fConfigured)
     {
@@ -1533 +1511 @@
     Assert(g_HmR0Ops.pfnThreadCtxCallback);
 
-    g_HmR0Ops.pfnThreadCtxCallback(enmEvent, pVCpu, g_HmR0.fGlobalInit);
+    g_HmR0Ops.pfnThreadCtxCallback(enmEvent, pVCpu, g_fHmGlobalInit);
 }
 
@@ -1556 +1534 @@
     if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
     {
-        PCHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[RTMpCpuId()];
+        PCHMPHYSCPU pHostCpu = &g_aHmCpuInfo[RTMpCpuId()];
         Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
         Assert(pHostCpu->fConfigured);
-        AssertReturn(!ASMAtomicReadBool(&g_HmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
+        AssertReturn(!ASMAtomicReadBool(&g_fHmSuspended), VERR_HM_SUSPEND_PENDING);
     }
 #endif
@@ -1608 +1586 @@
 VMMR0_INT_DECL(bool) HMR0SuspendPending(void)
 {
-    return ASMAtomicReadBool(&g_HmR0.fSuspended);
+    return ASMAtomicReadBool(&g_fHmSuspended);
 }
 
@@ -1637 +1615 @@
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     RTCPUID const idCpu = RTMpCpuId();
-    Assert(idCpu < RT_ELEMENTS(g_HmR0.aCpuInfo));
-    return &g_HmR0.aCpuInfo[idCpu];
+    Assert(idCpu < RT_ELEMENTS(g_aHmCpuInfo));
+    return &g_aHmCpuInfo[idCpu];
 }