VirtualBox

Changeset 37320 in vbox for trunk/src/VBox


Timestamp: Jun 3, 2011 3:05:36 PM (14 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 72075
Message:
HWACCM: Slowly shortening it to 'HM' (too much uppercase typing for such an important component). Review and cleanups.

Location: trunk/src/VBox/VMM
Files: 7 edited

Legend: unchanged lines carry a leading space, lines removed in r37320 carry '-', lines added carry '+', and '@@ ... @@' marks elided context.
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp (r37319 → r37320)
 /* $Id$ */
 /** @file
- * HWACCM - Host Context Ring 0.
+ * Hardware Assisted Virtualization Manager - Host Context Ring-0.
  */
 
@@ ... @@
 *   Internal Functions                                                         *
 *******************************************************************************/
-static DECLCALLBACK(void) hwaccmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
-static DECLCALLBACK(void) hwaccmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
-static DECLCALLBACK(void) hwaccmR0InitCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2);
-static bool               hwaccmR0IsSubjectToVmxPreemptionTimerErratum(void);
-static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser);
-static DECLCALLBACK(void) hwaccmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData);
+static DECLCALLBACK(void) hmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) hmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) hmR0InitIntelCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) hmR0InitAmdCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) hmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser);
+static DECLCALLBACK(void) hmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData);
+
+
+/*******************************************************************************
+*   Structures and Typedefs                                                    *
+*******************************************************************************/
+/**
+ * This is used to manage the status code of a RTMpOnAll in HM.
+ */
+typedef struct HMR0FIRSTRC
+{
+    /** The status code. */
+    int32_t volatile    rc;
+    /** The ID of the CPU reporting the first failure. */
+    RTCPUID volatile    idCpu;
+} HMR0FIRSTRC;
+/** Pointer to a first return code structure. */
+typedef HMR0FIRSTRC *PHMR0FIRSTRC;
 
 
@@ ... @@
 *   Global Variables                                                           *
 *******************************************************************************/
-
+/**
+ * Global data.
+ */
 static struct
 {
-    HWACCM_CPUINFO aCpuInfo[RTCPUSET_MAX_CPUS];
-
-    /** Ring 0 handlers for VT-x and AMD-V. */
-    DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu));
+    /** Per CPU globals. */
+    HMGLOBLCPUINFO                  aCpuInfo[RTCPUSET_MAX_CPUS];
+
+    /** @name Ring-0 method table for AMD-V and VT-x specific operations.
+     * @{ */
+    DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu));
     DECLR0CALLBACKMEMBER(int, pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     DECLR0CALLBACKMEMBER(int, pfnSaveHostState,(PVM pVM, PVMCPU pVCpu));
     DECLR0CALLBACKMEMBER(int, pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
-    DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHWACCM_CPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
-    DECLR0CALLBACKMEMBER(int, pfnDisableCpu, (PHWACCM_CPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
-    DECLR0CALLBACKMEMBER(int, pfnInitVM, (PVM pVM));
-    DECLR0CALLBACKMEMBER(int, pfnTermVM, (PVM pVM));
-    DECLR0CALLBACKMEMBER(int, pfnSetupVM, (PVM pVM));
+    DECLR0CALLBACKMEMBER(int, pfnEnableCpu,(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
+    DECLR0CALLBACKMEMBER(int, pfnDisableCpu,(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
+    DECLR0CALLBACKMEMBER(int, pfnInitVM,(PVM pVM));
+    DECLR0CALLBACKMEMBER(int, pfnTermVM,(PVM pVM));
+    DECLR0CALLBACKMEMBER(int, pfnSetupVM,(PVM pVM));
+    /** @} */
 
     /** Maximum ASID allowed. */
     uint32_t                        uMaxASID;
 
+    /** VT-x data. */
     struct
     {
@@ ... @@
     } vmx;
 
+    /** AMD-V information. */
     struct
     {
@@ ... @@
     /** Serialize initialization in HWACCMR0EnableAllCpus. */
     RTONCE                          EnableAllCpusOnce;
-} HWACCMR0Globals;
-
-
-/**
- * This is used to manage the status code of a RTMpOnAll in HWACCM.
- */
-typedef struct HWACCMR0FIRSTRC
-{
-    /** The status code. */
-    int32_t volatile    rc;
-    /** The ID of the CPU reporting the first failure. */
-    RTCPUID volatile    idCpu;
-} HWACCMR0FIRSTRC;
-/** Pointer to a first return code structure. */
-typedef HWACCMR0FIRSTRC *PHWACCMR0FIRSTRC;
+} g_HvmR0;
+
 
 
@@ ... @@
  * @param   pFirstRc            The structure to init.
  */
-static void hwaccmR0FirstRcInit(PHWACCMR0FIRSTRC pFirstRc)
+static void hmR0FirstRcInit(PHMR0FIRSTRC pFirstRc)
 {
     pFirstRc->rc    = VINF_SUCCESS;
@@ ... @@
  * @param   rc                  The status code.
  */
-static void hwaccmR0FirstRcSetStatus(PHWACCMR0FIRSTRC pFirstRc, int rc)
+static void     hmR0FirstRcSetStatus(PHMR0FIRSTRC pFirstRc, int rc)
 {
     if (   RT_FAILURE(rc)
@@ ... @@
  * @param   pFirstRc            The first return code structure.
  */
-static int hwaccmR0FirstRcGetStatus(PHWACCMR0FIRSTRC pFirstRc)
+static int hmR0FirstRcGetStatus(PHMR0FIRSTRC pFirstRc)
 {
     return pFirstRc->rc;
@@ ... @@
  * @param   pFirstRc            The first return code structure.
  */
-static RTCPUID hwaccmR0FirstRcGetCpuId(PHWACCMR0FIRSTRC pFirstRc)
+static RTCPUID hmR0FirstRcGetCpuId(PHMR0FIRSTRC pFirstRc)
 {
     return pFirstRc->idCpu;
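
Note: the HMR0FIRSTRC helpers above implement a "first failure wins" aggregator for RTMpOnAll, which runs a worker on every online CPU but has no per-CPU status channel of its own. A minimal usage sketch in the same style (illustrative only, not part of this changeset; hmR0ProbeCpu is a hypothetical worker):

    static DECLCALLBACK(void) hmR0ProbeCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    {
        PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser1;
        int rc = VINF_SUCCESS;              /* per-CPU probe result, e.g. an MSR check */
        hmR0FirstRcSetStatus(pFirstRc, rc); /* records only the first failure and its CPU */
    }

    HMR0FIRSTRC FirstRc;
    hmR0FirstRcInit(&FirstRc);
    int rc = RTMpOnAll(hmR0ProbeCpu, &FirstRc, NULL /* pvUser2 */);
    if (RT_SUCCESS(rc))
        rc = hmR0FirstRcGetStatus(&FirstRc); /* VINF_SUCCESS unless some CPU failed */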
     
@@ ... @@
  * @{ */
 
-static DECLCALLBACK(int) hwaccmR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu)
+static DECLCALLBACK(int) hmR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
 {
     return VINF_SUCCESS;
 }
 
-static DECLCALLBACK(int) hwaccmR0DummyLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+static DECLCALLBACK(int) hmR0DummyLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     return VINF_SUCCESS;
 }
 
-static DECLCALLBACK(int) hwaccmR0DummyEnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+static DECLCALLBACK(int) hmR0DummyEnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
 {
     return VINF_SUCCESS;
 }
 
-static DECLCALLBACK(int) hwaccmR0DummyDisableCpu(PHWACCM_CPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+static DECLCALLBACK(int) hmR0DummyDisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
 {
     return VINF_SUCCESS;
 }
 
-static DECLCALLBACK(int) hwaccmR0DummyInitVM(PVM pVM)
+static DECLCALLBACK(int) hmR0DummyInitVM(PVM pVM)
 {
     return VINF_SUCCESS;
 }
 
-static DECLCALLBACK(int) hwaccmR0DummyTermVM(PVM pVM)
+static DECLCALLBACK(int) hmR0DummyTermVM(PVM pVM)
 {
     return VINF_SUCCESS;
 }
 
-static DECLCALLBACK(int) hwaccmR0DummySetupVM(PVM pVM)
+static DECLCALLBACK(int) hmR0DummySetupVM(PVM pVM)
 {
     return VINF_SUCCESS;
 }
 
-static DECLCALLBACK(int) hwaccmR0DummyRunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+static DECLCALLBACK(int) hmR0DummyRunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     return VINF_SUCCESS;
 }
 
-static DECLCALLBACK(int) hwaccmR0DummySaveHostState(PVM pVM, PVMCPU pVCpu)
+static DECLCALLBACK(int) hmR0DummySaveHostState(PVM pVM, PVMCPU pVCpu)
 {
     return VINF_SUCCESS;
 }
 
-static DECLCALLBACK(int) hwaccmR0DummyLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+static DECLCALLBACK(int) hmR0DummyLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     return VINF_SUCCESS;
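
Note: the no-op implementations above pre-fill the g_HvmR0 method table, so callers can invoke the pfn* members unconditionally even when neither VT-x nor AMD-V was detected (a null-object setup). The call below, visible later in this diff, relies on that:

    /* Safe even before the VMX or SVM methods are installed: */
    int rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage);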
     
@@ ... @@
 
 /** @} */
-
-
-
-/**
- * Does global Ring-0 HWACCM initialization (at module init).
- *
- * @returns VBox status code.
- */
-VMMR0DECL(int) HWACCMR0Init(void)
-{
-
-    /*
-     * Initialize the globals.
-     */
-    HWACCMR0Globals.fEnabled = false;
-    static RTONCE s_OnceInit = RTONCE_INITIALIZER;
-    HWACCMR0Globals.EnableAllCpusOnce = s_OnceInit;
-    for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
-        HWACCMR0Globals.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
-
-    /* Fill in all callbacks with placeholders. */
-    HWACCMR0Globals.pfnEnterSession     = hwaccmR0DummyEnter;
-    HWACCMR0Globals.pfnLeaveSession     = hwaccmR0DummyLeave;
-    HWACCMR0Globals.pfnSaveHostState    = hwaccmR0DummySaveHostState;
-    HWACCMR0Globals.pfnLoadGuestState   = hwaccmR0DummyLoadGuestState;
-    HWACCMR0Globals.pfnRunGuestCode     = hwaccmR0DummyRunGuestCode;
-    HWACCMR0Globals.pfnEnableCpu        = hwaccmR0DummyEnableCpu;
-    HWACCMR0Globals.pfnDisableCpu       = hwaccmR0DummyDisableCpu;
-    HWACCMR0Globals.pfnInitVM           = hwaccmR0DummyInitVM;
-    HWACCMR0Globals.pfnTermVM           = hwaccmR0DummyTermVM;
-    HWACCMR0Globals.pfnSetupVM          = hwaccmR0DummySetupVM;
-
-    /* Default is global VT-x/AMD-V init */
-    HWACCMR0Globals.fGlobalInit         = true;
-
-    /*
-     * Make sure aCpuInfo is big enough for all the CPUs on this system.
-     */
-    if (RTMpGetArraySize() > RT_ELEMENTS(HWACCMR0Globals.aCpuInfo))
-    {
-        LogRel(("HWACCM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(HWACCMR0Globals.aCpuInfo)));
-        return VERR_TOO_MANY_CPUS;
-    }
-
-    /*
-     * Check for VT-x and AMD-V capabilities
-     */
-    int rc;
-    if (ASMHasCpuId())
-    {
-        uint32_t u32FeaturesECX, u32FeaturesEDX;
-        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;
-        uint32_t u32Dummy;
-
-        /* STandard features. */
-        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
-        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
-
-        /* Query AMD features. */
-        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX,
-                 &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);
-
-        /*
-         * Intel CPU?
-         */
-        if (   u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
-            && u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
-            && u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
-           )
-        {
-            /*
-             * Read all VMX MSRs if VMX is available. (same goes for RDMSR/WRMSR)
-             * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
-             */
-            if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
-                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
-                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
-               )
-            {
-                /** @todo move this into a separate function. */
-                HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
-
-                /*
-                 * First try use native kernel API for controlling VT-x.
-                 * (This is only supported by some Mac OS X kernels atm.)
-                 */
-                HWACCMR0Globals.lLastError = rc = SUPR0EnableVTx(true /* fEnable */);
-                if (rc != VERR_NOT_SUPPORTED)
-                {
-                    AssertMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
-                    HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx = true;
-                    if (RT_SUCCESS(rc))
-                    {
-                        HWACCMR0Globals.vmx.fSupported = true;
-                        rc = SUPR0EnableVTx(false /* fEnable */);
-                        AssertRC(rc);
-                    }
-                }
-                else
-                {
-                    HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx = false;
-
-                    /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
-                    HWACCMR0FIRSTRC FirstRc;
-                    hwaccmR0FirstRcInit(&FirstRc);
-                    HWACCMR0Globals.lLastError = RTMpOnAll(hwaccmR0InitCpu, (void *)(uintptr_t)u32VendorEBX, &FirstRc);
-                    if (RT_SUCCESS(HWACCMR0Globals.lLastError))
-                        HWACCMR0Globals.lLastError = hwaccmR0FirstRcGetStatus(&FirstRc);
-                }
-                if (RT_SUCCESS(HWACCMR0Globals.lLastError))
-                {
-                    /* Reread in case we've changed it. */
-                    HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
-
-                    if (   (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
-                        ==                                         (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
-                    {
-                        RTR0MEMOBJ pScatchMemObj;
-                        void      *pvScatchPage;
-                        RTHCPHYS   pScatchPagePhys;
-
-                        HWACCMR0Globals.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
-                        HWACCMR0Globals.vmx.msr.vmx_pin_ctls.u  = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
-                        HWACCMR0Globals.vmx.msr.vmx_proc_ctls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
-                        HWACCMR0Globals.vmx.msr.vmx_exit.u      = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
-                        HWACCMR0Globals.vmx.msr.vmx_entry.u     = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
-                        HWACCMR0Globals.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
-                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
-                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
-                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
-                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
-                        HWACCMR0Globals.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
-                        /* VPID 16 bits ASID. */
-                        HWACCMR0Globals.uMaxASID                = 0x10000; /* exclusive */
-
-                        if (HWACCMR0Globals.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
-                        {
-                            HWACCMR0Globals.vmx.msr.vmx_proc_ctls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
-                            if (HWACCMR0Globals.vmx.msr.vmx_proc_ctls2.n.allowed1 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT|VMX_VMCS_CTRL_PROC_EXEC2_VPID))
-                                HWACCMR0Globals.vmx.msr.vmx_eptcaps = ASMRdMsr(MSR_IA32_VMX_EPT_CAPS);
-                        }
-
-                        if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
-                        {
-                            HWACCMR0Globals.vmx.hostCR4             = ASMGetCR4();
-                            HWACCMR0Globals.vmx.hostEFER            = ASMRdMsr(MSR_K6_EFER);
-
-                            rc = RTR0MemObjAllocCont(&pScatchMemObj, PAGE_SIZE, true /* executable R0 mapping */);
-                            if (RT_FAILURE(rc))
-                                return rc;
-
-                            pvScatchPage    = RTR0MemObjAddress(pScatchMemObj);
-                            pScatchPagePhys = RTR0MemObjGetPagePhysAddr(pScatchMemObj, 0);
-                            memset(pvScatchPage, 0, PAGE_SIZE);
-
-                            /* Set revision dword at the beginning of the structure. */
-                            *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);
-
-                            /* Make sure we don't get rescheduled to another cpu during this probe. */
-                            RTCCUINTREG fFlags = ASMIntDisableFlags();
-
-                            /*
-                             * Check CR4.VMXE
-                             */
-                            if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
-                            {
-                                /* In theory this bit could be cleared behind our back. Which would cause #UD faults when we
-                                 * try to execute the VMX instructions...
-                                 */
-                                ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
-                            }
-
-                            /* Enter VMX Root Mode */
-                            rc = VMXEnable(pScatchPagePhys);
-                            if (RT_FAILURE(rc))
-                            {
-                                /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
-                                 * (a) clearing X86_CR4_VMXE in CR4 causes a #GP    (we no longer modify this bit)
-                                 * (b) turning off paging causes a #GP              (unavoidable when switching from long to 32 bits mode or 32 bits to PAE)
-                                 *
-                                 * They should fix their code, but until they do we simply refuse to run.
-                                 */
-                                HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
-                            }
-                            else
-                            {
-                                HWACCMR0Globals.vmx.fSupported = true;
-                                VMXDisable();
-
-                                /*
-                                 * Check for the VMX-Preemption Timer and adjust for the
-                                 * "VMX-Preemption Timer Does Not Count Down at the Rate Specified" erratum.
-                                 */
-                                if (  HWACCMR0Globals.vmx.msr.vmx_pin_ctls.n.allowed1
-                                    & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
-                                {
-                                    HWACCMR0Globals.vmx.fUsePreemptTimer   = true;
-                                    HWACCMR0Globals.vmx.cPreemptTimerShift = MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(HWACCMR0Globals.vmx.msr.vmx_misc);
-                                    if (hwaccmR0IsSubjectToVmxPreemptionTimerErratum())
-                                        HWACCMR0Globals.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
-                                }
-                            }
-
-                            /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode) */
-                            ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
-                            ASMSetFlags(fFlags);
-
-                            RTR0MemObjFree(pScatchMemObj, false);
-                            if (RT_FAILURE(HWACCMR0Globals.lLastError))
-                                return HWACCMR0Globals.lLastError;
-                        }
-                    }
-                    else
-                    {
-                        AssertFailed(); /* can't hit this case anymore */
-                        HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
-                    }
-
-                    if (HWACCMR0Globals.vmx.fSupported)
-                    {
-                        HWACCMR0Globals.pfnEnterSession     = VMXR0Enter;
-                        HWACCMR0Globals.pfnLeaveSession     = VMXR0Leave;
-                        HWACCMR0Globals.pfnSaveHostState    = VMXR0SaveHostState;
-                        HWACCMR0Globals.pfnLoadGuestState   = VMXR0LoadGuestState;
-                        HWACCMR0Globals.pfnRunGuestCode     = VMXR0RunGuestCode;
-                        HWACCMR0Globals.pfnEnableCpu        = VMXR0EnableCpu;
-                        HWACCMR0Globals.pfnDisableCpu       = VMXR0DisableCpu;
-                        HWACCMR0Globals.pfnInitVM           = VMXR0InitVM;
-                        HWACCMR0Globals.pfnTermVM           = VMXR0TermVM;
-                        HWACCMR0Globals.pfnSetupVM          = VMXR0SetupVM;
-                    }
-                }
-#ifdef LOG_ENABLED
-                else
-                    SUPR0Printf("hwaccmR0InitCpu failed with rc=%d\n", HWACCMR0Globals.lLastError);
-#endif
-            }
-            else
-                HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
-        }
-        /*
-         * AMD CPU?
-         */
-        else if (   u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
-                 && u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
-                 && u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
-                )
-        {
-            /** @todo move this into a separate function. */
-
-            /*
-             * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
-             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
-             */
-            if (   (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
-                && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
-                && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
-               )
-            {
-                HWACCMR0Globals.pfnEnterSession     = SVMR0Enter;
-                HWACCMR0Globals.pfnLeaveSession     = SVMR0Leave;
-                HWACCMR0Globals.pfnSaveHostState    = SVMR0SaveHostState;
-                HWACCMR0Globals.pfnLoadGuestState   = SVMR0LoadGuestState;
-                HWACCMR0Globals.pfnRunGuestCode     = SVMR0RunGuestCode;
-                HWACCMR0Globals.pfnEnableCpu        = SVMR0EnableCpu;
-                HWACCMR0Globals.pfnDisableCpu       = SVMR0DisableCpu;
-                HWACCMR0Globals.pfnInitVM           = SVMR0InitVM;
-                HWACCMR0Globals.pfnTermVM           = SVMR0TermVM;
-                HWACCMR0Globals.pfnSetupVM          = SVMR0SetupVM;
-
-                /* Query AMD features. */
-                ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.uMaxASID,
-                         &u32Dummy, &HWACCMR0Globals.svm.u32Features);
-
-                /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
-                HWACCMR0FIRSTRC FirstRc;
-                hwaccmR0FirstRcInit(&FirstRc);
-                rc = RTMpOnAll(hwaccmR0InitCpu, (void *)(uintptr_t)u32VendorEBX, &FirstRc);    AssertRC(rc);
-                if (RT_SUCCESS(rc))
-                    rc = hwaccmR0FirstRcGetStatus(&FirstRc);
-#ifndef DEBUG_bird
-                AssertMsg(rc == VINF_SUCCESS || rc == VERR_SVM_IN_USE,
-                          ("hwaccmR0InitCpu failed for cpu %d with rc=%d\n", hwaccmR0FirstRcGetCpuId(&FirstRc), rc));
-#endif
-                if (RT_SUCCESS(rc))
-                {
-                    /* Read the HWCR msr for diagnostics. */
-                    HWACCMR0Globals.svm.msrHWCR    = ASMRdMsr(MSR_K8_HWCR);
-                    HWACCMR0Globals.svm.fSupported = true;
-                }
-                else
-                    HWACCMR0Globals.lLastError = rc;
-            }
-            else
-                HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
-        }
-        /*
-         * Unknown CPU.
-         */
-        else
-            HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
-    }
-    else
-        HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;
-
-    /*
-     * Register notification callbacks that we can use to disable/enable CPUs
-     * when brought offline/online or suspending/resuming.
-     */
-    if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
-    {
-        rc = RTMpNotificationRegister(hwaccmR0MpEventCallback, NULL);
-        AssertRC(rc);
-
-        rc = RTPowerNotificationRegister(hwaccmR0PowerCallback, NULL);
-        AssertRC(rc);
-    }
-
-    /* We return success here because module init shall not fail if HWACCM
-       fails to initialize. */
-    return VINF_SUCCESS;
-}
 
 
@@ ... @@
  * @returns true if subject to it, false if not.
  */
-static bool hwaccmR0IsSubjectToVmxPreemptionTimerErratum(void)
+static bool hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum(void)
 {
     uint32_t u = ASMCpuId_EAX(1);
@@ ... @@
 
 /**
- * Does global Ring-0 HWACCM termination.
+ * Intel specific initialization code.
+ *
+ * @returns VBox status code (will only fail if out of memory).
+ */
+static int hmR0InitIntel(uint32_t u32FeaturesECX, uint32_t u32FeaturesEDX)
+{
+    /*
+     * Check that all the required VT-x features are present.
+     * We also assume all VT-x-enabled CPUs support fxsave/fxrstor.
+     */
+    if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
+         && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
+         && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
+       )
+    {
+        /** @todo move this into a separate function. */
+        g_HvmR0.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+
+        /*
+         * First try use native kernel API for controlling VT-x.
+         * (This is only supported by some Mac OS X kernels atm.)
+         */
+        int rc = g_HvmR0.lLastError = SUPR0EnableVTx(true /* fEnable */);
+        g_HvmR0.vmx.fUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
+        if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+        {
+            AssertMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
+            if (RT_SUCCESS(rc))
+            {
+                g_HvmR0.vmx.fSupported = true;
+                rc = SUPR0EnableVTx(false /* fEnable */);
+                AssertRC(rc);
+            }
+        }
+        else
+        {
+            /* We need to check if VT-x has been properly initialized on all
+               CPUs. Some BIOSes do a lousy job. */
+            HMR0FIRSTRC FirstRc;
+            hmR0FirstRcInit(&FirstRc);
+            g_HvmR0.lLastError = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
+            if (RT_SUCCESS(g_HvmR0.lLastError))
+                g_HvmR0.lLastError = hmR0FirstRcGetStatus(&FirstRc);
+        }
+        if (RT_SUCCESS(g_HvmR0.lLastError))
+        {
+            /* Reread in case we've changed it. */
+            g_HvmR0.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+
+            if (   (g_HvmR0.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
+                ==                                 (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
+            {
+                /*
+                 * Read all relevant MSR.
+                 */
+                g_HvmR0.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
+                g_HvmR0.vmx.msr.vmx_pin_ctls.u  = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
+                g_HvmR0.vmx.msr.vmx_proc_ctls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
+                g_HvmR0.vmx.msr.vmx_exit.u      = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
+                g_HvmR0.vmx.msr.vmx_entry.u     = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
+                g_HvmR0.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
+                g_HvmR0.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
+                g_HvmR0.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
+                g_HvmR0.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
+                g_HvmR0.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
+                g_HvmR0.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
+                g_HvmR0.vmx.hostCR4             = ASMGetCR4();
+                g_HvmR0.vmx.hostEFER            = ASMRdMsr(MSR_K6_EFER);
+                /* VPID 16 bits ASID. */
+                g_HvmR0.uMaxASID                = 0x10000; /* exclusive */
+
+                if (g_HvmR0.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
+                {
+                    g_HvmR0.vmx.msr.vmx_proc_ctls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
+                    if (  g_HvmR0.vmx.msr.vmx_proc_ctls2.n.allowed1
+                        & (VMX_VMCS_CTRL_PROC_EXEC2_EPT | VMX_VMCS_CTRL_PROC_EXEC2_VPID))
+                        g_HvmR0.vmx.msr.vmx_eptcaps = ASMRdMsr(MSR_IA32_VMX_EPT_CAPS);
+                }
+
+                if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+                {
+                    /*
+                     * Enter root mode
+                     */
+                    RTR0MEMOBJ hScatchMemObj;
+                    rc = RTR0MemObjAllocCont(&hScatchMemObj, PAGE_SIZE, true /* executable R0 mapping */);
+                    if (RT_FAILURE(rc))
+                        return rc;
+
+                    void      *pvScatchPage      = RTR0MemObjAddress(hScatchMemObj);
+                    RTHCPHYS   HCPhysScratchPage = RTR0MemObjGetPagePhysAddr(hScatchMemObj, 0);
+                    ASMMemZeroPage(pvScatchPage);
+
+                    /* Set revision dword at the beginning of the structure. */
+                    *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(g_HvmR0.vmx.msr.vmx_basic_info);
+
+                    /* Make sure we don't get rescheduled to another cpu during this probe. */
+                    RTCCUINTREG fFlags = ASMIntDisableFlags();
+
+                    /*
+                     * Check CR4.VMXE
+                     */
+                    g_HvmR0.vmx.hostCR4 = ASMGetCR4();
+                    if (!(g_HvmR0.vmx.hostCR4 & X86_CR4_VMXE))
+                    {
+                        /* In theory this bit could be cleared behind our back.  Which would cause
+                           #UD faults when we try to execute the VMX instructions... */
+                        ASMSetCR4(g_HvmR0.vmx.hostCR4 | X86_CR4_VMXE);
+                    }
+
+                    /* Enter VMX Root Mode */
+                    rc = VMXEnable(HCPhysScratchPage);
+                    if (RT_SUCCESS(rc))
+                    {
+                        g_HvmR0.vmx.fSupported = true;
+                        VMXDisable();
+
+                        /*
+                         * Check for the VMX-Preemption Timer and adjust for the * "VMX-Preemption
+                         * Timer Does Not Count Down at the Rate Specified" erratum.
+                         */
+                        if (  g_HvmR0.vmx.msr.vmx_pin_ctls.n.allowed1
+                            & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
+                        {
+                            g_HvmR0.vmx.fUsePreemptTimer   = true;
+                            g_HvmR0.vmx.cPreemptTimerShift = MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(g_HvmR0.vmx.msr.vmx_misc);
+                            if (hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum())
+                                g_HvmR0.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
+                        }
+                    }
+                    else
+                    {
+                        /*
+                         * KVM leaves the CPU in VMX root mode. Not only is  this not allowed,
+                         * it will crash the host when we enter raw mode, because:
+                         *
+                         *   (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify
+                         *       this bit), and
+                         *   (b) turning off paging causes a #GP  (unavoidable when switching
+                         *       from long to 32 bits mode or 32 bits to PAE).
+                         *
+                         * They should fix their code, but until they do we simply refuse to run.
+                         */
+                        g_HvmR0.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
+                    }
+
+                    /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set
+                       if it wasn't so before (some software could incorrectly
+                       think it's in VMX mode). */
+                    ASMSetCR4(g_HvmR0.vmx.hostCR4);
+                    ASMSetFlags(fFlags);
+
+                    RTR0MemObjFree(hScatchMemObj, false);
+                }
+            }
+            else
+            {
+                AssertFailed(); /* can't hit this case anymore */
+                g_HvmR0.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
+            }
+
+            /*
+             * Install the VT-x methods.
+             */
+            if (g_HvmR0.vmx.fSupported)
+            {
+                g_HvmR0.pfnEnterSession     = VMXR0Enter;
+                g_HvmR0.pfnLeaveSession     = VMXR0Leave;
+                g_HvmR0.pfnSaveHostState    = VMXR0SaveHostState;
+                g_HvmR0.pfnLoadGuestState   = VMXR0LoadGuestState;
+                g_HvmR0.pfnRunGuestCode     = VMXR0RunGuestCode;
+                g_HvmR0.pfnEnableCpu        = VMXR0EnableCpu;
+                g_HvmR0.pfnDisableCpu       = VMXR0DisableCpu;
+                g_HvmR0.pfnInitVM           = VMXR0InitVM;
+                g_HvmR0.pfnTermVM           = VMXR0TermVM;
+                g_HvmR0.pfnSetupVM          = VMXR0SetupVM;
+            }
+        }
+#ifdef LOG_ENABLED
+        else
+            SUPR0Printf("hmR0InitIntelCpu failed with rc=%d\n", g_HvmR0.lLastError);
+#endif
+    }
+    else
+        g_HvmR0.lLastError = VERR_VMX_NO_VMX;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * AMD specific initialization code.
+ */
+static void hmR0InitAmd(uint32_t u32FeaturesEDX)
+{
+    /*
+     * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
+     * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
+     */
+    if (   (g_HvmR0.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
+        && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
+        && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
+       )
+    {
+        g_HvmR0.pfnEnterSession     = SVMR0Enter;
+        g_HvmR0.pfnLeaveSession     = SVMR0Leave;
+        g_HvmR0.pfnSaveHostState    = SVMR0SaveHostState;
+        g_HvmR0.pfnLoadGuestState   = SVMR0LoadGuestState;
+        g_HvmR0.pfnRunGuestCode     = SVMR0RunGuestCode;
+        g_HvmR0.pfnEnableCpu        = SVMR0EnableCpu;
+        g_HvmR0.pfnDisableCpu       = SVMR0DisableCpu;
+        g_HvmR0.pfnInitVM           = SVMR0InitVM;
+        g_HvmR0.pfnTermVM           = SVMR0TermVM;
+        g_HvmR0.pfnSetupVM          = SVMR0SetupVM;
+
+        /* Query AMD features. */
+        uint32_t u32Dummy;
+        ASMCpuId(0x8000000A, &g_HvmR0.svm.u32Rev, &g_HvmR0.uMaxASID,
+                 &u32Dummy, &g_HvmR0.svm.u32Features);
+
+        /*
+         * We need to check if AMD-V has been properly initialized on all CPUs.
+         * Some BIOSes might do a poor job.
+         */
+        HMR0FIRSTRC FirstRc;
+        hmR0FirstRcInit(&FirstRc);
+        int rc = RTMpOnAll(hmR0InitAmdCpu, &FirstRc, NULL);
+        AssertRC(rc);
+        if (RT_SUCCESS(rc))
+            rc = hmR0FirstRcGetStatus(&FirstRc);
+#ifndef DEBUG_bird
+        AssertMsg(rc == VINF_SUCCESS || rc == VERR_SVM_IN_USE,
+                  ("hmR0InitAmdCpu failed for cpu %d with rc=%Rrc\n", hmR0FirstRcGetCpuId(&FirstRc), rc));
+#endif
+        if (RT_SUCCESS(rc))
+        {
+            /* Read the HWCR msr for diagnostics. */
+            g_HvmR0.svm.msrHWCR    = ASMRdMsr(MSR_K8_HWCR);
+            g_HvmR0.svm.fSupported = true;
+        }
+        else
+            g_HvmR0.lLastError = rc;
+    }
+    else
+        g_HvmR0.lLastError = VERR_SVM_NO_SVM;
+}
+
+
+/**
+ * Does global Ring-0 HM initialization (at module init).
  *
  * @returns VBox status code.
  */
+VMMR0DECL(int) HWACCMR0Init(void)
+{
+    /*
+     * Initialize the globals.
+     */
+    g_HvmR0.fEnabled = false;
+    static RTONCE s_OnceInit = RTONCE_INITIALIZER;
+    g_HvmR0.EnableAllCpusOnce = s_OnceInit;
+    for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
+        g_HvmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
+
+    /* Fill in all callbacks with placeholders. */
+    g_HvmR0.pfnEnterSession     = hmR0DummyEnter;
+    g_HvmR0.pfnLeaveSession     = hmR0DummyLeave;
+    g_HvmR0.pfnSaveHostState    = hmR0DummySaveHostState;
+    g_HvmR0.pfnLoadGuestState   = hmR0DummyLoadGuestState;
+    g_HvmR0.pfnRunGuestCode     = hmR0DummyRunGuestCode;
+    g_HvmR0.pfnEnableCpu        = hmR0DummyEnableCpu;
+    g_HvmR0.pfnDisableCpu       = hmR0DummyDisableCpu;
+    g_HvmR0.pfnInitVM           = hmR0DummyInitVM;
+    g_HvmR0.pfnTermVM           = hmR0DummyTermVM;
+    g_HvmR0.pfnSetupVM          = hmR0DummySetupVM;
+
+    /* Default is global VT-x/AMD-V init */
+    g_HvmR0.fGlobalInit         = true;
+
+    /*
+     * Make sure aCpuInfo is big enough for all the CPUs on this system.
+     */
+    if (RTMpGetArraySize() > RT_ELEMENTS(g_HvmR0.aCpuInfo))
+    {
+        LogRel(("HM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_HvmR0.aCpuInfo)));
+        return VERR_TOO_MANY_CPUS;
+    }
+
+    /*
+     * Check for VT-x and AMD-V capabilities
+     */
+    int rc;
+    if (ASMHasCpuId())
+    {
+        uint32_t u32FeaturesECX, u32FeaturesEDX;
+        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;
+        uint32_t u32Dummy;
+
+        /* Standard features. */
+        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
+        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
+
+        /* Query AMD features. */
+        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy,
+                 &g_HvmR0.cpuid.u32AMDFeatureECX,
+                 &g_HvmR0.cpuid.u32AMDFeatureEDX);
+
+        /* Go to CPU specific initialization code. */
+        if (   u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
+            && u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
+            && u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX)
+        {
+            rc = hmR0InitIntel(u32FeaturesECX, u32FeaturesEDX);
+            if (RT_FAILURE(rc))
+                return rc;
+        }
+        else if (   u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
+                 && u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
+                 && u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX)
+            hmR0InitAmd(u32FeaturesEDX);
+        else
+            g_HvmR0.lLastError = VERR_HWACCM_UNKNOWN_CPU;
+    }
+    else
+        g_HvmR0.lLastError = VERR_HWACCM_NO_CPUID;
+
+    /*
+     * Register notification callbacks that we can use to disable/enable CPUs
+     * when brought offline/online or suspending/resuming.
+     */
+    if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+    {
+        rc = RTMpNotificationRegister(hmR0MpEventCallback, NULL);
+        AssertRC(rc);
+
+        rc = RTPowerNotificationRegister(hmR0PowerCallback, NULL);
+        AssertRC(rc);
+    }
+
+    /* We return success here because module init shall not fail if HM
+       fails to initialize. */
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Does global Ring-0 HM termination (at module termination).
+ *
+ * @returns VBox status code.
+ */
 VMMR0DECL(int) HWACCMR0Term(void)
 {
     int rc;
-    if (   HWACCMR0Globals.vmx.fSupported
-        && HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
+    if (   g_HvmR0.vmx.fSupported
+        && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
     {
         /*
          * Simple if the host OS manages VT-x.
         */
-        Assert(HWACCMR0Globals.fGlobalInit);
+        Assert(g_HvmR0.fGlobalInit);
         rc = SUPR0EnableVTx(false /* fEnable */);
 
-        for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
+        for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo); iCpu++)
         {
-            HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = false;
-            Assert(HWACCMR0Globals.aCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
+            g_HvmR0.aCpuInfo[iCpu].fConfigured = false;
+            Assert(g_HvmR0.aCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
         }
     }
     else
     {
-        Assert(!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
-        if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
+        Assert(!g_HvmR0.vmx.fUsingSUPR0EnableVTx);
+        if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
         {
             /* Doesn't really matter if this fails. */
-            rc = RTMpNotificationDeregister(hwaccmR0MpEventCallback, NULL);  AssertRC(rc);
-            rc = RTPowerNotificationDeregister(hwaccmR0PowerCallback, NULL); AssertRC(rc);
+            rc = RTMpNotificationDeregister(hmR0MpEventCallback, NULL);  AssertRC(rc);
+            rc = RTPowerNotificationDeregister(hmR0PowerCallback, NULL); AssertRC(rc);
         }
         else
             rc = VINF_SUCCESS;
 
-        /* Only disable VT-x/AMD-V on all CPUs if we enabled it before. */
-        if (HWACCMR0Globals.fGlobalInit)
+        /*
+         * Disable VT-x/AMD-V on all CPUs if we enabled it before.
+         */
+        if (g_HvmR0.fGlobalInit)
         {
-            HWACCMR0FIRSTRC FirstRc;
-            hwaccmR0FirstRcInit(&FirstRc);
-            rc = RTMpOnAll(hwaccmR0DisableCpuCallback, NULL, &FirstRc);
+            HMR0FIRSTRC FirstRc;
+            hmR0FirstRcInit(&FirstRc);
+            rc = RTMpOnAll(hmR0DisableCpuCallback, NULL, &FirstRc);
             Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
             if (RT_SUCCESS(rc))
             {
-                rc = hwaccmR0FirstRcGetStatus(&FirstRc);
-                AssertMsgRC(rc, ("%u: %Rrc\n", hwaccmR0FirstRcGetCpuId(&FirstRc), rc));
+                rc = hmR0FirstRcGetStatus(&FirstRc);
+                AssertMsgRC(rc, ("%u: %Rrc\n", hmR0FirstRcGetCpuId(&FirstRc), rc));
             }
         }
 
-        /* Free the per-cpu pages used for VT-x and AMD-V */
-        for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
+        /*
+         * Free the per-cpu pages used for VT-x and AMD-V.
+         */
+        for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
         {
-            if (HWACCMR0Globals.aCpuInfo[i].hMemObj != NIL_RTR0MEMOBJ)
+            if (g_HvmR0.aCpuInfo[i].hMemObj != NIL_RTR0MEMOBJ)
             {
-                RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].hMemObj, false);
-                HWACCMR0Globals.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
+                RTR0MemObjFree(g_HvmR0.aCpuInfo[i].hMemObj, false);
+                g_HvmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
             }
         }
@@ ... @@
 
 
-
-/**
- * Worker function used by hwaccmR0PowerCallback  and HWACCMR0Init to initalize
+/**
+ * Worker function used by hmR0PowerCallback  and HWACCMR0Init to initalize
+ * VT-x on a CPU.
+ *
+ * @param   idCpu       The identifier for the CPU the function is called on.
+ * @param   pvUser1     Pointer to the first RC structure.
+ * @param   pvUser2     Ignored.
+ */
+static DECLCALLBACK(void) hmR0InitIntelCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser1;
+    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
+    NOREF(pvUser2);
+
+    /*
+     * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
+     * Once the lock bit is set, this MSR can no longer be modified.
+     */
+    uint64_t fFC = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+    if (   !(fFC    & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
+        || (   (fFC & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
+            == MSR_IA32_FEATURE_CONTROL_VMXON ) /* Some BIOSes forget to set the locked bit. */
+       )
+    {
+        /* MSR is not yet locked; we can change it ourselves here */
+        ASMWrMsr(MSR_IA32_FEATURE_CONTROL,
+                 g_HvmR0.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
+        fFC = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+    }
+
+    int rc;
+    if (   (fFC & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
+        == (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
+        rc = VINF_SUCCESS;
+    else
+        rc = VERR_VMX_MSR_LOCKED_OR_DISABLED;
+
+    hmR0FirstRcSetStatus(pFirstRc, rc);
+}
+
+
+/**
+ * Worker function used by hmR0PowerCallback  and HWACCMR0Init to initalize
  * VT-x / AMD-V on a CPU.
  *
  * @param   idCpu       The identifier for the CPU the function is called on.
- * @param   pvUser1     The EBX value of CPUID(0).
- * @param   pvUser2     Pointer to the first RC structure.
- */
-static DECLCALLBACK(void) hwaccmR0InitCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
-{
-    unsigned            u32VendorEBX = (uintptr_t)pvUser1;
-    PHWACCMR0FIRSTRC    pFirstRc     = (PHWACCMR0FIRSTRC)pvUser2;
-    uint64_t            val;
-    int                 rc;
-
+ * @param   pvUser1     Pointer to the first RC structure.
+ * @param   pvUser2     Ignored.
+ */
+static DECLCALLBACK(void) hmR0InitAmdCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser1;
     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
-
-    if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
-    {
-        val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
-
-        /*
-         * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
-         * Once the lock bit is set, this MSR can no longer be modified.
-         */
-        if (   !(val    & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
-            || (   (val & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
-                == MSR_IA32_FEATURE_CONTROL_VMXON ) /* Some BIOSes forget to set the locked bit. */
-           )
+    NOREF(pvUser2);
+
+    /* Check if SVM is disabled. */
+    int rc;
+    uint64_t fVmCr = ASMRdMsr(MSR_K8_VM_CR);
+    if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
+    {
+        /* Turn on SVM in the EFER MSR. */
+        uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
+        if (fEfer & MSR_K6_EFER_SVME)
+            rc = VERR_SVM_IN_USE;
+        else
         {
-            /* MSR is not yet locked; we can change it ourselves here */
-            ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
-            val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+            ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);
+
+            /* Paranoia. */
+            fEfer = ASMRdMsr(MSR_K6_EFER);
+            if (fEfer & MSR_K6_EFER_SVME)
+            {
+                /* Restore previous value. */
+                ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
+                rc = VINF_SUCCESS;
+            }
+            else
+                rc = VERR_SVM_ILLEGAL_EFER_MSR;
         }
-        if (   (val & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
-            == (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
-            rc = VINF_SUCCESS;
-        else
-            rc = VERR_VMX_MSR_LOCKED_OR_DISABLED;
-    }
-    else if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
-    {
-        /* Check if SVM is disabled */
-        val = ASMRdMsr(MSR_K8_VM_CR);
-        if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
-        {
-            /* Turn on SVM in the EFER MSR. */
-            val = ASMRdMsr(MSR_K6_EFER);
-            if (val & MSR_K6_EFER_SVME)
-                rc = VERR_SVM_IN_USE;
-            else
-            {
-                ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
-
-                /* Paranoia. */
-                val = ASMRdMsr(MSR_K6_EFER);
-                if (val & MSR_K6_EFER_SVME)
-                {
-                    /* Restore previous value. */
-                    ASMWrMsr(MSR_K6_EFER, val & ~MSR_K6_EFER_SVME);
-                    rc = VINF_SUCCESS;
-                }
-                else
-                    rc = VERR_SVM_ILLEGAL_EFER_MSR;
-            }
-        }
-        else
-            rc = VERR_SVM_DISABLED;
     }
     else
-    {
-        AssertFailed(); /* can't happen */
-        rc = VERR_INTERNAL_ERROR_5;
-    }
-
-    hwaccmR0FirstRcSetStatus(pFirstRc, rc);
-}
+        rc = VERR_SVM_DISABLED;
+
+    hmR0FirstRcSetStatus(pFirstRc, rc);
+}
+
 
 
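
Note: the VMXON/LOCK test in hmR0InitIntelCpu above distinguishes three states of MSR_IA32_FEATURE_CONTROL. A condensed restatement (illustrative only, not code from this changeset):

    /* Decide whether VMXON can work, given the feature control MSR value. */
    static int hmR0SketchCheckFeatureCtrl(uint64_t fFC)
    {
        if (!(fFC & MSR_IA32_FEATURE_CONTROL_LOCK))
            return VINF_SUCCESS;                /* unlocked: the worker sets VMXON+LOCK itself */
        if (fFC & MSR_IA32_FEATURE_CONTROL_VMXON)
            return VINF_SUCCESS;                /* locked with VMXON set: VT-x is usable */
        return VERR_VMX_MSR_LOCKED_OR_DISABLED; /* locked by the BIOS with VT-x disabled */
    }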
     
    796842 * @param   idCpu       The identifier for the CPU the function is called on.
    797843 */
    798 static int hwaccmR0EnableCpu(PVM pVM, RTCPUID idCpu)
    799 {
    800     PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
    801 
    802     Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
     844static int hmR0EnableCpu(PVM pVM, RTCPUID idCpu)
     845{
     846    PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
     847
     848    Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
    803849    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    804     Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
     850    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
    805851    Assert(!pCpu->fConfigured);
    806     Assert(!HWACCMR0Globals.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
     852    Assert(!g_HvmR0.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
    807853
    808854    pCpu->idCpu         = idCpu;
     
    817863    if (pCpu->hMemObj == NIL_RTR0MEMOBJ)
    818864    {
    819         AssertLogRelMsgFailed(("hwaccmR0EnableCpu failed idCpu=%u.\n", idCpu));
     865        AssertLogRelMsgFailed(("hmR0EnableCpu failed idCpu=%u.\n", idCpu));
    820866        return VERR_INTERNAL_ERROR;
    821867    }
     
    824870    RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    825871
    826     int rc = HWACCMR0Globals.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage);
     872    int rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage);
    827873    AssertRC(rc);
    828874    if (RT_SUCCESS(rc))
     
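
hmR0EnableCpu hands the hardware one dedicated, zeroed, physically contiguous page per CPU (the VMXON region for VT-x, the host-save area for AMD-V) together with its physical address. Ring-0 uses RTR0MemObjAllocCont and RTR0MemObjGetPagePhysAddr for this; the sketch below imitates only the allocate-and-zero half with portable C11 calls, since user land has no way to learn a physical address.

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define MAX_CPUS  64

    typedef struct CPUPAGE
    {
        void *pvPage;       /* zeroed page handed to the VMXON/VMRUN setup */
        int   fConfigured;
    } CPUPAGE;

    static CPUPAGE g_aCpuPages[MAX_CPUS];

    static int alloc_cpu_pages(int cCpus)
    {
        for (int i = 0; i < cCpus; i++)
        {
            /* aligned_alloc stands in for RTR0MemObjAllocCont here. */
            void *pv = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
            if (!pv)
                return -1;
            memset(pv, 0, PAGE_SIZE);       /* hardware expects a clean page */
            g_aCpuPages[i].pvPage      = pv;
            g_aCpuPages[i].fConfigured = 0; /* enabled later, per CPU */
        }
        return 0;
    }

    int main(void)
    {
        if (alloc_cpu_pages(4) == 0)
        {
            printf("allocated 4 per-cpu pages\n");
            for (int i = 0; i < 4; i++)
                free(g_aCpuPages[i].pvPage);
        }
        return 0;
    }
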
    841887 * @param   pvUser2     The 2nd user argument.
    842888 */
    843 static DECLCALLBACK(void) hwaccmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    844 {
    845     PVM                 pVM      = (PVM)pvUser1;     /* can be NULL! */
    846     PHWACCMR0FIRSTRC    pFirstRc = (PHWACCMR0FIRSTRC)pvUser2;
    847     AssertReturnVoid(HWACCMR0Globals.fGlobalInit);
    848     hwaccmR0FirstRcSetStatus(pFirstRc, hwaccmR0EnableCpu(pVM, idCpu));
     889static DECLCALLBACK(void) hmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
     890{
     891    PVM             pVM      = (PVM)pvUser1;     /* can be NULL! */
     892    PHMR0FIRSTRC    pFirstRc = (PHMR0FIRSTRC)pvUser2;
     893    AssertReturnVoid(g_HvmR0.fGlobalInit);
     894    hmR0FirstRcSetStatus(pFirstRc, hmR0EnableCpu(pVM, idCpu));
    849895}
    850896
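
Every per-CPU callback funnels its result through hmR0FirstRcSetStatus, so an RTMpOnAll broadcast yields one deterministic status: the first failure, plus the CPU that reported it, wins. Below is a sketch of that first-error latch with C11 atomics; the struct and function names are illustrative, not the IPRT/HM API.

    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct FIRSTRC
    {
        atomic_int rc;     /* 0 = success; the first non-zero value wins */
        atomic_int idCpu;  /* reporter of the first failure */
    } FIRSTRC;

    static void firstrc_init(FIRSTRC *p)
    {
        atomic_store(&p->rc, 0);
        atomic_store(&p->idCpu, -1);
    }

    static void firstrc_set(FIRSTRC *p, int idCpu, int rc)
    {
        int expected = 0;
        if (   rc != 0
            && atomic_compare_exchange_strong(&p->rc, &expected, rc))
            atomic_store(&p->idCpu, idCpu); /* only the first failure lands */
    }

    int main(void)
    {
        FIRSTRC r;
        firstrc_init(&r);
        firstrc_set(&r, 0,  0);   /* success: ignored */
        firstrc_set(&r, 2, -5);   /* first failure: recorded */
        firstrc_set(&r, 3, -7);   /* later failure: dropped */
        printf("rc=%d from cpu %d\n", atomic_load(&r.rc), atomic_load(&r.idCpu));
        return 0;
    }
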
     
    857903 * @param   pvUserIgnore    NULL, ignored.
    858904 */
    859 static DECLCALLBACK(int32_t) hwaccmR0EnableAllCpuOnce(void *pvUser, void *pvUserIgnore)
     905static DECLCALLBACK(int32_t) hmR0EnableAllCpuOnce(void *pvUser, void *pvUserIgnore)
    860906{
    861907    PVM pVM = (PVM)pvUser;
     
    868914     *       notification.  Kind of unlikely though, so ignored for now.
    869915     */
    870     AssertReturn(!HWACCMR0Globals.fEnabled, VERR_INTERNAL_ERROR_3);
    871     ASMAtomicWriteBool(&HWACCMR0Globals.fEnabled, true);
     916    AssertReturn(!g_HvmR0.fEnabled, VERR_INTERNAL_ERROR_3);
     917    ASMAtomicWriteBool(&g_HvmR0.fEnabled, true);
    872918
    873919    /*
    874920     * The global init variable is set by the first VM.
    875921     */
    876     HWACCMR0Globals.fGlobalInit = pVM->hwaccm.s.fGlobalInit;
     922    g_HvmR0.fGlobalInit = pVM->hwaccm.s.fGlobalInit;
    877923
    878924    int rc;
    879     if (   HWACCMR0Globals.vmx.fSupported
    880         && HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
     925    if (   g_HvmR0.vmx.fSupported
     926        && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
    881927    {
    882928        /*
     
    886932        if (RT_SUCCESS(rc))
    887933        {
    888             for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
     934            for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo); iCpu++)
    889935            {
    890                 HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = true;
    891                 Assert(HWACCMR0Globals.aCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
     936                g_HvmR0.aCpuInfo[iCpu].fConfigured = true;
     937                Assert(g_HvmR0.aCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
    892938            }
    893939
    894940            /* If the host provides a VT-x init API, then we'll rely on that for global init. */
    895             HWACCMR0Globals.fGlobalInit = pVM->hwaccm.s.fGlobalInit = true;
     941            g_HvmR0.fGlobalInit = pVM->hwaccm.s.fGlobalInit = true;
    896942        }
    897943        else
     
    904950         */
    905951        /* Allocate one page per cpu for the global vt-x and amd-v pages */
    906         for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
     952        for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
    907953        {
    908             Assert(HWACCMR0Globals.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
     954            Assert(g_HvmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
    909955
    910956            if (RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(i)))
    911957            {
    912                 rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].hMemObj, PAGE_SIZE, true /* executable R0 mapping */);
     958                rc = RTR0MemObjAllocCont(&g_HvmR0.aCpuInfo[i].hMemObj, PAGE_SIZE, true /* executable R0 mapping */);
    913959                AssertLogRelRCReturn(rc, rc);
    914960
    915                 void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].hMemObj); Assert(pvR0);
     961                void *pvR0 = RTR0MemObjAddress(g_HvmR0.aCpuInfo[i].hMemObj); Assert(pvR0);
    916962                ASMMemZeroPage(pvR0);
    917963            }
    918             HWACCMR0Globals.aCpuInfo[i].fConfigured = false;
     964            g_HvmR0.aCpuInfo[i].fConfigured = false;
    919965        }
    920966
    921         if (HWACCMR0Globals.fGlobalInit)
     967        if (g_HvmR0.fGlobalInit)
    922968        {
    923969            /* First time, so initialize each cpu/core. */
    924             HWACCMR0FIRSTRC FirstRc;
    925             hwaccmR0FirstRcInit(&FirstRc);
    926             rc = RTMpOnAll(hwaccmR0EnableCpuCallback, (void *)pVM, &FirstRc);
     970            HMR0FIRSTRC FirstRc;
     971            hmR0FirstRcInit(&FirstRc);
     972            rc = RTMpOnAll(hmR0EnableCpuCallback, (void *)pVM, &FirstRc);
    927973            if (RT_SUCCESS(rc))
    928                 rc = hwaccmR0FirstRcGetStatus(&FirstRc);
    929             AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", hwaccmR0FirstRcGetCpuId(&FirstRc), rc));
     974                rc = hmR0FirstRcGetStatus(&FirstRc);
     975            AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", hmR0FirstRcGetCpuId(&FirstRc), rc));
    930976        }
    931977        else
     
    947993    /* Make sure we don't touch hwaccm after we've disabled hwaccm in
    948994       preparation of a suspend. */
    949     if (ASMAtomicReadBool(&HWACCMR0Globals.fSuspended))
     995    if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
    950996        return VERR_HWACCM_SUSPEND_PENDING;
    951997
    952     return RTOnce(&HWACCMR0Globals.EnableAllCpusOnce, hwaccmR0EnableAllCpuOnce, pVM, NULL);
     998    return RTOnce(&g_HvmR0.EnableAllCpusOnce, hmR0EnableAllCpuOnce, pVM, NULL);
    953999}
    9541000
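
HWACCMR0EnableAllCpus defers the heavy lifting to RTOnce, so the global enable runs exactly once no matter how many VMs race through here. pthread_once gives the same exactly-once guarantee in portable C; this is an analogy for the pattern, not the RTOnce API (RTOnce also returns the body's status, which the sketch emulates with a captured variable).

    #include <pthread.h>
    #include <stdio.h>

    static pthread_once_t g_EnableAllCpusOnce = PTHREAD_ONCE_INIT;
    static int            g_rcInit;  /* status captured by the once-body */

    static void enable_all_cpus_once(void)
    {
        /* Runs exactly once, even with many callers racing in parallel. */
        g_rcInit = 0;
        printf("global hardware-virtualization enable\n");
    }

    static int enable_all_cpus(void)
    {
        pthread_once(&g_EnableAllCpusOnce, enable_all_cpus_once);
        return g_rcInit; /* every caller observes the first run's outcome */
    }

    int main(void)
    {
        printf("first  call -> %d\n", enable_all_cpus());
        printf("second call -> %d\n", enable_all_cpus());
        return 0;
    }
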
     
    9601006 * @param   idCpu       The identifier for the CPU the function is called on.
    9611007 */
    962 static int hwaccmR0DisableCpu(RTCPUID idCpu)
    963 {
    964     PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
    965 
    966     Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
     1008static int hmR0DisableCpu(RTCPUID idCpu)
     1009{
     1010    PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
     1011
     1012    Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
    9671013    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    968     Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
    969     Assert(!HWACCMR0Globals.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
     1014    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
     1015    Assert(!g_HvmR0.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
    9701016    Assert(!pCpu->fConfigured || pCpu->hMemObj != NIL_RTR0MEMOBJ);
    9711017
     
    9781024        void    *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
    9791025        RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    980         rc = HWACCMR0Globals.pfnDisableCpu(pCpu, pvCpuPage, HCPhysCpuPage);
     1026        rc = g_HvmR0.pfnDisableCpu(pCpu, pvCpuPage, HCPhysCpuPage);
    9811027        AssertRC(rc);
    9821028        pCpu->fConfigured = false;
     
    9981044 * @param   pvUser2     The 2nd user argument.
    9991045 */
    1000 static DECLCALLBACK(void) hwaccmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    1001 {
    1002     PHWACCMR0FIRSTRC pFirstRc = (PHWACCMR0FIRSTRC)pvUser2;
    1003     AssertReturnVoid(HWACCMR0Globals.fGlobalInit);
    1004     hwaccmR0FirstRcSetStatus(pFirstRc, hwaccmR0DisableCpu(idCpu));
     1046static DECLCALLBACK(void) hmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
     1047{
     1048    PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2;
     1049    AssertReturnVoid(g_HvmR0.fGlobalInit);
     1050    hmR0FirstRcSetStatus(pFirstRc, hmR0DisableCpu(idCpu));
    10051051}
    10061052
     
    10131059 * @param   pvData              Opaque data (PVM pointer).
    10141060 */
    1015 static DECLCALLBACK(void) hwaccmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData)
     1061static DECLCALLBACK(void) hmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData)
    10161062{
    10171063    /*
     
    10251071        case RTMPEVENT_OFFLINE:
    10261072        {
    1027             int rc = hwaccmR0DisableCpu(idCpu);
     1073            int rc = hmR0DisableCpu(idCpu);
    10281074            AssertRC(rc);
    10291075            break;
     
    10421088 * @param   pvUser          User argument
    10431089 */
    1044 static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser)
     1090static DECLCALLBACK(void) hmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser)
    10451091{
    10461092    NOREF(pvUser);
    1047     Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
     1093    Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
    10481094
    10491095#ifdef LOG_ENABLED
    10501096    if (enmEvent == RTPOWEREVENT_SUSPEND)
    1051         SUPR0Printf("hwaccmR0PowerCallback RTPOWEREVENT_SUSPEND\n");
     1097        SUPR0Printf("hmR0PowerCallback RTPOWEREVENT_SUSPEND\n");
    10521098    else
    1053         SUPR0Printf("hwaccmR0PowerCallback RTPOWEREVENT_RESUME\n");
     1099        SUPR0Printf("hmR0PowerCallback RTPOWEREVENT_RESUME\n");
    10541100#endif
    10551101
    10561102    if (enmEvent == RTPOWEREVENT_SUSPEND)
    1057         ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, true);
    1058 
    1059     if (HWACCMR0Globals.fEnabled)
    1060     {
    1061         int             rc;
    1062         HWACCMR0FIRSTRC FirstRc;
    1063         hwaccmR0FirstRcInit(&FirstRc);
     1103        ASMAtomicWriteBool(&g_HvmR0.fSuspended, true);
     1104
     1105    if (g_HvmR0.fEnabled)
     1106    {
     1107        int         rc;
     1108        HMR0FIRSTRC FirstRc;
     1109        hmR0FirstRcInit(&FirstRc);
    10641110
    10651111        if (enmEvent == RTPOWEREVENT_SUSPEND)
    10661112        {
    1067             if (HWACCMR0Globals.fGlobalInit)
     1113            if (g_HvmR0.fGlobalInit)
    10681114            {
    10691115                /* Turn off VT-x or AMD-V on all CPUs. */
    1070                 rc = RTMpOnAll(hwaccmR0DisableCpuCallback, NULL, &FirstRc);
     1116                rc = RTMpOnAll(hmR0DisableCpuCallback, NULL, &FirstRc);
    10711117                Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
    10721118            }
     
    10751121        else
    10761122        {
    1077             /* Reinit the CPUs from scratch as the suspend state might have messed with the MSRs. (lousy BIOSes as usual) */
    1078             uintptr_t uFirstArg = HWACCMR0Globals.vmx.fSupported ? X86_CPUID_VENDOR_INTEL_EBX : X86_CPUID_VENDOR_AMD_EBX;
    1079             rc = RTMpOnAll(hwaccmR0InitCpu, (void *)uFirstArg , &FirstRc);
     1123            /* Reinit the CPUs from scratch as the suspend state might have
     1124               messed with the MSRs. (lousy BIOSes as usual) */
     1125            if (g_HvmR0.vmx.fSupported)
     1126                rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
     1127            else
     1128                rc = RTMpOnAll(hmR0InitAmdCpu, &FirstRc, NULL);
    10801129            Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
    10811130            if (RT_SUCCESS(rc))
    1082                 rc = hwaccmR0FirstRcGetStatus(&FirstRc);
     1131                rc = hmR0FirstRcGetStatus(&FirstRc);
    10831132#ifdef LOG_ENABLED
    10841133            if (RT_FAILURE(rc))
    1085                 SUPR0Printf("hwaccmR0PowerCallback hwaccmR0InitCpu failed with %Rc\n", rc);
     1134                SUPR0Printf("hmR0PowerCallback hmR0InitXxxCpu failed with %Rc\n", rc);
    10861135#endif
    1087 
    1088             if (HWACCMR0Globals.fGlobalInit)
     1136            if (g_HvmR0.fGlobalInit)
    10891137            {
    10901138                /* Turn VT-x or AMD-V back on on all CPUs. */
    1091                 rc = RTMpOnAll(hwaccmR0EnableCpuCallback, NULL, &FirstRc /* output ignored */);
     1139                rc = RTMpOnAll(hmR0EnableCpuCallback, NULL, &FirstRc /* output ignored */);
    10921140                Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
    10931141            }
     
    10971145
    10981146    if (enmEvent == RTPOWEREVENT_RESUME)
    1099         ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, false);
    1100 }
    1101 
    1102 
    1103 /**
    1104  * Does Ring-0 per VM HWACCM initialization.
    1105  *
    1106  * This is mainly to check that the Host CPU mode is compatible
    1107  * with VMX.
     1147        ASMAtomicWriteBool(&g_HvmR0.fSuspended, false);
     1148}
     1149
     1150
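
hmR0PowerCallback brackets the suspend window with the fSuspended flag: raise it before tearing VT-x/AMD-V down, and clear it only after the MSRs have been re-probed and the CPUs re-enabled on resume, so nothing else touches the hardware in between. A compressed sketch of that ordering follows; the three worker functions are placeholders for the real disable/re-init/enable broadcasts.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool g_fSuspended;

    static void disable_all_cpus(void) { printf("  vmxoff / clear EFER.SVME\n"); }
    static void reinit_all_cpus(void)  { printf("  re-probe MSRs (BIOS may have changed them)\n"); }
    static void enable_all_cpus(void)  { printf("  vmxon / set EFER.SVME\n"); }

    static void power_callback(bool fSuspend)
    {
        if (fSuspend)
        {
            /* Gate first, then tear down: callers checking the flag back off. */
            atomic_store(&g_fSuspended, true);
            disable_all_cpus();
        }
        else
        {
            /* Rebuild from scratch, then lift the gate. */
            reinit_all_cpus();
            enable_all_cpus();
            atomic_store(&g_fSuspended, false);
        }
    }

    int main(void)
    {
        printf("suspend:\n"); power_callback(true);
        printf("resume:\n");  power_callback(false);
        return 0;
    }
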
     1151/**
     1152 * Does Ring-0 per VM HM initialization.
     1153 *
     1154 * This will copy HM global into the VM structure and call the CPU specific
     1155 * init routine which will allocate resources for each virtual CPU and such.
    11081156 *
    11091157 * @returns VBox status code.
     
    11191167
    11201168    /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
    1121     if (ASMAtomicReadBool(&HWACCMR0Globals.fSuspended))
     1169    if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
    11221170        return VERR_HWACCM_SUSPEND_PENDING;
    11231171
    1124     pVM->hwaccm.s.vmx.fSupported            = HWACCMR0Globals.vmx.fSupported;
    1125     pVM->hwaccm.s.svm.fSupported            = HWACCMR0Globals.svm.fSupported;
    1126 
    1127     pVM->hwaccm.s.vmx.fUsePreemptTimer      = HWACCMR0Globals.vmx.fUsePreemptTimer;
    1128     pVM->hwaccm.s.vmx.cPreemptTimerShift    = HWACCMR0Globals.vmx.cPreemptTimerShift;
    1129     pVM->hwaccm.s.vmx.msr.feature_ctrl      = HWACCMR0Globals.vmx.msr.feature_ctrl;
    1130     pVM->hwaccm.s.vmx.hostCR4               = HWACCMR0Globals.vmx.hostCR4;
    1131     pVM->hwaccm.s.vmx.hostEFER              = HWACCMR0Globals.vmx.hostEFER;
    1132     pVM->hwaccm.s.vmx.msr.vmx_basic_info    = HWACCMR0Globals.vmx.msr.vmx_basic_info;
    1133     pVM->hwaccm.s.vmx.msr.vmx_pin_ctls      = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
    1134     pVM->hwaccm.s.vmx.msr.vmx_proc_ctls     = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
    1135     pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2    = HWACCMR0Globals.vmx.msr.vmx_proc_ctls2;
    1136     pVM->hwaccm.s.vmx.msr.vmx_exit          = HWACCMR0Globals.vmx.msr.vmx_exit;
    1137     pVM->hwaccm.s.vmx.msr.vmx_entry         = HWACCMR0Globals.vmx.msr.vmx_entry;
    1138     pVM->hwaccm.s.vmx.msr.vmx_misc          = HWACCMR0Globals.vmx.msr.vmx_misc;
    1139     pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
    1140     pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
    1141     pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
    1142     pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
    1143     pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum     = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
    1144     pVM->hwaccm.s.vmx.msr.vmx_eptcaps       = HWACCMR0Globals.vmx.msr.vmx_eptcaps;
    1145     pVM->hwaccm.s.svm.msrHWCR               = HWACCMR0Globals.svm.msrHWCR;
    1146     pVM->hwaccm.s.svm.u32Rev                = HWACCMR0Globals.svm.u32Rev;
    1147     pVM->hwaccm.s.svm.u32Features           = HWACCMR0Globals.svm.u32Features;
    1148     pVM->hwaccm.s.cpuid.u32AMDFeatureECX    = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
    1149     pVM->hwaccm.s.cpuid.u32AMDFeatureEDX    = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
    1150     pVM->hwaccm.s.lLastError                = HWACCMR0Globals.lLastError;
    1151 
    1152     pVM->hwaccm.s.uMaxASID                  = HWACCMR0Globals.uMaxASID;
     1172    /*
     1173     * Copy globals to the VM structure.
     1174     */
     1175    pVM->hwaccm.s.vmx.fSupported            = g_HvmR0.vmx.fSupported;
     1176    pVM->hwaccm.s.svm.fSupported            = g_HvmR0.svm.fSupported;
     1177
     1178    pVM->hwaccm.s.vmx.fUsePreemptTimer      = g_HvmR0.vmx.fUsePreemptTimer;
     1179    pVM->hwaccm.s.vmx.cPreemptTimerShift    = g_HvmR0.vmx.cPreemptTimerShift;
     1180    pVM->hwaccm.s.vmx.msr.feature_ctrl      = g_HvmR0.vmx.msr.feature_ctrl;
     1181    pVM->hwaccm.s.vmx.hostCR4               = g_HvmR0.vmx.hostCR4;
     1182    pVM->hwaccm.s.vmx.hostEFER              = g_HvmR0.vmx.hostEFER;
     1183    pVM->hwaccm.s.vmx.msr.vmx_basic_info    = g_HvmR0.vmx.msr.vmx_basic_info;
     1184    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls      = g_HvmR0.vmx.msr.vmx_pin_ctls;
     1185    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls     = g_HvmR0.vmx.msr.vmx_proc_ctls;
     1186    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2    = g_HvmR0.vmx.msr.vmx_proc_ctls2;
     1187    pVM->hwaccm.s.vmx.msr.vmx_exit          = g_HvmR0.vmx.msr.vmx_exit;
     1188    pVM->hwaccm.s.vmx.msr.vmx_entry         = g_HvmR0.vmx.msr.vmx_entry;
     1189    pVM->hwaccm.s.vmx.msr.vmx_misc          = g_HvmR0.vmx.msr.vmx_misc;
     1190    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0    = g_HvmR0.vmx.msr.vmx_cr0_fixed0;
     1191    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1    = g_HvmR0.vmx.msr.vmx_cr0_fixed1;
     1192    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0    = g_HvmR0.vmx.msr.vmx_cr4_fixed0;
     1193    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1    = g_HvmR0.vmx.msr.vmx_cr4_fixed1;
     1194    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum     = g_HvmR0.vmx.msr.vmx_vmcs_enum;
     1195    pVM->hwaccm.s.vmx.msr.vmx_eptcaps       = g_HvmR0.vmx.msr.vmx_eptcaps;
     1196    pVM->hwaccm.s.svm.msrHWCR               = g_HvmR0.svm.msrHWCR;
     1197    pVM->hwaccm.s.svm.u32Rev                = g_HvmR0.svm.u32Rev;
     1198    pVM->hwaccm.s.svm.u32Features           = g_HvmR0.svm.u32Features;
     1199    pVM->hwaccm.s.cpuid.u32AMDFeatureECX    = g_HvmR0.cpuid.u32AMDFeatureECX;
     1200    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX    = g_HvmR0.cpuid.u32AMDFeatureEDX;
     1201    pVM->hwaccm.s.lLastError                = g_HvmR0.lLastError;
     1202
     1203    pVM->hwaccm.s.uMaxASID                  = g_HvmR0.uMaxASID;
    11531204
    11541205
     
    11621213    }
    11631214
     1215    /*
     1216     * Initialize some per CPU fields.
     1217     */
    11641218    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    11651219    {
     
    11751229    }
    11761230
     1231    /*
     1232     * Call the hardware specific initialization method.
     1233     *
     1234     * Note! The fInUse handling here isn't correct as we can be
     1235     *       rescheduled to a different cpu, but the fInUse case is mostly for
     1236     *       debugging...  Disabling preemption isn't an option when allocating
     1237     *       memory, so we'll let it slip for now.
     1238     */
    11771239    RTCCUINTREG     fFlags = ASMIntDisableFlags();
    1178     PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
    1179 
    1180     /* Note: Not correct as we can be rescheduled to a different cpu, but the
    1181        fInUse case is mostly for debugging. */
     1240    PHMGLOBLCPUINFO pCpu   = HWACCMR0GetCurrentCpu();
    11821241    ASMAtomicWriteBool(&pCpu->fInUse, true);
    11831242    ASMSetFlags(fFlags);
    11841243
    1185     /* Init a VT-x or AMD-V VM. */
    1186     int rc = HWACCMR0Globals.pfnInitVM(pVM);
     1244    int rc = g_HvmR0.pfnInitVM(pVM);
    11871245
    11881246    ASMAtomicWriteBool(&pCpu->fInUse, false);
     
    11921250
    11931251/**
    1194  * Does Ring-0 per VM HWACCM termination.
     1252 * Does Ring-0 per VM HM termination.
    11951253 *
    11961254 * @returns VBox status code.
     
    11991257VMMR0DECL(int) HWACCMR0TermVM(PVM pVM)
    12001258{
    1201     int             rc;
    1202 
     1259    Log(("HWACCMR0TermVM: %p\n", pVM));
    12031260    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    12041261
    1205 #ifdef LOG_ENABLED
    1206     SUPR0Printf("HWACCMR0TermVM: %p\n", pVM);
    1207 #endif
    1208 
    1209     /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
    1210     AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    1211 
    1212     /* @note Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */
     1262    /* Make sure we don't touch hm after we've disabled hwaccm in preparation
     1263       of a suspend. */
     1264    /** @todo r=bird: This cannot be right, the termination functions are
     1265     *        just freeing memory and resetting pVM/pVCpu members...
     1266     *  ==> memory leak. */
     1267    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
     1268
     1269    /*
     1270     * Call the hardware specific method.
     1271     *
     1272     * Note! Not correct as we can be rescheduled to a different cpu, but the
     1273     *       fInUse case is mostly for debugging.
     1274     */
    12131275    RTCCUINTREG     fFlags = ASMIntDisableFlags();
    1214     PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
    1215 
     1276    PHMGLOBLCPUINFO pCpu   = HWACCMR0GetCurrentCpu();
    12161277    ASMAtomicWriteBool(&pCpu->fInUse, true);
    12171278    ASMSetFlags(fFlags);
    12181279
    1219     /* Terminate a VT-x or AMD-V VM. */
    1220     rc = HWACCMR0Globals.pfnTermVM(pVM);
     1280    int rc = g_HvmR0.pfnTermVM(pVM);
    12211281
    12221282    ASMAtomicWriteBool(&pCpu->fInUse, false);
     
    12261286
    12271287/**
    1228  * Sets up a VT-x or AMD-V session
     1288 * Sets up a VT-x or AMD-V session.
     1289 *
     1290 * This is mostly about setting up the hardware VM state.
    12291291 *
    12301292 * @returns VBox status code.
     
    12331295VMMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
    12341296{
    1235     int             rc;
    1236     RTCPUID         idCpu = RTMpCpuId();
    1237     PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
    1238 
     1297    Log(("HWACCMR0SetupVM: %p\n", pVM));
    12391298    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    12401299
    1241     /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
    1242     AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    1243 
    1244 #ifdef LOG_ENABLED
    1245     SUPR0Printf("HWACCMR0SetupVM: %p\n", pVM);
    1246 #endif
    1247 
     1300    /* Make sure we don't touch hwaccm after we've disabled hwaccm in
     1301       preparation of a suspend. */
     1302    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
     1303
     1304
     1305    /*
     1306     * Call the hardware specific setup VM method.  This requires the CPU to be
     1307     * enabled for AMD-V/VT-x and preemption to be prevented.
     1308     */
     1309    RTCCUINTREG     fFlags = ASMIntDisableFlags();
     1310    RTCPUID         idCpu  = RTMpCpuId();
     1311    PHMGLOBLCPUINFO pCpu   = &g_HvmR0.aCpuInfo[idCpu];
    12481312    ASMAtomicWriteBool(&pCpu->fInUse, true);
    12491313
     1314    /* On first entry we'll sync everything. */
    12501315    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    1251     {
    1252         /* On first entry we'll sync everything. */
    12531316        pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
    1254     }
    12551317
    12561318    /* Enable VT-x or AMD-V if local init is required. */
    1257     if (!HWACCMR0Globals.fGlobalInit)
    1258     {
    1259         rc = hwaccmR0EnableCpu(pVM, idCpu);
    1260         AssertRCReturn(rc, rc);
     1319    int rc;
     1320    if (!g_HvmR0.fGlobalInit)
     1321    {
     1322        rc = hmR0EnableCpu(pVM, idCpu);
     1323        AssertReturnStmt(RT_SUCCESS_NP(rc), ASMSetFlags(fFlags), rc);
    12611324    }
    12621325
    12631326    /* Setup VT-x or AMD-V. */
    1264     rc = HWACCMR0Globals.pfnSetupVM(pVM);
     1327    rc = g_HvmR0.pfnSetupVM(pVM);
    12651328
    12661329    /* Disable VT-x or AMD-V if local init was done before. */
    1267     if (!HWACCMR0Globals.fGlobalInit)
    1268     {
    1269         rc = hwaccmR0DisableCpu(idCpu);
    1270         AssertRC(rc);
     1330    if (!g_HvmR0.fGlobalInit)
     1331    {
     1332        int rc2 = hmR0DisableCpu(idCpu);
     1333        AssertRC(rc2);
    12711334    }
    12721335
    12731336    ASMAtomicWriteBool(&pCpu->fInUse, false);
     1337    ASMSetFlags(fFlags);
    12741338
    12751339    return rc;
     
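
The reworked HWACCMR0SetupVM also shows the discipline behind the new AssertReturnStmt: once ASMIntDisableFlags has run, every exit path, the early error return included, must pass through ASMSetFlags. In plain C the same shape falls out of a single cleanup label; the save/restore helpers below are mocks for the interrupt-flag intrinsics.

    #include <stdio.h>

    typedef unsigned long flags_t;

    static flags_t save_and_disable(void)   { printf("  irq off\n"); return 0x202; }
    static void    restore_flags(flags_t f) { printf("  irq restored (%#lx)\n", f); }

    static int setup_vm(int iFailAt)
    {
        flags_t fFlags = save_and_disable();
        int     rc     = -1;

        if (iFailAt == 1)
            goto out;                 /* early error: flags still restored */
        if (iFailAt == 2)
            goto out;
        rc = 0;                       /* the actual setup would run here */

    out:
        restore_flags(fFlags);        /* single exit covers every path */
        return rc;
    }

    int main(void)
    {
        printf("ok path:\n");  printf("  rc=%d\n", setup_vm(0));
        printf("err path:\n"); printf("  rc=%d\n", setup_vm(1));
        return 0;
    }
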
    12831347 * @param   pVM        The VM to operate on.
    12841348 * @param   pVCpu      VMCPU handle.
     1349 *
     1350 * @remarks This is called with preemption disabled.
    12851351 */
    12861352VMMR0DECL(int) HWACCMR0Enter(PVM pVM, PVMCPU pVCpu)
    12871353{
    1288     PCPUMCTX        pCtx;
    1289     int             rc;
    12901354    RTCPUID         idCpu = RTMpCpuId();
    1291     PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
    1292 
    1293     /* Make sure we can't enter a session after we've disabled hwaccm in preparation of a suspend. */
    1294     AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
     1355    PHMGLOBLCPUINFO pCpu  = &g_HvmR0.aCpuInfo[idCpu];
     1356
     1357    /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
     1358    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    12951359    ASMAtomicWriteBool(&pCpu->fInUse, true);
    12961360
     
    12981362    pVCpu->hwaccm.s.idEnteredCpu = idCpu;
    12991363
    1300     pCtx = CPUMQueryGuestCtxPtr(pVCpu);
     1364    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    13011365
    13021366    /* Always load the guest's FPU/XMM state on-demand. */
     
    13151379        pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);
    13161380
    1317     /* Enable VT-x or AMD-V if local init is required, or enable if it's a freshly onlined CPU. */
     1381    /* Enable VT-x or AMD-V if local init is required, or enable if it's a
     1382       freshly onlined CPU. */
     1383    int rc;
    13181384    if (   !pCpu->fConfigured
    1319         || !HWACCMR0Globals.fGlobalInit)
    1320     {
    1321         rc = hwaccmR0EnableCpu(pVM, idCpu);
     1385        || !g_HvmR0.fGlobalInit)
     1386    {
     1387        rc = hmR0EnableCpu(pVM, idCpu);
    13221388        AssertRCReturn(rc, rc);
    13231389    }
     
    13271393#endif
    13281394
    1329     rc  = HWACCMR0Globals.pfnEnterSession(pVM, pVCpu, pCpu);
     1395    rc  = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
    13301396    AssertRC(rc);
    1331     /* We must save the host context here (VT-x) as we might be rescheduled on a different cpu after a long jump back to ring 3. */
    1332     rc |= HWACCMR0Globals.pfnSaveHostState(pVM, pVCpu);
     1397    /* We must save the host context here (VT-x) as we might be rescheduled on
     1398       a different cpu after a long jump back to ring 3. */
     1399    rc |= g_HvmR0.pfnSaveHostState(pVM, pVCpu);
    13331400    AssertRC(rc);
    1334     rc |= HWACCMR0Globals.pfnLoadGuestState(pVM, pVCpu, pCtx);
     1401    rc |= g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx);
    13351402    AssertRC(rc);
    13361403
     
    13401407#endif
    13411408
    1342     /* keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
     1409    /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
     1410       and ring-3 calls. */
    13431411    if (RT_FAILURE(rc))
    13441412        pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
     
    13531421 * @param   pVM        The VM to operate on.
    13541422 * @param   pVCpu      VMCPU handle.
     1423 *
     1424 * @remarks Called with preemption disabled just like HWACCMR0Enter, our
     1425 *          counterpart.
    13551426 */
    13561427VMMR0DECL(int) HWACCMR0Leave(PVM pVM, PVMCPU pVCpu)
     
    13581429    int             rc;
    13591430    RTCPUID         idCpu = RTMpCpuId();
    1360     PHWACCM_CPUINFO pCpu  = &HWACCMR0Globals.aCpuInfo[idCpu];
     1431    PHMGLOBLCPUINFO pCpu  = &g_HvmR0.aCpuInfo[idCpu];
    13611432    PCPUMCTX        pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
    13621433
    1363 
    13641434    /** @todo r=bird: This can't be entirely right? */
    1365     AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    1366 
    1367     /* Note:  It's rather tricky with longjmps done by e.g. Log statements or the page fault handler.
    1368      *        We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
    1369      *        or trash somebody else's FPU state.
     1435    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
     1436
     1437    /*
     1438     * Save the guest FPU and XMM state if necessary.
     1439     *
     1440     * Note! It's rather tricky with longjmps done by e.g. Log statements or
     1441     *       the page fault handler.  We must restore the host FPU here to make
     1442     *       absolutely sure we don't leave the guest FPU state active or trash
     1443     *       somebody else's FPU state.
    13701444     */
    1371     /* Save the guest FPU and XMM state if necessary. */
    13721445    if (CPUMIsGuestFPUStateActive(pVCpu))
    13731446    {
     
    13791452    }
    13801453
    1381     rc = HWACCMR0Globals.pfnLeaveSession(pVM, pVCpu, pCtx);
    1382 
    1383     /* We don't pass on invlpg information to the recompiler for nested paging guests, so we must make sure the recompiler flushes its TLB
    1384      * the next time it executes code.
    1385      */
     1454    rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
     1455
     1456    /* We don't pass on invlpg information to the recompiler for nested paging
     1457       guests, so we must make sure the recompiler flushes its TLB the next
     1458       time it executes code. */
    13861459    if (    pVM->hwaccm.s.fNestedPaging
    13871460        &&  CPUMIsGuestInPagedProtectedModeEx(pCtx))
    13881461        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    13891462
    1390     /* keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
     1463    /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
     1464       and ring-3 calls. */
    13911465#ifdef RT_STRICT
    13921466    if (RT_UNLIKELY(   pVCpu->hwaccm.s.idEnteredCpu != idCpu
     
    13991473    pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
    14001474
    1401     /* Disable VT-x or AMD-V if local init was done before. */
    1402     if (!HWACCMR0Globals.fGlobalInit)
    1403     {
    1404         rc = hwaccmR0DisableCpu(idCpu);
     1475    /*
     1476     * Disable VT-x or AMD-V if local init was done before.
     1477     */
     1478    if (!g_HvmR0.fGlobalInit)
     1479    {
     1480        rc = hmR0DisableCpu(idCpu);
    14051481        AssertRC(rc);
    14061482
     
    14151491}
    14161492
     1493
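
HWACCMR0Enter records idEnteredCpu and HWACCMR0Leave checks it against the current CPU, which is how strict builds catch a reschedule between the two (a loaded VMCS is tied to the CPU it was loaded on). Below is a sketch of that bracketing check; sched_getcpu is a Linux-ism standing in for RTMpCpuId, and since this user-land version cannot disable preemption the mismatch is reported rather than asserted.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    static int g_idEnteredCpu = -1;

    static int hw_enter(void)
    {
        g_idEnteredCpu = sched_getcpu();   /* remember where the VMCS was loaded */
        return g_idEnteredCpu >= 0 ? 0 : -1;
    }

    static int hw_leave(void)
    {
        int idCpu = sched_getcpu();
        if (idCpu != g_idEnteredCpu)
        {
            /* In ring-0 this is a strict assertion; preemption is disabled
               there, so enter and leave must observe the same CPU. */
            fprintf(stderr, "rescheduled: entered on %d, left on %d\n",
                    g_idEnteredCpu, idCpu);
            g_idEnteredCpu = -1;
            return -1;
        }
        g_idEnteredCpu = -1;
        return 0;
    }

    int main(void)
    {
        if (hw_enter() == 0)
        {
            /* ...guest execution would run here... */
            printf("enter/leave rc=%d\n", hw_leave());
        }
        return 0;
    }
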
    14171494/**
    14181495 * Runs guest code in a hardware accelerated VM.
     
    14201497 * @returns VBox status code.
    14211498 * @param   pVM         The VM to operate on.
    1422  * @param   pVCpu      VMCPUD id.
     1499 * @param   pVCpu       VMCPU id.
     1500 *
     1501 * @remarks Called with preemption disabled and after first having called
     1502 *          HWACCMR0Enter.
    14231503 */
    14241504VMMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
    14251505{
    14261506#ifdef VBOX_STRICT
    1427     PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[RTMpCpuId()];
     1507    PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[RTMpCpuId()];
    14281508    Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
    14291509    Assert(pCpu->fConfigured);
    1430     AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
     1510    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    14311511    Assert(ASMAtomicReadBool(&pCpu->fInUse) == true);
    14321512#endif
     
    14361516#endif
    14371517
    1438     int rc = HWACCMR0Globals.pfnRunGuestCode(pVM, pVCpu, CPUMQueryGuestCtxPtr(pVCpu));
     1518    int rc = g_HvmR0.pfnRunGuestCode(pVM, pVCpu, CPUMQueryGuestCtxPtr(pVCpu));
    14391519
    14401520#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     
    14591539    if (pVM->hwaccm.s.vmx.fSupported)
    14601540        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
    1461 
    14621541    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
    14631542}
     1543
    14641544
    14651545/**
     
    14761556    if (pVM->hwaccm.s.vmx.fSupported)
    14771557        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
    1478 
    14791558    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
    14801559}
     1560
    14811561
    14821562/**
     
    14891569{
    14901570    PVMCPU   pVCpu = &pVM->aCpus[0];
    1491     CPUMCTX *pCtx;
     1571    PCPUMCTX pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
    14921572    uint32_t aParam[5] = {0, 1, 2, 3, 4};
    14931573    int      rc;
    1494 
    1495     pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    14961574
    14971575    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     
    15011579        rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
    15021580    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     1581
    15031582    return rc;
    15041583}
     
    15111590 * @returns Suspend pending or not
    15121591 */
    1513 VMMR0DECL(bool) HWACCMR0SuspendPending()
    1514 {
    1515     return ASMAtomicReadBool(&HWACCMR0Globals.fSuspended);
    1516 }
     1592VMMR0DECL(bool) HWACCMR0SuspendPending(void)
     1593{
     1594    return ASMAtomicReadBool(&g_HvmR0.fSuspended);
     1595}
     1596
    15171597
    15181598/**
     
    15221602 * @returns cpu structure pointer
    15231603 */
    1524 VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu(void)
     1604VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpu(void)
    15251605{
    15261606    RTCPUID idCpu = RTMpCpuId();
    1527     Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
    1528     return &HWACCMR0Globals.aCpuInfo[idCpu];
    1529 }
     1607    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
     1608    return &g_HvmR0.aCpuInfo[idCpu];
     1609}
     1610
    15301611
    15311612/**
     
    15361617 * @param   idCpu       id of the VCPU
    15371618 */
    1538 VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu)
    1539 {
    1540     return &HWACCMR0Globals.aCpuInfo[idCpu];
    1541 }
     1619VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu)
     1620{
     1621    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
     1622    return &g_HvmR0.aCpuInfo[idCpu];
     1623}
     1624
    15421625
    15431626/**
     
    15621645}
    15631646
     1647
    15641648/**
    15651649 * Save a pending IO write.
     
    15971681    *pfVTxDisabled = false;
    15981682
    1599     if (   !HWACCMR0Globals.fEnabled
    1600         || !HWACCMR0Globals.vmx.fSupported /* no such issues with AMD-V */
    1601         || !HWACCMR0Globals.fGlobalInit    /* Local init implies the CPU is currently not in VMX root mode. */)
     1683    if (   !g_HvmR0.fEnabled
     1684        || !g_HvmR0.vmx.fSupported /* no such issues with AMD-V */
     1685        || !g_HvmR0.fGlobalInit    /* Local init implies the CPU is currently not in VMX root mode. */)
    16021686        return VINF_SUCCESS;    /* nothing to do */
    16031687
    1604     switch(VMMGetSwitcher(pVM))
     1688    switch (VMMGetSwitcher(pVM))
    16051689    {
    16061690        case VMMSWITCHER_32_TO_32:
     
    16191703    }
    16201704
    1621     PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
     1705    PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
    16221706    AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_INTERNAL_ERROR);
    16231707
     
    16281712}
    16291713
     1714
    16301715/**
    16311716 * Raw-mode switcher hook - re-enable VT-x if was active *and* the current
     
    16431728        return VINF_SUCCESS;    /* nothing to do */
    16441729
    1645     Assert(HWACCMR0Globals.fEnabled);
    1646     Assert(HWACCMR0Globals.vmx.fSupported);
    1647     Assert(HWACCMR0Globals.fGlobalInit);
    1648 
    1649     PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
     1730    Assert(g_HvmR0.fEnabled);
     1731    Assert(g_HvmR0.vmx.fSupported);
     1732    Assert(g_HvmR0.fGlobalInit);
     1733
     1734    PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
    16501735    AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_INTERNAL_ERROR);
    16511736
     
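
The switcher hooks above form a conditional bracket: VT-x is taken down only when global init left the CPU in VMX root mode, and the boolean returned through pfVTxDisabled tells the re-enable hook whether there is anything to undo. The sketch keeps just that contract; vmx_off/vmx_on are placeholders for the real VMXOFF/VMXON sequences.

    #include <stdbool.h>
    #include <stdio.h>

    static bool g_fGlobalInit = true;   /* CPU is in VMX root mode when true */

    static void vmx_off(void) { printf("  vmxoff\n"); }
    static void vmx_on(void)  { printf("  vmxon\n");  }

    static int enter_switcher(bool *pfVTxDisabled)
    {
        *pfVTxDisabled = false;
        if (!g_fGlobalInit)
            return 0;                   /* local init: nothing to undo */
        vmx_off();
        *pfVTxDisabled = true;          /* tell the leave hook to re-enable */
        return 0;
    }

    static void leave_switcher(bool fVTxDisabled)
    {
        if (!fVTxDisabled)
            return;                     /* nothing was disabled */
        vmx_on();
    }

    int main(void)
    {
        bool fVTxDisabled;
        enter_switcher(&fVTxDisabled);
        printf("  ...raw-mode world switch...\n");
        leave_switcher(fVTxDisabled);
        return 0;
    }
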
    16561741
    16571742#ifdef VBOX_STRICT
     1743
    16581744/**
    16591745 * Dumps a descriptor.
     
    17781864}
    17791865
     1866
    17801867/**
    17811868 * Formats a full register dump.
     
    19242011
    19252012}
     2013
    19262014#endif /* VBOX_STRICT */
    19272015
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r37319 r37320  
    6767 * @param   HCPhysCpuPage   Physical address of the global cpu page.
    6868 */
    69 VMMR0DECL(int) SVMR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     69VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    7070{
    7171    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     
    104104 * @param   HCPhysCpuPage   Physical address of the global cpu page.
    105105 */
    106 VMMR0DECL(int) SVMR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     106VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    107107{
    108108    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     
    967967    unsigned    cResume = 0;
    968968    uint8_t     u8LastTPR;
    969     PHWACCM_CPUINFO pCpu = 0;
     969    PHMGLOBLCPUINFO pCpu = 0;
    970970    RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
    971971#ifdef VBOX_STRICT
     
    26242624 * @param   pCpu        CPU info struct
    26252625 */
    2626 VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu)
     2626VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
    26272627{
    26282628    Assert(pVM->hwaccm.s.svm.fSupported);
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.h

    r37319 r37320  
    4646 * @param   pCpu        CPU info struct
    4747 */
    48 VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu);
     48VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu);
    4949
    5050/**
     
    6767 * @param   pPageCpuPhys    Physical address of the global cpu page
    6868 */
    69 VMMR0DECL(int) SVMR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage);
     69VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage);
    7070
    7171/**
     
    7777 * @param   pPageCpuPhys    Physical address of the global cpu page
    7878 */
    79 VMMR0DECL(int) SVMR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     79VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
    8080
    8181/**
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r37319 r37320  
    104104 * @param   HCPhysCpuPage   Physical address of the global cpu page.
    105105 */
    106 VMMR0DECL(int) VMXR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     106VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    107107{
    108108    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     
    143143 * @param   HCPhysCpuPage   Physical address of the global cpu page.
    144144 */
    145 VMMR0DECL(int) VMXR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     145VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    146146{
    147147    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     
    21442144static void vmxR0SetupTLBEPT(PVM pVM, PVMCPU pVCpu)
    21452145{
    2146     PHWACCM_CPUINFO pCpu;
     2146    PHMGLOBLCPUINFO pCpu;
    21472147
    21482148    Assert(pVM->hwaccm.s.fNestedPaging);
     
    22072207static void vmxR0SetupTLBVPID(PVM pVM, PVMCPU pVCpu)
    22082208{
    2209     PHWACCM_CPUINFO pCpu;
     2209    PHMGLOBLCPUINFO pCpu;
    22102210
    22112211    Assert(pVM->hwaccm.s.vmx.fVPID);
     
    25902590        )
    25912591    {
    2592         PHWACCM_CPUINFO pCpu;
     2592        PHMGLOBLCPUINFO pCpu;
    25932593
    25942594        pCpu = HWACCMR0GetCurrentCpu();
     
    42664266 * @param   pCpu        CPU info struct
    42674267 */
    4268 VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu)
     4268VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
    42694269{
    42704270    Assert(pVM->hwaccm.s.vmx.fSupported);
     
    46154615{
    46164616    uint32_t        aParam[6];
    4617     PHWACCM_CPUINFO pCpu;
     4617    PHMGLOBLCPUINFO pCpu;
    46184618    RTHCPHYS        HCPhysCpuPage;
    46194619    int             rc;
     
    46834683{
    46844684    int             rc, rc2;
    4685     PHWACCM_CPUINFO pCpu;
     4685    PHMGLOBLCPUINFO pCpu;
    46864686    RTHCPHYS        HCPhysCpuPage;
    46874687    RTHCUINTREG     uOldEFlags;
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.h

    r35346 r37320  
    110110 * @param   pCpu        CPU info struct
    111111 */
    112 VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu);
     112VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu);
    113113
    114114/**
     
    132132 * @param   pPageCpuPhys    Physical address of the global cpu page
    133133 */
    134 VMMR0DECL(int) VMXR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     134VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
    135135
    136136/**
     
    142142 * @param   pPageCpuPhys    Physical address of the global cpu page
    143143 */
    144 VMMR0DECL(int) VMXR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     144VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
    145145
    146146/**
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r37228 r37320  
    921921         */
    922922        case VMMR0_DO_HWACC_SETUP_VM:
    923         {
    924             RTCCUINTREG fFlags = ASMIntDisableFlags();
    925             int rc = HWACCMR0SetupVM(pVM);
    926             ASMSetFlags(fFlags);
    927             return rc;
    928         }
     923            return HWACCMR0SetupVM(pVM);
    929924
    930925        /*
  • trunk/src/VBox/VMM/include/HWACCMInternal.h

    r37319 r37320  
    11/* $Id$ */
    22/** @file
    3  * HWACCM - Internal header file.
     3 * HM - Internal header file.
    44 */
    55
    66/*
    7  * Copyright (C) 2006-2007 Oracle Corporation
     7 * Copyright (C) 2006-2011 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    140140#define HWACCM_VTX_TOTAL_DEVHEAP_MEM        (HWACCM_EPT_IDENTITY_PG_TABLE_SIZE + HWACCM_VTX_TSS_SIZE)
    141141
    142 /* Enable for TPR guest patching. */
     142/** Enable for TPR guest patching. */
    143143#define VBOX_HWACCM_WITH_GUEST_PATCHING
    144144
     
    146146 */
    147147#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
    148 #define HWACCM_SSM_VERSION                  5
    149 #define HWACCM_SSM_VERSION_NO_PATCHING      4
     148# define HWACCM_SSM_VERSION                 5
     149# define HWACCM_SSM_VERSION_NO_PATCHING     4
    150150#else
    151 #define HWACCM_SSM_VERSION                  4
    152 #define HWACCM_SSM_VERSION_NO_PATCHING      4
     151# define HWACCM_SSM_VERSION                 4
     152# define HWACCM_SSM_VERSION_NO_PATCHING     4
    153153#endif
    154154#define HWACCM_SSM_VERSION_2_0_X            3
     
    157157 * Global per-cpu information. (host)
    158158 */
    159 typedef struct
     159typedef struct HMGLOBLCPUINFO
    160160{
    161161    /** The CPU ID. */
     
    163163    /** The memory object   */
    164164    RTR0MEMOBJ          hMemObj;
    165     /* Current ASID (AMD-V)/VPID (Intel) */
     165    /** Current ASID (AMD-V) / VPID (Intel). */
    166166    uint32_t            uCurrentASID;
    167     /* TLB flush count */
     167    /** TLB flush count. */
    168168    uint32_t            cTLBFlushes;
    169169
    170     /* Set the first time a cpu is used to make sure we start with a clean TLB. */
     170    /** Set the first time a cpu is used to make sure we start with a clean TLB. */
    171171    bool                fFlushTLB;
    172172
     
    179179    /** In use by our code. (for power suspend) */
    180180    volatile bool       fInUse;
    181 } HWACCM_CPUINFO;
    182 typedef HWACCM_CPUINFO *PHWACCM_CPUINFO;
     181} HMGLOBLCPUINFO;
     182/** Pointer to the per-cpu global information. */
     183typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;
    183184
    184185typedef enum
     
    886887#ifdef IN_RING0
    887888
    888 VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu(void);
    889 VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu);
     889VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpu(void);
     890VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu);
    890891
    891892