VirtualBox

Changeset 37319 in vbox


Ignore:
Timestamp:
Jun 3, 2011 1:12:37 PM (14 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
72074
Message:

HWACCM: Use RTOnce to serialize the enabling so that no CPU can start executing code before it has been fully enabled to do so...

Location:
trunk
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/hwaccm.h

    r36441 r37319  
    3535 * @{
    3636 */
    37 
    38 /**
    39  * HWACCM state
    40  */
    41 typedef enum HWACCMSTATE
    42 {
    43     /* Not yet set */
    44     HWACCMSTATE_UNINITIALIZED = 0,
    45     /* Enabled */
    46     HWACCMSTATE_ENABLED,
    47     /* Disabled */
    48     HWACCMSTATE_DISABLED,
    49     /** The usual 32-bit hack. */
    50     HWACCMSTATE_32BIT_HACK = 0x7fffffff
    51 } HWACCMSTATE;
    5237
    5338RT_C_DECLS_BEGIN
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp

    r37298 r37319  
    55
    66/*
    7  * Copyright (C) 2006-2007 Oracle Corporation
     7 * Copyright (C) 2006-2011 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    3636#include <iprt/mem.h>
    3737#include <iprt/memobj.h>
     38#include <iprt/once.h>
    3839#include <iprt/param.h>
    3940#include <iprt/power.h>
     
    4243#include "HWVMXR0.h"
    4344#include "HWSVMR0.h"
     45
    4446
    4547/*******************************************************************************
     
    5355static DECLCALLBACK(void) hwaccmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData);
    5456
     57
    5558/*******************************************************************************
    5659*   Global Variables                                                           *
     
    6770    DECLR0CALLBACKMEMBER(int, pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
    6871    DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
    69     DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys));
    70     DECLR0CALLBACKMEMBER(int, pfnDisableCpu, (PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys));
     72    DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHWACCM_CPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
     73    DECLR0CALLBACKMEMBER(int, pfnDisableCpu, (PHWACCM_CPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
    7174    DECLR0CALLBACKMEMBER(int, pfnInitVM, (PVM pVM));
    7275    DECLR0CALLBACKMEMBER(int, pfnTermVM, (PVM pVM));
     
    114117        uint32_t                    ulLastInstrError;
    115118    } vmx;
     119
    116120    struct
    117121    {
     
    137141    } cpuid;
    138142
    139     HWACCMSTATE                     enmHwAccmState;
    140 
     143    /** If set, VT-x/AMD-V is enabled globally at init time, otherwise it's
     144     * enabled and disabled each time it's used to execute guest code. */
    141145    bool                            fGlobalInit;
     146    /** Indicates whether the host is suspending or not.  We'll refuse a few
     147     *  actions when the host is being suspended to speed up the suspending and
     148     *  avoid trouble. */
    142149    volatile        bool            fSuspended;
     150
     151    /** Whether we've already initialized all CPUs.
     152     * @remarks We could check the EnableAllCpusOnce state, but this is
     153     *          simpler and hopefully easier to understand. */
     154    bool                            fEnabled;
     155    /** Serialize initialization in HWACCMR0EnableAllCpus. */
     156    RTONCE                          EnableAllCpusOnce;
    143157} HWACCMR0Globals;
    144158
     
    222236}
    223237
    224 static DECLCALLBACK(int) hwaccmR0DummyEnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
     238static DECLCALLBACK(int) hwaccmR0DummyEnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    225239{
    226240    return VINF_SUCCESS;
    227241}
    228242
    229 static DECLCALLBACK(int) hwaccmR0DummyDisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
     243static DECLCALLBACK(int) hwaccmR0DummyDisableCpu(PHWACCM_CPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    230244{
    231245    return VINF_SUCCESS;
     
    267281
    268282/**
    269  * Does global Ring-0 HWACCM initialization.
     283 * Does global Ring-0 HWACCM initialization (at module init).
    270284 *
    271285 * @returns VBox status code.
     
    273287VMMR0DECL(int) HWACCMR0Init(void)
    274288{
    275     int     rc;
    276     bool    fAMDVPresent = false;
    277 
    278     HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;
     289
     290    /*
     291     * Initialize the globals.
     292     */
     293    HWACCMR0Globals.fEnabled = false;
     294    static RTONCE s_OnceInit = RTONCE_INITIALIZER;
     295    HWACCMR0Globals.EnableAllCpusOnce = s_OnceInit;
    279296    for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
    280         HWACCMR0Globals.aCpuInfo[i].pMemObj = NIL_RTR0MEMOBJ;
     297        HWACCMR0Globals.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
    281298
    282299    /* Fill in all callbacks with placeholders. */
     
    307324     * Check for VT-x and AMD-V capabilities
    308325     */
     326    int rc;
    309327    if (ASMHasCpuId())
    310328    {
    311         uint32_t u32FeaturesECX;
     329        uint32_t u32FeaturesECX, u32FeaturesEDX;
     330        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;
    312331        uint32_t u32Dummy;
    313         uint32_t u32FeaturesEDX;
    314         uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;
    315 
     332
     333        /* Standard features. */
    316334        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
    317335        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
     336
    318337        /* Query AMD features. */
    319         ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);
    320 
    321         if (    u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
    322             &&  u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
    323             &&  u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
     338        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX,
     339                 &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);
     340
     341        /*
     342         * Intel CPU?
     343         */
     344        if (   u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
     345            && u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
     346            && u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
    324347           )
    325348        {
     
    333356               )
    334357            {
     358                /** @todo move this into a separate function. */
    335359                HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    336360
     
    400424                            HWACCMR0Globals.vmx.hostEFER            = ASMRdMsr(MSR_K6_EFER);
    401425
    402                             rc = RTR0MemObjAllocCont(&pScatchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
     426                            rc = RTR0MemObjAllocCont(&pScatchMemObj, PAGE_SIZE, true /* executable R0 mapping */);
    403427                            if (RT_FAILURE(rc))
    404428                                return rc;
     
    470494                        HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
    471495                    }
     496
     497                    if (HWACCMR0Globals.vmx.fSupported)
     498                    {
     499                        HWACCMR0Globals.pfnEnterSession     = VMXR0Enter;
     500                        HWACCMR0Globals.pfnLeaveSession     = VMXR0Leave;
     501                        HWACCMR0Globals.pfnSaveHostState    = VMXR0SaveHostState;
     502                        HWACCMR0Globals.pfnLoadGuestState   = VMXR0LoadGuestState;
     503                        HWACCMR0Globals.pfnRunGuestCode     = VMXR0RunGuestCode;
     504                        HWACCMR0Globals.pfnEnableCpu        = VMXR0EnableCpu;
     505                        HWACCMR0Globals.pfnDisableCpu       = VMXR0DisableCpu;
     506                        HWACCMR0Globals.pfnInitVM           = VMXR0InitVM;
     507                        HWACCMR0Globals.pfnTermVM           = VMXR0TermVM;
     508                        HWACCMR0Globals.pfnSetupVM          = VMXR0SetupVM;
     509                    }
    472510                }
    473511#ifdef LOG_ENABLED
     
    479517                HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
    480518        }
    481         else
    482         if (    u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
    483             &&  u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
    484             &&  u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
    485            )
     519        /*
     520         * AMD CPU?
     521         */
     522        else if (   u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
     523                 && u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
     524                 && u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
     525                )
    486526        {
     527            /** @todo move this into a separate function. */
     528
    487529            /*
    488530             * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
     
    494536               )
    495537            {
    496                 fAMDVPresent = true;
     538                HWACCMR0Globals.pfnEnterSession     = SVMR0Enter;
     539                HWACCMR0Globals.pfnLeaveSession     = SVMR0Leave;
     540                HWACCMR0Globals.pfnSaveHostState    = SVMR0SaveHostState;
     541                HWACCMR0Globals.pfnLoadGuestState   = SVMR0LoadGuestState;
     542                HWACCMR0Globals.pfnRunGuestCode     = SVMR0RunGuestCode;
     543                HWACCMR0Globals.pfnEnableCpu        = SVMR0EnableCpu;
     544                HWACCMR0Globals.pfnDisableCpu       = SVMR0DisableCpu;
     545                HWACCMR0Globals.pfnInitVM           = SVMR0InitVM;
     546                HWACCMR0Globals.pfnTermVM           = SVMR0TermVM;
     547                HWACCMR0Globals.pfnSetupVM          = SVMR0SetupVM;
    497548
    498549                /* Query AMD features. */
    499                 ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.uMaxASID, &u32Dummy, &HWACCMR0Globals.svm.u32Features);
     550                ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.uMaxASID,
     551                         &u32Dummy, &HWACCMR0Globals.svm.u32Features);
    500552
    501553                /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
     
    506558                    rc = hwaccmR0FirstRcGetStatus(&FirstRc);
    507559#ifndef DEBUG_bird
    508                 AssertMsg(rc == VINF_SUCCESS || rc == VERR_SVM_IN_USE, ("hwaccmR0InitCpu failed for cpu %d with rc=%d\n", hwaccmR0FirstRcGetCpuId(&FirstRc), rc));
     560                AssertMsg(rc == VINF_SUCCESS || rc == VERR_SVM_IN_USE,
     561                          ("hwaccmR0InitCpu failed for cpu %d with rc=%d\n", hwaccmR0FirstRcGetCpuId(&FirstRc), rc));
    509562#endif
    510563                if (RT_SUCCESS(rc))
     
    520573                HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
    521574        }
     575        /*
     576         * Unknown CPU.
     577         */
    522578        else
    523579            HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
     
    526582        HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;
    527583
    528     if (HWACCMR0Globals.vmx.fSupported)
    529     {
    530         HWACCMR0Globals.pfnEnterSession     = VMXR0Enter;
    531         HWACCMR0Globals.pfnLeaveSession     = VMXR0Leave;
    532         HWACCMR0Globals.pfnSaveHostState    = VMXR0SaveHostState;
    533         HWACCMR0Globals.pfnLoadGuestState   = VMXR0LoadGuestState;
    534         HWACCMR0Globals.pfnRunGuestCode     = VMXR0RunGuestCode;
    535         HWACCMR0Globals.pfnEnableCpu        = VMXR0EnableCpu;
    536         HWACCMR0Globals.pfnDisableCpu       = VMXR0DisableCpu;
    537         HWACCMR0Globals.pfnInitVM           = VMXR0InitVM;
    538         HWACCMR0Globals.pfnTermVM           = VMXR0TermVM;
    539         HWACCMR0Globals.pfnSetupVM          = VMXR0SetupVM;
    540     }
    541     else
    542     if (fAMDVPresent)
    543     {
    544         HWACCMR0Globals.pfnEnterSession     = SVMR0Enter;
    545         HWACCMR0Globals.pfnLeaveSession     = SVMR0Leave;
    546         HWACCMR0Globals.pfnSaveHostState    = SVMR0SaveHostState;
    547         HWACCMR0Globals.pfnLoadGuestState   = SVMR0LoadGuestState;
    548         HWACCMR0Globals.pfnRunGuestCode     = SVMR0RunGuestCode;
    549         HWACCMR0Globals.pfnEnableCpu        = SVMR0EnableCpu;
    550         HWACCMR0Globals.pfnDisableCpu       = SVMR0DisableCpu;
    551         HWACCMR0Globals.pfnInitVM           = SVMR0InitVM;
    552         HWACCMR0Globals.pfnTermVM           = SVMR0TermVM;
    553         HWACCMR0Globals.pfnSetupVM          = SVMR0SetupVM;
    554     }
    555 
     584    /*
     585     * Register notification callbacks that we can use to disable/enable CPUs
     586     * when brought offline/online or suspending/resuming.
     587     */
    556588    if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
    557589    {
     
    563595    }
    564596
     597    /* We return success here because module init shall not fail if HWACCM
     598       fails to initialize. */
    565599    return VINF_SUCCESS;
    566600}
     
    623657        && HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
    624658    {
     659        /*
     660         * Simple if the host OS manages VT-x.
     661         */
    625662        Assert(HWACCMR0Globals.fGlobalInit);
    626663        rc = SUPR0EnableVTx(false /* fEnable */);
     664
    627665        for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
    628666        {
    629667            HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = false;
    630             Assert(HWACCMR0Globals.aCpuInfo[iCpu].pMemObj == NIL_RTR0MEMOBJ);
     668            Assert(HWACCMR0Globals.aCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
    631669        }
    632670    }
     
    660698        for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
    661699        {
    662             if (HWACCMR0Globals.aCpuInfo[i].pMemObj != NIL_RTR0MEMOBJ)
     700            if (HWACCMR0Globals.aCpuInfo[i].hMemObj != NIL_RTR0MEMOBJ)
    663701            {
    664                 RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
    665                 HWACCMR0Globals.aCpuInfo[i].pMemObj = NIL_RTR0MEMOBJ;
     702                RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].hMemObj, false);
     703                HWACCMR0Globals.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
    666704            }
    667705        }
     
    671709
    672710
    673 /**
    674  * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
    675  * is to be called on the target cpus.
     711
     712/**
     713 * Worker function used by hwaccmR0PowerCallback and HWACCMR0Init to initialize
     714 * VT-x / AMD-V on a CPU.
    676715 *
    677716 * @param   idCpu       The identifier for the CPU the function is called on.
    678  * @param   pvUser1     The 1st user argument.
    679  * @param   pvUser2     The 2nd user argument.
     717 * @param   pvUser1     The EBX value of CPUID(0).
     718 * @param   pvUser2     Pointer to the first RC structure.
    680719 */
    681720static DECLCALLBACK(void) hwaccmR0InitCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
     
    696735         * Once the lock bit is set, this MSR can no longer be modified.
    697736         */
    698         if (    !(val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
    699             ||  ((val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)) == MSR_IA32_FEATURE_CONTROL_VMXON) /* Some BIOSes forget to set the locked bit. */
     737        if (   !(val    & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
     738            || (   (val & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
     739                == MSR_IA32_FEATURE_CONTROL_VMXON ) /* Some BIOSes forget to set the locked bit. */
    700740           )
    701741        {
     
    704744            val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    705745        }
    706         if (   (val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
    707                    == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
     746        if (   (val & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
     747            == (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
    708748            rc = VINF_SUCCESS;
    709749        else
     
    750790
    751791/**
    752  * Sets up HWACCM on all cpus.
    753  *
    754  * @returns VBox status code.
    755  * @param   pVM                 The VM to operate on.
    756  *
    757  */
    758 VMMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM)
    759 {
    760     AssertCompile(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));
    761 
    762     /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
    763     if (ASMAtomicReadBool(&HWACCMR0Globals.fSuspended))
    764         return VERR_HWACCM_SUSPEND_PENDING;
    765 
    766 /** @todo r=bird: Here be dragons and they are racing one another...
    767  *  The problem is that allocating memory for the 128 CPUs might take longer
    768  *  than for the 2nd and 3rd cpu to get into the HWACCMR0SetupVM code. So, they
    769  *  may end up there without having enabled VT-x and #UD on a VMCLEAN for
    770  *  instance.   Or that's what it looks like at least. */
    771     if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, HWACCMSTATE_ENABLED, HWACCMSTATE_UNINITIALIZED))
    772     {
    773         int rc;
    774 
    775         HWACCMR0Globals.fGlobalInit = pVM->hwaccm.s.fGlobalInit;
    776 
    777         if (   HWACCMR0Globals.vmx.fSupported
    778             && HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
    779         {
    780             rc = SUPR0EnableVTx(true /* fEnable */);
    781             if (RT_SUCCESS(rc))
    782             {
    783                 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
    784                 {
    785                     HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = true;
    786                     Assert(HWACCMR0Globals.aCpuInfo[iCpu].pMemObj == NIL_RTR0MEMOBJ);
    787                 }
    788                 /* If the host provides a VT-x init API, then we'll rely on that for global init. */
    789                 HWACCMR0Globals.fGlobalInit = pVM->hwaccm.s.fGlobalInit = true;
    790             }
    791             else
    792                 AssertMsgFailed(("HWACCMR0EnableAllCpus/SUPR0EnableVTx: rc=%Rrc\n", rc));
    793         }
    794         else
    795         {
    796             /* Allocate one page per cpu for the global vt-x and amd-v pages */
    797             for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
    798             {
    799                 Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);
    800 
    801                 if (RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(i)))
    802                 {
    803                     rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    804                     AssertRC(rc);
    805                     if (RT_FAILURE(rc))
    806                         return rc;
    807 
    808                     void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
    809                     Assert(pvR0);
    810                     ASMMemZeroPage(pvR0);
    811 
    812 #if defined(LOG_ENABLED) && !defined(DEBUG_bird)
    813                     SUPR0Printf("address %x phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
    814 #endif
    815                 }
    816                 HWACCMR0Globals.aCpuInfo[i].fConfigured = false;
    817             }
    818             if (HWACCMR0Globals.fGlobalInit)
    819             {
    820                 /* First time, so initialize each cpu/core */
    821                 HWACCMR0FIRSTRC FirstRc;
    822                 hwaccmR0FirstRcInit(&FirstRc);
    823                 rc = RTMpOnAll(hwaccmR0EnableCpuCallback, (void *)pVM, &FirstRc);
    824                 if (RT_SUCCESS(rc))
    825                     rc = hwaccmR0FirstRcGetStatus(&FirstRc);
    826                 AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", hwaccmR0FirstRcGetCpuId(&FirstRc), rc));
    827             }
    828             else
    829                 rc = VINF_SUCCESS;
    830         }
    831 
    832         return rc;
    833     }
    834     return VINF_SUCCESS;
    835 }
    836 
    837 /**
    838792 * Disable VT-x or AMD-V on the current CPU
    839793 *
     
    844798static int hwaccmR0EnableCpu(PVM pVM, RTCPUID idCpu)
    845799{
    846     void           *pvPageCpu;
    847     RTHCPHYS        pPageCpuPhys;
    848800    PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
    849801
     
    854806    Assert(!HWACCMR0Globals.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
    855807
    856     pCpu->idCpu     = idCpu;
     808    pCpu->idCpu         = idCpu;
    857809
    858810    /* Make sure we start with a clean TLB. */
    859     pCpu->fFlushTLB = true;
    860 
    861     pCpu->uCurrentASID = 0;   /* we'll aways increment this the first time (host uses ASID 0) */
    862     pCpu->cTLBFlushes  = 0;
     811    pCpu->fFlushTLB     = true;
     812
     813    pCpu->uCurrentASID  = 0;    /* we'll aways increment this the first time (host uses ASID 0) */
     814    pCpu->cTLBFlushes   = 0;
    863815
    864816    /* Should never happen */
    865     if (!pCpu->pMemObj)
     817    if (pCpu->hMemObj == NIL_RTR0MEMOBJ)
    866818    {
    867819        AssertLogRelMsgFailed(("hwaccmR0EnableCpu failed idCpu=%u.\n", idCpu));
     
    869821    }
    870822
    871     pvPageCpu    = RTR0MemObjAddress(pCpu->pMemObj);
    872     pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
    873 
    874     int rc  = HWACCMR0Globals.pfnEnableCpu(pCpu, pVM, pvPageCpu, pPageCpuPhys);
     823    void    *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
     824    RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
     825
     826    int rc = HWACCMR0Globals.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage);
    875827    AssertRC(rc);
    876828    if (RT_SUCCESS(rc))
     
    899851
    900852/**
     853 * RTOnce callback employed by HWACCMR0EnableAllCpus.
     854 *
     855 * @returns VBox status code
     856 * @param   pvUser          The VM handle.
     857 * @param   pvUserIgnore    NULL, ignored.
     858 */
     859static DECLCALLBACK(int32_t) hwaccmR0EnableAllCpuOnce(void *pvUser, void *pvUserIgnore)
     860{
     861    PVM pVM = (PVM)pvUser;
     862    NOREF(pvUserIgnore);
     863
     864    /*
     865     * Indicate that we've initialized.
     866     *
     867     * Note! There is a potential race between this function and the suspend
     868     *       notification.  Kind of unlikely though, so ignored for now.
     869     */
     870    AssertReturn(!HWACCMR0Globals.fEnabled, VERR_INTERNAL_ERROR_3);
     871    ASMAtomicWriteBool(&HWACCMR0Globals.fEnabled, true);
     872
     873    /*
     874     * The global init variable is set by the first VM.
     875     */
     876    HWACCMR0Globals.fGlobalInit = pVM->hwaccm.s.fGlobalInit;
     877
     878    int rc;
     879    if (   HWACCMR0Globals.vmx.fSupported
     880        && HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
     881    {
     882        /*
     883         * Global VT-x initialization API (only darwin for now).
     884         */
     885        rc = SUPR0EnableVTx(true /* fEnable */);
     886        if (RT_SUCCESS(rc))
     887        {
     888            for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
     889            {
     890                HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = true;
     891                Assert(HWACCMR0Globals.aCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
     892            }
     893
     894            /* If the host provides a VT-x init API, then we'll rely on that for global init. */
     895            HWACCMR0Globals.fGlobalInit = pVM->hwaccm.s.fGlobalInit = true;
     896        }
     897        else
     898            AssertMsgFailed(("HWACCMR0EnableAllCpus/SUPR0EnableVTx: rc=%Rrc\n", rc));
     899    }
     900    else
     901    {
     902        /*
     903         * We're doing the job ourselves.
     904         */
     905        /* Allocate one page per cpu for the global vt-x and amd-v pages */
     906        for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
     907        {
     908            Assert(HWACCMR0Globals.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
     909
     910            if (RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(i)))
     911            {
     912                rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].hMemObj, PAGE_SIZE, true /* executable R0 mapping */);
     913                AssertLogRelRCReturn(rc, rc);
     914
     915                void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].hMemObj); Assert(pvR0);
     916                ASMMemZeroPage(pvR0);
     917            }
     918            HWACCMR0Globals.aCpuInfo[i].fConfigured = false;
     919        }
     920
     921        if (HWACCMR0Globals.fGlobalInit)
     922        {
     923            /* First time, so initialize each cpu/core. */
     924            HWACCMR0FIRSTRC FirstRc;
     925            hwaccmR0FirstRcInit(&FirstRc);
     926            rc = RTMpOnAll(hwaccmR0EnableCpuCallback, (void *)pVM, &FirstRc);
     927            if (RT_SUCCESS(rc))
     928                rc = hwaccmR0FirstRcGetStatus(&FirstRc);
     929            AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", hwaccmR0FirstRcGetCpuId(&FirstRc), rc));
     930        }
     931        else
     932            rc = VINF_SUCCESS;
     933    }
     934
     935    return rc;
     936}
     937
     938
     939/**
     940 * Sets up HWACCM on all cpus.
     941 *
     942 * @returns VBox status code.
     943 * @param   pVM                 The VM handle.
     944 */
     945VMMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM)
     946{
     947    /* Make sure we don't touch hwaccm after we've disabled hwaccm in
     948       preparation of a suspend. */
     949    if (ASMAtomicReadBool(&HWACCMR0Globals.fSuspended))
     950        return VERR_HWACCM_SUSPEND_PENDING;
     951
     952    return RTOnce(&HWACCMR0Globals.EnableAllCpusOnce, hwaccmR0EnableAllCpuOnce, pVM, NULL);
     953}
     954
     955
     956/**
    901957 * Disable VT-x or AMD-V on the current CPU
    902958 *
     
    906962static int hwaccmR0DisableCpu(RTCPUID idCpu)
    907963{
    908     void           *pvPageCpu;
    909     RTHCPHYS        pPageCpuPhys;
    910     int             rc;
    911964    PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
    912965
     
    915968    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
    916969    Assert(!HWACCMR0Globals.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
    917     Assert(!pCpu->fConfigured || pCpu->pMemObj);
    918 
    919     if (!pCpu->pMemObj)
    920         return (pCpu->fConfigured) ? VERR_NO_MEMORY : VINF_SUCCESS /* not initialized. */;
    921 
    922     pvPageCpu    = RTR0MemObjAddress(pCpu->pMemObj);
    923     pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
    924 
     970    Assert(!pCpu->fConfigured || pCpu->hMemObj != NIL_RTR0MEMOBJ);
     971
     972    if (pCpu->hMemObj == NIL_RTR0MEMOBJ)
     973        return pCpu->fConfigured ? VERR_NO_MEMORY : VINF_SUCCESS /* not initialized. */;
     974
     975    int rc;
    925976    if (pCpu->fConfigured)
    926977    {
    927         rc = HWACCMR0Globals.pfnDisableCpu(pCpu, pvPageCpu, pPageCpuPhys);
     978        void    *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
     979        RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
     980        rc = HWACCMR0Globals.pfnDisableCpu(pCpu, pvCpuPage, HCPhysCpuPage);
    928981        AssertRC(rc);
    929982        pCpu->fConfigured = false;
     
    935988    return rc;
    936989}
     990
    937991
    938992/**
     
    10031057        ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, true);
    10041058
    1005     if (HWACCMR0Globals.enmHwAccmState == HWACCMSTATE_ENABLED)
     1059    if (HWACCMR0Globals.fEnabled)
    10061060    {
    10071061        int             rc;
     
    10411095        }
    10421096    }
     1097
    10431098    if (enmEvent == RTPOWEREVENT_RESUME)
    10441099        ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, false);
     
    10571112VMMR0DECL(int) HWACCMR0InitVM(PVM pVM)
    10581113{
    1059     int             rc;
    1060 
    10611114    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    10621115
     
    11131166        PVMCPU pVCpu = &pVM->aCpus[i];
    11141167
    1115         pVCpu->hwaccm.s.idEnteredCpu              = NIL_RTCPUID;
     1168        pVCpu->hwaccm.s.idEnteredCpu        = NIL_RTCPUID;
    11161169
    11171170        /* Invalidate the last cpu we were running on. */
    1118         pVCpu->hwaccm.s.idLastCpu                 = NIL_RTCPUID;
     1171        pVCpu->hwaccm.s.idLastCpu           = NIL_RTCPUID;
    11191172
    11201173        /* we'll aways increment this the first time (host uses ASID 0) */
    1121         pVCpu->hwaccm.s.uCurrentASID              = 0;
     1174        pVCpu->hwaccm.s.uCurrentASID        = 0;
    11221175    }
    11231176
     
    11251178    PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
    11261179
    1127     /* Note: Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */
     1180    /* Note: Not correct as we can be rescheduled to a different cpu, but the
     1181       fInUse case is mostly for debugging. */
    11281182    ASMAtomicWriteBool(&pCpu->fInUse, true);
    11291183    ASMSetFlags(fFlags);
    11301184
    11311185    /* Init a VT-x or AMD-V VM. */
    1132     rc = HWACCMR0Globals.pfnInitVM(pVM);
     1186    int rc = HWACCMR0Globals.pfnInitVM(pVM);
    11331187
    11341188    ASMAtomicWriteBool(&pCpu->fInUse, false);
     
    13021356VMMR0DECL(int) HWACCMR0Leave(PVM pVM, PVMCPU pVCpu)
    13031357{
    1304     PCPUMCTX        pCtx;
    13051358    int             rc;
    13061359    RTCPUID         idCpu = RTMpCpuId();
    1307     PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
    1308 
     1360    PHWACCM_CPUINFO pCpu  = &HWACCMR0Globals.aCpuInfo[idCpu];
     1361    PCPUMCTX        pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
     1362
     1363
     1364    /** @todo r=bird: This can't be entirely right? */
    13091365    AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    1310 
    1311     pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    13121366
    13131367    /* Note:  It's rather tricky with longjmps done by e.g. Log statements or the page fault handler.
     
    13321386    if (    pVM->hwaccm.s.fNestedPaging
    13331387        &&  CPUMIsGuestInPagedProtectedModeEx(pCtx))
    1334     {
    13351388        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    1336     }
    13371389
    13381390    /* keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
    13391391#ifdef RT_STRICT
    1340     if (RT_UNLIKELY(    pVCpu->hwaccm.s.idEnteredCpu != idCpu
    1341                     &&  RT_FAILURE(rc)))
     1392    if (RT_UNLIKELY(   pVCpu->hwaccm.s.idEnteredCpu != idCpu
     1393                    && RT_FAILURE(rc)))
    13421394    {
    13431395        AssertMsgFailed(("Owner is %d, I'm %d", (int)pVCpu->hwaccm.s.idEnteredCpu, (int)idCpu));
     
    13721424VMMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
    13731425{
    1374     CPUMCTX *pCtx;
    1375     int      rc;
    13761426#ifdef VBOX_STRICT
    1377     RTCPUID  idCpu = RTMpCpuId(); NOREF(idCpu);
    1378     PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
    1379 #endif
    1380 
     1427    PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[RTMpCpuId()];
    13811428    Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
    1382     Assert(HWACCMR0Globals.aCpuInfo[idCpu].fConfigured);
     1429    Assert(pCpu->fConfigured);
    13831430    AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    13841431    Assert(ASMAtomicReadBool(&pCpu->fInUse) == true);
     1432#endif
    13851433
    13861434#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     
    13881436#endif
    13891437
    1390     pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    1391 
    1392     rc = HWACCMR0Globals.pfnRunGuestCode(pVM, pVCpu, pCtx);
     1438    int rc = HWACCMR0Globals.pfnRunGuestCode(pVM, pVCpu, CPUMQueryGuestCtxPtr(pVCpu));
    13931439
    13941440#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     
    13981444}
    13991445
    1400 
    14011446#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     1447
    14021448/**
    14031449 * Save guest FPU/XMM state (64 bits guest mode & 32 bits host only)
     
    14761522 * @returns cpu structure pointer
    14771523 */
    1478 VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu()
    1479 {
    1480     RTCPUID  idCpu = RTMpCpuId();
    1481 
     1524VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu(void)
     1525{
     1526    RTCPUID idCpu = RTMpCpuId();
     1527    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
    14821528    return &HWACCMR0Globals.aCpuInfo[idCpu];
    14831529}
     
    15361582}
    15371583
    1538 /**
    1539  * Disable VT-x if it's active *and* the current switcher turns off paging
     1584
     1585/**
     1586 * Raw-mode switcher hook - disable VT-x if it's active *and* the current
     1587 * switcher turns off paging.
    15401588 *
    15411589 * @returns VBox status code.
    15421590 * @param   pVM             VM handle.
    1543  * @param   pfVTxDisabled   VT-x was disabled or not (out)
     1591 * @param   pfVTxDisabled   VT-x was disabled or not (out).
    15441592 */
    15451593VMMR0DECL(int) HWACCMR0EnterSwitcher(PVM pVM, bool *pfVTxDisabled)
     
    15491597    *pfVTxDisabled = false;
    15501598
    1551     if (    HWACCMR0Globals.enmHwAccmState != HWACCMSTATE_ENABLED
    1552         ||  !HWACCMR0Globals.vmx.fSupported /* no such issues with AMD-V */
    1553         ||  !HWACCMR0Globals.fGlobalInit    /* Local init implies the CPU is currently not in VMX root mode. */)
     1599    if (   !HWACCMR0Globals.fEnabled
     1600        || !HWACCMR0Globals.vmx.fSupported /* no such issues with AMD-V */
     1601        || !HWACCMR0Globals.fGlobalInit    /* Local init implies the CPU is currently not in VMX root mode. */)
    15541602        return VINF_SUCCESS;    /* nothing to do */
    15551603
    15561604    switch(VMMGetSwitcher(pVM))
    15571605    {
    1558     case VMMSWITCHER_32_TO_32:
    1559     case VMMSWITCHER_PAE_TO_PAE:
    1560         return VINF_SUCCESS;    /* safe switchers as they don't turn off paging */
    1561 
    1562     case VMMSWITCHER_32_TO_PAE:
    1563     case VMMSWITCHER_PAE_TO_32: /* is this one actually used?? */
    1564     case VMMSWITCHER_AMD64_TO_32:
    1565     case VMMSWITCHER_AMD64_TO_PAE:
    1566         break;                  /* unsafe switchers */
    1567 
    1568     default:
    1569         AssertFailed();
    1570         return VERR_INTERNAL_ERROR;
     1606        case VMMSWITCHER_32_TO_32:
     1607        case VMMSWITCHER_PAE_TO_PAE:
     1608            return VINF_SUCCESS;    /* safe switchers as they don't turn off paging */
     1609
     1610        case VMMSWITCHER_32_TO_PAE:
     1611        case VMMSWITCHER_PAE_TO_32: /* is this one actually used?? */
     1612        case VMMSWITCHER_AMD64_TO_32:
     1613        case VMMSWITCHER_AMD64_TO_PAE:
     1614            break;                  /* unsafe switchers */
     1615
     1616        default:
     1617            AssertFailed();
     1618            return VERR_INTERNAL_ERROR;
    15711619    }
    15721620
    15731621    PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
    1574     void           *pvPageCpu;
    1575     RTHCPHYS        pPageCpuPhys;
    1576 
    1577     AssertReturn(pCpu && pCpu->pMemObj, VERR_INTERNAL_ERROR);
    1578     pvPageCpu    = RTR0MemObjAddress(pCpu->pMemObj);
    1579     pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
     1622    AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_INTERNAL_ERROR);
    15801623
    15811624    *pfVTxDisabled = true;
    1582     return VMXR0DisableCpu(pCpu, pvPageCpu, pPageCpuPhys);
    1583 }
    1584 
    1585 /**
    1586  * Enable VT-x if was active *and* the current switcher turned off paging
     1625    void    *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
     1626    RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
     1627    return VMXR0DisableCpu(pCpu, pvCpuPage, HCPhysCpuPage);
     1628}
     1629
     1630/**
     1631 * Raw-mode switcher hook - re-enable VT-x if was active *and* the current
     1632 * switcher turned off paging.
    15871633 *
    15881634 * @returns VBox status code.
    1589  * @param   pVM          VM handle.
    1590  * @param   fVTxDisabled VT-x was disabled or not
     1635 * @param   pVM             VM handle.
     1636 * @param   fVTxDisabled    VT-x was disabled or not.
    15911637 */
    15921638VMMR0DECL(int) HWACCMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
     
    15971643        return VINF_SUCCESS;    /* nothing to do */
    15981644
    1599     Assert(   HWACCMR0Globals.enmHwAccmState == HWACCMSTATE_ENABLED
    1600            && HWACCMR0Globals.vmx.fSupported
    1601            && HWACCMR0Globals.fGlobalInit);
     1645    Assert(HWACCMR0Globals.fEnabled);
     1646    Assert(HWACCMR0Globals.vmx.fSupported);
     1647    Assert(HWACCMR0Globals.fGlobalInit);
    16021648
    16031649    PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
    1604     void           *pvPageCpu;
    1605     RTHCPHYS        pPageCpuPhys;
    1606 
    1607     AssertReturn(pCpu && pCpu->pMemObj, VERR_INTERNAL_ERROR);
    1608     pvPageCpu    = RTR0MemObjAddress(pCpu->pMemObj);
    1609     pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
    1610 
    1611     return VMXR0EnableCpu(pCpu, pVM, pvPageCpu, pPageCpuPhys);
     1650    AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_INTERNAL_ERROR);
     1651
     1652    void           *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
     1653    RTHCPHYS        HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
     1654    return VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage);
    16121655}
    16131656
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r35346 r37319  
    6464 * @param   pCpu            CPU info struct
    6565 * @param   pVM             The VM to operate on. (can be NULL after a resume!!)
    66  * @param   pvPageCpu       Pointer to the global cpu page
    67  * @param   pPageCpuPhys    Physical address of the global cpu page
     66 * @param   pvCpuPage       Pointer to the global cpu page.
     67 * @param   HCPhysCpuPage   Physical address of the global cpu page.
    6868 */
    69 VMMR0DECL(int) SVMR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
     69VMMR0DECL(int) SVMR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    7070{
    71     AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    72     AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);
    73 
    74     /* We must turn on AMD-V and setup the host state physical address, as those MSRs are per-cpu/core. */
    75     uint64_t val = ASMRdMsr(MSR_K6_EFER);
    76     if (val & MSR_K6_EFER_SVME)
    77     {
    78         /* If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active, then we blindly use AMD-V. */
     71    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     72    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
     73
     74    /* We must turn on AMD-V and setup the host state physical address, as
     75       those MSRs are per-cpu/core. */
     76    uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
     77    if (fEfer & MSR_K6_EFER_SVME)
     78    {
     79        /* If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active, then we
     80           blindly use AMD-V. */
    7981        if (    pVM
    8082            &&  pVM->hwaccm.s.svm.fIgnoreInUseError)
    81         {
    8283            pCpu->fIgnoreAMDVInUseError = true;
    83         }
    84 
    8584        if (!pCpu->fIgnoreAMDVInUseError)
    8685            return VERR_SVM_IN_USE;
     
    8887
    8988    /* Turn on AMD-V in the EFER MSR. */
    90     ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
    91 
    92     /* Write the physical page address where the CPU will store the host state while executing the VM. */
    93     ASMWrMsr(MSR_K8_VM_HSAVE_PA, pPageCpuPhys);
     89    ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);
     90
     91    /* Write the physical page address where the CPU will store the host state
     92       while executing the VM. */
     93    ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
    9494
    9595    return VINF_SUCCESS;
     
    101101 * @returns VBox status code.
    102102 * @param   pCpu            CPU info struct
    103  * @param   pvPageCpu       Pointer to the global cpu page
    104  * @param   pPageCpuPhys    Physical address of the global cpu page
     103 * @param   pvCpuPage       Pointer to the global cpu page.
     104 * @param   HCPhysCpuPage   Physical address of the global cpu page.
    105105 */
    106 VMMR0DECL(int) SVMR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
     106VMMR0DECL(int) SVMR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    107107{
    108     AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    109     AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);
     108    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     109    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
    110110
    111111    /* Turn off AMD-V in the EFER MSR. */
    112     uint64_t val = ASMRdMsr(MSR_K6_EFER);
    113     ASMWrMsr(MSR_K6_EFER, val & ~MSR_K6_EFER_SVME);
     112    uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
     113    ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
    114114
    115115    /* Invalidate host state physical address. */
     
    12151215    pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
    12161216
    1217     /** Set TLB flush state as checked until we return from the world switch. */
     1217    /* Set TLB flush state as checked until we return from the world switch. */
    12181218    ASMAtomicWriteU8(&pVCpu->hwaccm.s.fCheckedTLBFlush, true);
    12191219
     
    12221222        pVCpu->hwaccm.s.fForceTLBFlush = true;
    12231223
    1224     /* Make sure we flush the TLB when required. Switch ASID to achieve the same thing, but without actually flushing the whole TLB (which is expensive). */
     1224    /* Make sure we flush the TLB when required.  Switch ASID to achieve the
     1225       same thing, but without actually flushing the whole TLB (which is
     1226       expensive). */
    12251227    if (    pVCpu->hwaccm.s.fForceTLBFlush
    12261228        && !pVM->hwaccm.s.svm.fAlwaysFlushTLB)
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.h

    r35346 r37319  
    6767 * @param   pPageCpuPhys    Physical address of the global cpu page
    6868 */
    69 VMMR0DECL(int) SVMR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     69VMMR0DECL(int) SVMR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage);
    7070
    7171/**
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r36643 r37319  
    101101 * @param   pCpu            CPU info struct
    102102 * @param   pVM             The VM to operate on. (can be NULL after a resume!!)
    103  * @param   pvPageCpu       Pointer to the global cpu page
    104  * @param   pPageCpuPhys    Physical address of the global cpu page
     103 * @param   pvCpuPage       Pointer to the global cpu page.
     104 * @param   HCPhysCpuPage   Physical address of the global cpu page.
    105105 */
    106 VMMR0DECL(int) VMXR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
     106VMMR0DECL(int) VMXR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    107107{
    108     AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    109     AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);
     108    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     109    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
    110110
    111111    if (pVM)
    112112    {
    113113        /* Set revision dword at the beginning of the VMXON structure. */
    114         *(uint32_t *)pvPageCpu = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
     114        *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
    115115    }
    116116
     
    125125    ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
    126126
    127     /* Enter VMX Root Mode */
    128     int rc = VMXEnable(pPageCpuPhys);
     127    /* Enter VMX Root Mode. */
     128    int rc = VMXEnable(HCPhysCpuPage);
    129129    if (RT_FAILURE(rc))
    130130    {
     
    140140 * @returns VBox status code.
    141141 * @param   pCpu            CPU info struct
    142  * @param   pvPageCpu       Pointer to the global cpu page
    143  * @param   pPageCpuPhys    Physical address of the global cpu page
     142 * @param   pvCpuPage       Pointer to the global cpu page.
     143 * @param   HCPhysCpuPage   Physical address of the global cpu page.
    144144 */
    145 VMMR0DECL(int) VMXR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
     145VMMR0DECL(int) VMXR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    146146{
    147     AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    148     AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);
     147    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     148    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
    149149
    150150    /* If we're somehow not in VMX root mode, then we shouldn't dare leaving it. */
     
    155155    VMXDisable();
    156156
    157     /* And clear the X86_CR4_VMXE bit */
     157    /* And clear the X86_CR4_VMXE bit. */
    158158    ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
    159159    return VINF_SUCCESS;
     
    22592259            /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
    22602260            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
    2261             for (unsigned i=0;i<pVCpu->hwaccm.s.TlbShootdown.cPages;i++)
     2261            for (unsigned i = 0; i < pVCpu->hwaccm.s.TlbShootdown.cPages; i++)
    22622262                vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushPage, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
    22632263        }
     
    22762276        vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, 0);
    22772277
    2278 #ifdef VBOX_WITH_STATISTICS
     2278# ifdef VBOX_WITH_STATISTICS
    22792279    if (pVCpu->hwaccm.s.fForceTLBFlush)
    22802280        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch);
    22812281    else
    22822282        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
    2283 #endif
     2283# endif
    22842284}
    22852285#endif /* HWACCM_VTX_WITH_VPID */
     
    46164616    uint32_t        aParam[6];
    46174617    PHWACCM_CPUINFO pCpu;
    4618     RTHCPHYS        pPageCpuPhys;
     4618    RTHCPHYS        HCPhysCpuPage;
    46194619    int             rc;
    46204620
    46214621    pCpu = HWACCMR0GetCurrentCpu();
    4622     pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
     4622    HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    46234623
    46244624#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    46294629
    46304630#ifdef DEBUG
    4631     pCache->TestIn.pPageCpuPhys = 0;
     4631    pCache->TestIn.HCPhysCpuPage= 0;
    46324632    pCache->TestIn.pVMCSPhys    = 0;
    46334633    pCache->TestIn.pCache       = 0;
     
    46384638#endif
    46394639
    4640     aParam[0] = (uint32_t)(pPageCpuPhys);                                   /* Param 1: VMXON physical address - Lo. */
    4641     aParam[1] = (uint32_t)(pPageCpuPhys >> 32);                             /* Param 1: VMXON physical address - Hi. */
     4640    aParam[0] = (uint32_t)(HCPhysCpuPage);                                  /* Param 1: VMXON physical address - Lo. */
     4641    aParam[1] = (uint32_t)(HCPhysCpuPage >> 32);                            /* Param 1: VMXON physical address - Hi. */
    46424642    aParam[2] = (uint32_t)(pVCpu->hwaccm.s.vmx.pVMCSPhys);                  /* Param 2: VMCS physical address - Lo. */
    46434643    aParam[3] = (uint32_t)(pVCpu->hwaccm.s.vmx.pVMCSPhys >> 32);            /* Param 2: VMCS physical address - Hi. */
     
    46584658
    46594659#ifdef DEBUG
    4660     AssertMsg(pCache->TestIn.pPageCpuPhys == pPageCpuPhys, ("%RHp vs %RHp\n", pCache->TestIn.pPageCpuPhys, pPageCpuPhys));
     4660    AssertMsg(pCache->TestIn.HCPhysCpuPage== HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
    46614661    AssertMsg(pCache->TestIn.pVMCSPhys    == pVCpu->hwaccm.s.vmx.pVMCSPhys, ("%RHp vs %RHp\n", pCache->TestIn.pVMCSPhys, pVCpu->hwaccm.s.vmx.pVMCSPhys));
    46624662    AssertMsg(pCache->TestIn.pVMCSPhys    == pCache->TestOut.pVMCSPhys, ("%RHp vs %RHp\n", pCache->TestIn.pVMCSPhys, pCache->TestOut.pVMCSPhys));
     
    46844684    int             rc, rc2;
    46854685    PHWACCM_CPUINFO pCpu;
    4686     RTHCPHYS        pPageCpuPhys;
     4686    RTHCPHYS        HCPhysCpuPage;
    46874687    RTHCUINTREG     uOldEFlags;
    46884688
     
    47044704
    47054705    pCpu = HWACCMR0GetCurrentCpu();
    4706     pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
     4706    HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    47074707
    47084708    /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */
     
    47284728
    47294729    /* Enter VMX Root Mode */
    4730     rc2 = VMXEnable(pPageCpuPhys);
     4730    rc2 = VMXEnable(HCPhysCpuPage);
    47314731    if (RT_FAILURE(rc2))
    47324732    {
  • trunk/src/VBox/VMM/include/HWACCMInternal.h

    r37298 r37319  
    154154#define HWACCM_SSM_VERSION_2_0_X            3
    155155
    156 /* Per-cpu information. (host) */
     156/**
     157 * Global per-cpu information. (host)
     158 */
    157159typedef struct
    158160{
     161    /** The CPU ID. */
    159162    RTCPUID             idCpu;
    160 
    161     RTR0MEMOBJ          pMemObj;
     163    /** The memory object   */
     164    RTR0MEMOBJ          hMemObj;
    162165    /* Current ASID (AMD-V)/VPID (Intel) */
    163166    uint32_t            uCurrentASID;
     
    883886#ifdef IN_RING0
    884887
    885 VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu();
     888VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu(void);
    886889VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu);
    887890
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette