VirtualBox

Changeset 7471 in vbox


Timestamp: Mar 17, 2008 10:50:10 AM
Author: vboxsync
Message:

Rewrote VT-x & AMD-V mode changes. Requires the MP APIs in our runtime to function properly. (Only tested on Windows.)
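The "MP APIs" referred to here are IPRT's multiprocessor routines (iprt/mp.h), which this changeset uses to configure VT-x/AMD-V on every online CPU. Below is a minimal, hypothetical sketch of that pattern; the example names (exampleEnableWorker, exampleEnableAllCpus) are illustrative stand-ins for the HWACCMR0EnableCPU/HWACCMR0EnableAllCpus functions added in the diff, not part of the changeset itself.

    #include <iprt/mp.h>        /* RTMpOnAll, RTCPUID */
    #include <iprt/cpuset.h>    /* RTCPUSET_MAX_CPUS */
    #include <iprt/string.h>    /* memset */
    #include <VBox/err.h>       /* VINF_SUCCESS */

    /* Worker: RTMpOnAll invokes this once on each online CPU, passing the
     * CPU id and two opaque user arguments. */
    static DECLCALLBACK(void) exampleEnableWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    {
        int *paRc = (int *)pvUser2;     /* one result slot per CPU */
        NOREF(pvUser1);
        /* ... enable VT-x or AMD-V on this CPU and record the outcome ... */
        paRc[idCpu] = VINF_SUCCESS;
    }

    /* Run the worker on all CPUs; the caller then inspects the per-CPU
     * results, as HWACCMR0EnableAllCpus does in the diff below. */
    static int exampleEnableAllCpus(void *pvUser1)
    {
        int aRc[RTCPUSET_MAX_CPUS];
        memset(aRc, 0, sizeof(aRc));
        return RTMpOnAll(exampleEnableWorker, pvUser1, aRc);
    }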

Location: trunk
Files: 10 edited

  • trunk/include/VBox/hwaccm.h

    r5999 r7471  
    3030#include <VBox/types.h>
    3131#include <VBox/pgm.h>
     32#include <iprt/mp.h>
    3233
    3334
     
    3637 */
    3738
     39/**
     40 * HWACCM state
     41 */
     42typedef enum HWACCMSTATE
     43{
     44    /* Not yet set */
     45    HWACCMSTATE_UNINITIALIZED = 0,
     46    /* Enabled */
     47    HWACCMSTATE_ENABLED,
     48    /* Disabled */
     49    HWACCMSTATE_DISABLED,
     50    /** The usual 32-bit hack. */
     51    HWACCMSTATE_32BIT_HACK = 0x7fffffff
     52} HWACCMSTATE;
    3853
    3954__BEGIN_DECLS
     
    5469
    5570/**
    56  * Does Ring-0 HWACCM initialization.
     71 * Does global Ring-0 HWACCM initialization.
     72 *
     73 * @returns VBox status code.
     74 */
     75HWACCMR0DECL(int) HWACCMR0Init();
     76
     77/**
     78 * Does global Ring-0 HWACCM termination.
     79 *
     80 * @returns VBox status code.
     81 */
     82HWACCMR0DECL(int) HWACCMR0Term();
     83
     84/**
     85 * Does Ring-0 per VM HWACCM initialization.
    5786 *
    5887 * This is mainly to check that the Host CPU mode is compatible
     
    6291 * @param   pVM         The VM to operate on.
    6392 */
    64 HWACCMR0DECL(int) HWACCMR0Init(PVM pVM);
     93HWACCMR0DECL(int) HWACCMR0InitVM(PVM pVM);
     94
     95/**
     96 * Sets up HWACCM on all cpus.
     97 *
     98 * @returns VBox status code.
     99 * @param   pVM                 The VM to operate on.
     100 * @param   enmNewHwAccmState   New hwaccm state
     101 *
     102 */
     103HWACCMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM, HWACCMSTATE enmNewHwAccmState);
    65104
    66105/** @} */
     
    91130
    92131/**
     132 * Initialize VT-x or AMD-V
     133 *
     134 * @returns VBox status code.
     135 * @param   pVM         The VM handle.
     136 */
     137HWACCMR3DECL(int) HWACCMR3InitFinalizeR0(PVM pVM);
     138
     139/**
    93140 * Applies relocations to data and code managed by this
    94141 * component. This function will be called at init and
     
    165212
    166213/**
    167  * Does Ring-0 VMX initialization.
    168  *
    169  * @returns VBox status code.
    170  * @param   pVM         The VM to operate on.
    171  */
    172 HWACCMR0DECL(int) HWACCMR0SetupVMX(PVM pVM);
     214 * Sets up a VT-x or AMD-V session
     215 *
     216 * @returns VBox status code.
     217 * @param   pVM         The VM to operate on.
     218 */
     219HWACCMR0DECL(int) HWACCMR0SetupVM(PVM pVM);
    173220
    174221
     
    182229
    183230/**
    184  * Enable VMX or SVN
    185  *
    186  * @returns VBox status code.
    187  * @param   pVM         The VM to operate on.
    188  */
    189 HWACCMR0DECL(int) HWACCMR0Enable(PVM pVM);
    190 
    191 
    192 /**
    193  * Disable VMX or SVN
    194  *
    195  * @returns VBox status code.
    196  * @param   pVM         The VM to operate on.
    197  */
    198 HWACCMR0DECL(int) HWACCMR0Disable(PVM pVM);
     231 * Enters the VT-x or AMD-V session
     232 *
     233 * @returns VBox status code.
     234 * @param   pVM         The VM to operate on.
     235 */
     236HWACCMR0DECL(int) HWACCMR0Enter(PVM pVM);
     237
     238
     239/**
     240 * Leaves the VT-x or AMD-V session
     241 *
     242 * @returns VBox status code.
     243 * @param   pVM         The VM to operate on.
     244 */
     245HWACCMR0DECL(int) HWACCMR0Leave(PVM pVM);
    199246
    200247
  • trunk/src/VBox/VMM/HWACCM.cpp

    r5999 r7471  
    9191        return rc;
    9292
    93     /** @todo Make sure both pages are either not accessible or readonly! */
    94     /* Allocate one page for VMXON. */
    95     pVM->hwaccm.s.vmx.pVMXON = SUPContAlloc(1, &pVM->hwaccm.s.vmx.pVMXONPhys);
    96     if (pVM->hwaccm.s.vmx.pVMXON == 0)
    97     {
    98         AssertMsgFailed(("SUPContAlloc failed!!\n"));
    99         return VERR_NO_MEMORY;
    100     }
    101     memset(pVM->hwaccm.s.vmx.pVMXON, 0, PAGE_SIZE);
    102 
    10393    /* Allocate one page for the VM control structure (VMCS). */
    10494    pVM->hwaccm.s.vmx.pVMCS = SUPContAlloc(1, &pVM->hwaccm.s.vmx.pVMCSPhys);
     
    121111
    122112    /* Reuse those three pages for AMD SVM. (one is active; never both) */
    123     pVM->hwaccm.s.svm.pHState       = pVM->hwaccm.s.vmx.pVMXON;
    124     pVM->hwaccm.s.svm.pHStatePhys   = pVM->hwaccm.s.vmx.pVMXONPhys;
    125113    pVM->hwaccm.s.svm.pVMCB         = pVM->hwaccm.s.vmx.pVMCS;
    126114    pVM->hwaccm.s.svm.pVMCBPhys     = pVM->hwaccm.s.vmx.pVMCSPhys;
     
    262250
    263251/**
    264  * Applies relocations to data and code managed by this
    265  * component. This function will be called at init and
    266  * whenever the VMM need to relocate it self inside the GC.
    267  *
    268  * @param   pVM     The VM.
    269  */
    270 HWACCMR3DECL(void) HWACCMR3Relocate(PVM pVM)
    271 {
    272 #ifdef LOG_ENABLED
    273     Log(("HWACCMR3Relocate to %VGv\n", MMHyperGetArea(pVM, 0)));
    274 #endif
     252 * Initialize VT-x or AMD-V.
     253 *
     254 * @returns VBox status code.
     255 * @param   pVM         The VM handle.
     256 */
     257HWACCMR3DECL(int) HWACCMR3InitFinalizeR0(PVM pVM)
     258{
     259    int rc;
     260
     261    /*
     262     * Note that we have a global setting for VT-x/AMD-V usage. VMX root mode changes the way the CPU operates. Our 64 bits switcher will trap
     263     * because it turns off paging, which is not allowed in VMX root mode.
     264     *
     265     * To simplify matters we'll just force all running VMs to either use raw or hwaccm mode. No mixing allowed.
     266     *
     267     */
     268
     269    /* If we enabled or disabled hwaccm mode, then it can't be changed until all the VMs are shutdown. */
     270    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_ENABLE, (pVM->hwaccm.s.fAllowed) ? HWACCMSTATE_ENABLED : HWACCMSTATE_DISABLED, NULL);
     271    if (VBOX_FAILURE(rc))
     272    {
     273        LogRel(("HWACCMR3InitFinalize: SUPCallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Vrc\n", rc));
     274        LogRel(("HWACCMR3InitFinalize: disallowed %s of HWACCM\n", pVM->hwaccm.s.fAllowed ? "enabling" : "disabling"));
     275        /* Invert the selection */
     276        pVM->hwaccm.s.fAllowed ^= 1;
     277        LogRel(("HWACCMR3InitFinalize: new HWACCM status = %s\n", pVM->hwaccm.s.fAllowed ? "enabled" : "disabled"));
     278    }
    275279
    276280    if (pVM->hwaccm.s.fAllowed == false)
    277         return ;
     281        return VINF_SUCCESS;    /* disabled */
     282
     283    Assert(!pVM->fHWACCMEnabled);
    278284
    279285    if (pVM->hwaccm.s.vmx.fSupported)
     
    424430            memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
    425431
    426             int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
     432            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
    427433            AssertRC(rc);
    428434            if (rc == VINF_SUCCESS)
     
    458464            pVM->hwaccm.s.fInitialized = true;
    459465
    460             int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
     466            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
    461467            AssertRC(rc);
    462468            if (rc == VINF_SUCCESS)
     
    480486        LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%VX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
    481487    }
    482 
     488    return VINF_SUCCESS;
     489}
     490
     491/**
     492 * Applies relocations to data and code managed by this
     493 * component. This function will be called at init and
     494 * whenever the VMM need to relocate it self inside the GC.
     495 *
     496 * @param   pVM     The VM.
     497 */
     498HWACCMR3DECL(void) HWACCMR3Relocate(PVM pVM)
     499{
     500    Log(("HWACCMR3Relocate to %VGv\n", MMHyperGetArea(pVM, 0)));
     501    return;
    483502}
    484503
     
    526545    }
    527546
    528     if (pVM->hwaccm.s.vmx.pVMXON)
    529     {
    530         SUPContFree(pVM->hwaccm.s.vmx.pVMXON, 1);
    531         pVM->hwaccm.s.vmx.pVMXON = 0;
    532     }
    533547    if (pVM->hwaccm.s.vmx.pVMCS)
    534548    {
  • trunk/src/VBox/VMM/HWACCMInternal.h

    r5999 r7471  
    154154        void                       *pVMCS;
    155155
    156         /** Physical address of the VMXON page. */
    157         RTHCPHYS                    pVMXONPhys;
    158         /** Virtual address of the VMXON page. */
    159         void                       *pVMXON;
    160 
    161156        /** Physical address of the TSS page used for real mode emulation. */
    162157        RTHCPHYS                    pRealModeTSSPhys;
     
    165160
    166161        /** Host CR4 value (set by ring-0 VMX init) */
    167         uint32_t                    hostCR4;
     162        uint64_t                    hostCR4;
    168163
    169164        /** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */
     
    216211        /** Virtual address of the host VM control block (VMCB). */
    217212        void                       *pVMCBHost;
    218 
    219         /** Physical address of the Host State page. */
    220         RTHCPHYS                    pHStatePhys;
    221         /** Virtual address of the Host State page. */
    222         void                       *pHState;
    223213
    224214        /** Physical address of the IO bitmap (12kb). */
  • trunk/src/VBox/VMM/VM.cpp

    r6799 r7471  
    694694    if (VBOX_SUCCESS(rc))
    695695        rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
     696
     697    /** todo: move this to the VMINITCOMPLETED_RING0 notification handler once implemented */
     698    if (VBOX_SUCCESS(rc))
     699        rc = HWACCMR3InitFinalizeR0(pVM);
     700
    696701    LogFlow(("vmR3InitRing0: returns %Vrc\n", rc));
    697702    return rc;
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp

    r7105 r7471  
    3636#include <iprt/assert.h>
    3737#include <iprt/asm.h>
     38#include <iprt/string.h>
     39#include <iprt/memobj.h>
     40#include <iprt/cpuset.h>
    3841#include "HWVMXR0.h"
    3942#include "HWSVMR0.h"
    4043
    41 /**
    42  * Does Ring-0 HWACCM initialization.
    43  *
    44  * This is mainly to check that the Host CPU mode is compatible
    45  * with VMX.
     44/*******************************************************************************
     45*   Internal Functions                                                         *
     46*******************************************************************************/
     47static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
     48static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
     49
     50/*******************************************************************************
     51*   Local Variables                                                            *
     52*******************************************************************************/
     53static struct
     54{
     55    struct
     56    {
     57        RTR0MEMOBJ  pMemObj;
     58        bool        fVMXConfigured;
     59        bool        fSVMConfigured;
     60    } aCpuInfo[RTCPUSET_MAX_CPUS];
     61
     62    struct
     63    {
     64        /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
     65        bool                        fSupported;
     66
     67        /** Host CR4 value (set by ring-0 VMX init) */
     68        uint64_t                    hostCR4;
     69
     70        /** VMX MSR values */
     71        struct
     72        {
     73            uint64_t                feature_ctrl;
     74            uint64_t                vmx_basic_info;
     75            uint64_t                vmx_pin_ctls;
     76            uint64_t                vmx_proc_ctls;
     77            uint64_t                vmx_exit;
     78            uint64_t                vmx_entry;
     79            uint64_t                vmx_misc;
     80            uint64_t                vmx_cr0_fixed0;
     81            uint64_t                vmx_cr0_fixed1;
     82            uint64_t                vmx_cr4_fixed0;
     83            uint64_t                vmx_cr4_fixed1;
     84            uint64_t                vmx_vmcs_enum;
     85        } msr;
     86        /* Last instruction error */
     87        uint32_t                    ulLastInstrError;
     88    } vmx;
     89    struct
     90    {
     91        /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
     92        bool                        fSupported;
     93
     94        /** SVM revision. */
     95        uint32_t                    u32Rev;
     96
     97        /** Maximum ASID allowed. */
     98        uint32_t                    u32MaxASID;
     99    } svm;
     100    /** Saved error from detection */
     101    int32_t         lLastError;
     102
     103    struct
     104    {
     105        uint32_t                    u32AMDFeatureECX;
     106        uint32_t                    u32AMDFeatureEDX;
     107    } cpuid;
     108
     109    HWACCMSTATE     enmHwAccmState;
     110} HWACCMR0Globals;
     111
     112
     113
     114/**
     115 * Does global Ring-0 HWACCM initialization.
    46116 *
    47117 * @returns VBox status code.
    48  * @param   pVM         The VM to operate on.
    49  */
    50 HWACCMR0DECL(int) HWACCMR0Init(PVM pVM)
    51 {
    52     LogComFlow(("HWACCMR0Init: %p\n", pVM));
    53 
    54     pVM->hwaccm.s.vmx.fSupported = false;;
    55     pVM->hwaccm.s.svm.fSupported = false;;
     118 */
     119HWACCMR0DECL(int) HWACCMR0Init()
     120{
     121    int        rc;
     122    RTR0MEMOBJ pScatchMemObj;
     123    void      *pvScatchPage;
     124    RTHCPHYS   pScatchPagePhys;
     125
     126    memset(&HWACCMR0Globals, 0, sizeof(HWACCMR0Globals));
     127    HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;
     128
     129    rc = RTR0MemObjAllocCont(&pScatchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
     130    if (RT_FAILURE(rc))
     131        return rc;
     132
     133    pvScatchPage    = RTR0MemObjAddress(pScatchMemObj);
     134    pScatchPagePhys = RTR0MemObjGetPagePhysAddr(pScatchMemObj, 0);
     135    memset(pvScatchPage, 0, PAGE_SIZE);
     136
     137    /* Assume success */
     138    rc = VINF_SUCCESS;
    56139
    57140#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */
    58141
    59     pVM->hwaccm.s.fHWACCMR0Init = true;
    60     pVM->hwaccm.s.lLastError    = VINF_SUCCESS;
    61 
    62142    /*
    63      * Check for VMX capabilities
     143     * Check for VT-x and AMD-V capabilities
    64144     */
    65145    if (ASMHasCpuId())
     
    70150        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;
    71151
     152        /* Make sure we don't get rescheduled to another cpu during this probe. */
     153        RTCCUINTREG fFlags = ASMIntDisableFlags();
     154
    72155        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
    73156        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
    74157        /* Query AMD features. */
    75         ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &pVM->hwaccm.s.cpuid.u32AMDFeatureECX, &pVM->hwaccm.s.cpuid.u32AMDFeatureEDX);
     158        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);
    76159
    77160        if (    u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
     
    89172               )
    90173            {
    91                 pVM->hwaccm.s.vmx.msr.feature_ctrl    = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
     174                HWACCMR0Globals.vmx.msr.feature_ctrl    = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    92175                /*
    93176                 * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
     
    95178                 */
    96179                /** @todo need to check this for each cpu/core in the system!!!) */
    97                 if (!(pVM->hwaccm.s.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
     180                if (!(HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
    98181                {
    99182                    /* MSR is not yet locked; we can change it ourselves here */
    100                     pVM->hwaccm.s.vmx.msr.feature_ctrl |= (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK);
    101                     ASMWrMsr(MSR_IA32_FEATURE_CONTROL, pVM->hwaccm.s.vmx.msr.feature_ctrl);
     183                    HWACCMR0Globals.vmx.msr.feature_ctrl |= (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK);
     184                    ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl);
    102185                }
    103186
    104                 if (   (pVM->hwaccm.s.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
     187                if (   (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
    105188                                                          == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
    106189                {
    107                     pVM->hwaccm.s.vmx.fSupported          = true;
    108                     pVM->hwaccm.s.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
    109                     pVM->hwaccm.s.vmx.msr.vmx_pin_ctls    = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
    110                     pVM->hwaccm.s.vmx.msr.vmx_proc_ctls   = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
    111                     pVM->hwaccm.s.vmx.msr.vmx_exit        = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
    112                     pVM->hwaccm.s.vmx.msr.vmx_entry       = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
    113                     pVM->hwaccm.s.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
    114                     pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
    115                     pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
    116                     pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
    117                     pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
    118                     pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
     190                    HWACCMR0Globals.vmx.fSupported          = true;
     191                    HWACCMR0Globals.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
     192                    HWACCMR0Globals.vmx.msr.vmx_pin_ctls    = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
     193                    HWACCMR0Globals.vmx.msr.vmx_proc_ctls   = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
     194                    HWACCMR0Globals.vmx.msr.vmx_exit        = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
     195                    HWACCMR0Globals.vmx.msr.vmx_entry       = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
     196                    HWACCMR0Globals.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
     197                    HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
     198                    HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
     199                    HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
     200                    HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
     201                    HWACCMR0Globals.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
    119202
    120203                    /*
    121204                     * Check CR4.VMXE
    122205                     */
    123                     pVM->hwaccm.s.vmx.hostCR4 = ASMGetCR4();
    124                     if (!(pVM->hwaccm.s.vmx.hostCR4 & X86_CR4_VMXE))
     206                    HWACCMR0Globals.vmx.hostCR4 = ASMGetCR4();
     207                    if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
    125208                    {
    126209                        /* In theory this bit could be cleared behind our back. Which would cause #UD faults when we
    127210                         * try to execute the VMX instructions...
    128211                         */
    129                         ASMSetCR4(pVM->hwaccm.s.vmx.hostCR4 | X86_CR4_VMXE);
     212                        ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
    130213                    }
    131214
    132                     if (    pVM->hwaccm.s.vmx.pVMXONPhys
    133                         &&  pVM->hwaccm.s.vmx.pVMXON)
     215                    /* Set revision dword at the beginning of the structure. */
     216                    *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);
     217
     218#if HC_ARCH_BITS == 64
     219                    /* Enter VMX Root Mode */
     220                    rc = VMXEnable(pScatchPagePhys);
     221                    if (VBOX_FAILURE(rc))
    134222                    {
    135                         /* Set revision dword at the beginning of the structure. */
    136                         *(uint32_t *)pVM->hwaccm.s.vmx.pVMXON = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
    137 
    138 #if HC_ARCH_BITS == 64
    139                         /* Enter VMX Root Mode */
    140                         int rc = VMXEnable(pVM->hwaccm.s.vmx.pVMXONPhys);
    141                         if (VBOX_FAILURE(rc))
    142                         {
    143                             /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
    144                              * (a) clearing X86_CR4_VMXE in CR4 causes a #GP    (we no longer modify this bit)
    145                              * (b) turning off paging causes a #GP              (unavoidable when switching from long to 32 bits mode)
    146                              *
    147                              * They should fix their code, but until they do we simply refuse to run.
    148                              */
    149                             return VERR_VMX_IN_VMX_ROOT_MODE;
    150                         }
     223                        /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
     224                         * (a) clearing X86_CR4_VMXE in CR4 causes a #GP    (we no longer modify this bit)
     225                         * (b) turning off paging causes a #GP              (unavoidable when switching from long to 32 bits mode)
     226                         *
     227                         * They should fix their code, but until they do we simply refuse to run.
     228                         */
     229                        rc = VERR_VMX_IN_VMX_ROOT_MODE;
     230                    }
     231                    else
    151232                        VMXDisable();
    152233#endif
    153                     }
    154234                    /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode) */
    155                     ASMSetCR4(pVM->hwaccm.s.vmx.hostCR4);
     235                    ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
    156236                }
    157237                else
    158                     pVM->hwaccm.s.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
     238                    HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
    159239            }
    160240            else
    161                 pVM->hwaccm.s.lLastError = VERR_VMX_NO_VMX;
     241                HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
    162242        }
    163243        else
     
    171251             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
    172252             */
    173             if (   (pVM->hwaccm.s.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
     253            if (   (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
    174254                && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
    175255                && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
     
    193273                    {
    194274                        /* Query AMD features. */
    195                         ASMCpuId(0x8000000A, &pVM->hwaccm.s.svm.u32Rev, &pVM->hwaccm.s.svm.u32MaxASID, &u32Dummy, &u32Dummy);
    196 
    197                         pVM->hwaccm.s.svm.fSupported = true;
     275                        ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &u32Dummy);
     276
     277                        HWACCMR0Globals.svm.fSupported = true;
    198278                    }
    199279                    else
    200280                    {
    201                         pVM->hwaccm.s.lLastError = VERR_SVM_ILLEGAL_EFER_MSR;
     281                        HWACCMR0Globals.lLastError = VERR_SVM_ILLEGAL_EFER_MSR;
    202282                        AssertFailed();
    203283                    }
    204284                }
    205285                else
    206                     pVM->hwaccm.s.lLastError = VERR_SVM_DISABLED;
     286                    HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
    207287            }
    208288            else
    209                 pVM->hwaccm.s.lLastError = VERR_SVM_NO_SVM;
     289                HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
    210290        }
    211291        else
    212             pVM->hwaccm.s.lLastError = VERR_HWACCM_UNKNOWN_CPU;
    213     }
    214     else
    215         pVM->hwaccm.s.lLastError = VERR_HWACCM_NO_CPUID;
     292            HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
     293
     294        ASMSetFlags(fFlags);
     295    }
     296    else
     297        HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;
    216298
    217299#endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */
    218300
    219     return VINF_SUCCESS;
    220 }
    221 
    222 
    223 /**
    224  * Sets up and activates VMX
     301    RTR0MemObjFree(pScatchMemObj, false);
     302    return rc;
     303}
     304
     305/**
     306 * Does global Ring-0 HWACCM termination.
     307 *
     308 * @returns VBox status code.
     309 */
     310HWACCMR0DECL(int) HWACCMR0Term()
     311{
     312    int aRc[RTCPUSET_MAX_CPUS];
     313
     314    memset(aRc, 0, sizeof(aRc));
     315    int rc = RTMpOnAll(HWACCMR0DisableCPU, aRc, NULL);
     316    AssertRC(rc);
     317
     318    /* Free the per-cpu pages used for VT-x and AMD-V */
     319    for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
     320    {
     321        AssertMsg(VBOX_SUCCESS(aRc[i]), ("HWACCMR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
     322        if (HWACCMR0Globals.aCpuInfo[i].pMemObj)
     323        {
     324            RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
     325            HWACCMR0Globals.aCpuInfo[i].pMemObj = NULL;
     326        }
     327    }
     328    return rc;
     329}
     330
     331/**
     332 * Sets up HWACCM on all cpus.
     333 *
     334 * @returns VBox status code.
     335 * @param   pVM                 The VM to operate on.
     336 * @param   enmNewHwAccmState   New hwaccm state
     337 *
     338 */
     339HWACCMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM, HWACCMSTATE enmNewHwAccmState)
     340{
     341    Assert(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));
     342    if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, enmNewHwAccmState, HWACCMSTATE_UNINITIALIZED))
     343    {
     344        int aRc[RTCPUSET_MAX_CPUS];
     345        memset(aRc, 0, sizeof(aRc));
     346
     347        /* Allocate one page per cpu for the global vt-x and amd-v pages */
     348        for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
     349        {
     350            Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);
     351
     352            /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
     353            if (RTMpIsCpuOnline(i))
     354            {
     355                int rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
     356                if (RT_FAILURE(rc))
     357                    return rc;
     358
     359                void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
     360                memset(pvR0, 0, PAGE_SIZE);
     361            }
     362        }
     363
     364        /* First time, so initialize each cpu/core */
     365        int rc = RTMpOnAll(HWACCMR0EnableCPU, (void *)pVM, aRc);
     366        if (VBOX_SUCCESS(rc))
     367        {
     368            for (unsigned i=0;i<RT_ELEMENTS(aRc);i++)
     369            {
     370                if (RTMpIsCpuOnline(i))
     371                {
     372                    AssertMsg(VBOX_SUCCESS(aRc[i]), ("HWACCMR0EnableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
     373                    if (VBOX_FAILURE(aRc[i]))
     374                    {
     375                        rc = aRc[i];
     376                        break;
     377                    }
     378                }
     379            }
     380        }
     381        return rc;
     382    }
     383
     384    if (HWACCMR0Globals.enmHwAccmState == enmNewHwAccmState)
     385        return VINF_SUCCESS;
     386
     387    /* Request to change the mode is not allowed */
     388    return VERR_ACCESS_DENIED;
     389}
     390
     391/**
     392 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
     393 * is to be called on the target cpus.
     394 *
     395 * @param   idCpu       The identifier for the CPU the function is called on.
     396 * @param   pvUser1     The 1st user argument.
     397 * @param   pvUser2     The 2nd user argument.
     398 */
     399static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
     400{
     401    PVM      pVM = (PVM)pvUser1;
     402    int     *paRc = (int *)pvUser2;
     403    void    *pvPageCpu;
     404    RTHCPHYS pPageCpuPhys;
     405
     406    Assert(pVM);
     407    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
     408
     409    /* Should never happen */
     410    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
     411    {
     412        AssertFailed();
     413        return;
     414    }
     415
     416    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
     417    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);
     418
     419    if (pVM->hwaccm.s.vmx.fSupported)
     420    {
     421        paRc[idCpu] = VMXR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
     422        if (VBOX_SUCCESS(paRc[idCpu]))
     423            HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = true;
     424    }
     425    else
     426    {
     427        Assert(pVM->hwaccm.s.svm.fSupported);
     428        paRc[idCpu] = SVMR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
     429        if (VBOX_SUCCESS(paRc[idCpu]))
     430            HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = true;
     431    }
     432    return;
     433}
     434
     435/**
     436 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
     437 * is to be called on the target cpus.
     438 *
     439 * @param   idCpu       The identifier for the CPU the function is called on.
     440 * @param   pvUser1     The 1st user argument.
     441 * @param   pvUser2     The 2nd user argument.
     442 */
     443static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
     444{
     445    void    *pvPageCpu;
     446    RTHCPHYS pPageCpuPhys;
     447    int     *paRc = (int *)pvUser1;
     448
     449    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
     450
     451    /* Should never happen */
     452    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
     453    {
     454        AssertFailed();
     455        return;
     456    }
     457
     458    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
     459    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);
     460
     461    if (HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured)
     462    {
     463        paRc[idCpu] = VMXR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
     464        AssertRC(paRc[idCpu]);
     465        HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = false;
     466    }
     467    else
     468    if (HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured)
     469    {
     470        paRc[idCpu] = SVMR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
     471        AssertRC(paRc[idCpu]);
     472        HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = false;
     473    }
     474    return;
     475}
     476
     477
     478/**
     479 * Does Ring-0 per VM HWACCM initialization.
     480 *
     481 * This is mainly to check that the Host CPU mode is compatible
     482 * with VMX.
    225483 *
    226484 * @returns VBox status code.
    227485 * @param   pVM         The VM to operate on.
    228486 */
    229 HWACCMR0DECL(int) HWACCMR0SetupVMX(PVM pVM)
     487HWACCMR0DECL(int) HWACCMR0InitVM(PVM pVM)
     488{
     489    LogComFlow(("HWACCMR0Init: %p\n", pVM));
     490
     491    pVM->hwaccm.s.vmx.fSupported = false;
     492    pVM->hwaccm.s.svm.fSupported = false;
     493
     494    if (HWACCMR0Globals.vmx.fSupported)
     495    {
     496        pVM->hwaccm.s.vmx.fSupported            = true;
     497        pVM->hwaccm.s.vmx.hostCR4               = HWACCMR0Globals.vmx.hostCR4;
     498        pVM->hwaccm.s.vmx.msr.feature_ctrl      = HWACCMR0Globals.vmx.msr.feature_ctrl;
     499        pVM->hwaccm.s.vmx.msr.vmx_basic_info    = HWACCMR0Globals.vmx.msr.vmx_basic_info;
     500        pVM->hwaccm.s.vmx.msr.vmx_pin_ctls      = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
     501        pVM->hwaccm.s.vmx.msr.vmx_proc_ctls     = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
     502        pVM->hwaccm.s.vmx.msr.vmx_exit          = HWACCMR0Globals.vmx.msr.vmx_exit;
     503        pVM->hwaccm.s.vmx.msr.vmx_entry         = HWACCMR0Globals.vmx.msr.vmx_entry;
     504        pVM->hwaccm.s.vmx.msr.vmx_misc          = HWACCMR0Globals.vmx.msr.vmx_misc;
     505        pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
     506        pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
     507        pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
     508        pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
     509        pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum     = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
     510
     511    }
     512    else
     513    if (HWACCMR0Globals.svm.fSupported)
     514    {
     515        pVM->hwaccm.s.svm.fSupported            = true;
     516        pVM->hwaccm.s.svm.u32Rev                = HWACCMR0Globals.svm.u32Rev;
     517        pVM->hwaccm.s.svm.u32MaxASID            = HWACCMR0Globals.svm.u32MaxASID;
     518    }
     519
     520    pVM->hwaccm.s.lLastError                = HWACCMR0Globals.lLastError;
     521    pVM->hwaccm.s.cpuid.u32AMDFeatureECX    = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
     522    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX    = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
     523
     524    return VINF_SUCCESS;
     525}
     526
     527
     528
     529/**
     530 * Sets up a VT-x or AMD-V session
     531 *
     532 * @returns VBox status code.
     533 * @param   pVM         The VM to operate on.
     534 */
     535HWACCMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
    230536{
    231537    int rc = VINF_SUCCESS;
     
    236542    /* Setup Intel VMX. */
    237543    if (pVM->hwaccm.s.vmx.fSupported)
    238         rc = VMXR0Setup(pVM);
    239     else
    240         rc = SVMR0Setup(pVM);
     544        rc = VMXR0SetupVM(pVM);
     545    else
     546        rc = SVMR0SetupVM(pVM);
    241547
    242548    return rc;
     
    245551
    246552/**
    247  * Enable VMX or SVN
     553 * Enters the VT-x or AMD-V session
    248554 *
    249555 * @returns VBox status code.
    250556 * @param   pVM         The VM to operate on.
    251557 */
    252 HWACCMR0DECL(int) HWACCMR0Enable(PVM pVM)
     558HWACCMR0DECL(int) HWACCMR0Enter(PVM pVM)
    253559{
    254560    CPUMCTX *pCtx;
     
    267573    if (pVM->hwaccm.s.vmx.fSupported)
    268574    {
    269         rc  = VMXR0Enable(pVM);
     575        rc  = VMXR0Enter(pVM);
    270576        AssertRC(rc);
    271577        rc |= VMXR0SaveHostState(pVM);
     
    279585    {
    280586        Assert(pVM->hwaccm.s.svm.fSupported);
    281         rc  = SVMR0Enable(pVM);
     587        rc  = SVMR0Enter(pVM);
    282588        AssertRC(rc);
    283589        rc |= SVMR0LoadGuestState(pVM, pCtx);
     
    292598
    293599/**
    294  * Disable VMX or SVN
     600 * Leaves the VT-x or AMD-V session
    295601 *
    296602 * @returns VBox status code.
    297603 * @param   pVM         The VM to operate on.
    298604 */
    299 HWACCMR0DECL(int) HWACCMR0Disable(PVM pVM)
     605HWACCMR0DECL(int) HWACCMR0Leave(PVM pVM)
    300606{
    301607    CPUMCTX *pCtx;
     
    323629    if (pVM->hwaccm.s.vmx.fSupported)
    324630    {
    325         return VMXR0Disable(pVM);
     631        return VMXR0Leave(pVM);
    326632    }
    327633    else
    328634    {
    329635        Assert(pVM->hwaccm.s.svm.fSupported);
    330         return SVMR0Disable(pVM);
     636        return SVMR0Leave(pVM);
    331637    }
    332638}
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r5999 r7471  
    4343
    4444/**
    45  * Sets up and activates SVM
     45 * Sets up and activates AMD-V on the current CPU
     46 *
     47 * @returns VBox status code.
     48 * @param   idCpu           The identifier for the CPU the function is called on.
     49 * @param   pVM             The VM to operate on.
     50 * @param   pvPageCpu       Pointer to the global cpu page
     51 * @param   pPageCpuPhys    Physical address of the global cpu page
     52 */
     53HWACCMR0DECL(int) SVMR0EnableCpu(RTCPUID idCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
     54{
     55    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
     56    AssertReturn(pVM, VERR_INVALID_PARAMETER);
     57    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);
     58
     59    /* We must turn on AMD-V and setup the host state physical address, as those MSRs are per-cpu/core. */
     60
     61    /* Turn on AMD-V in the EFER MSR. */
     62    uint64_t val = ASMRdMsr(MSR_K6_EFER);
     63    if (!(val & MSR_K6_EFER_SVME))
     64        ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
     65
     66    /* Write the physical page address where the CPU will store the host state while executing the VM. */
     67    ASMWrMsr(MSR_K8_VM_HSAVE_PA, pPageCpuPhys);
     68    return VINF_SUCCESS;
     69}
     70
     71/**
     72 * Deactivates AMD-V on the current CPU
     73 *
     74 * @returns VBox status code.
     75 * @param   idCpu           The identifier for the CPU the function is called on.
     76 * @param   pvPageCpu       Pointer to the global cpu page
     77 * @param   pPageCpuPhys    Physical address of the global cpu page
     78 */
     79HWACCMR0DECL(int) SVMR0DisableCpu(RTCPUID idCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
     80{
     81    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
     82    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);
     83
     84    /* Turn off AMD-V in the EFER MSR. */
     85    uint64_t val = ASMRdMsr(MSR_K6_EFER);
     86    ASMWrMsr(MSR_K6_EFER, val & ~MSR_K6_EFER_SVME);
     87
     88    /* Invalidate host state physical address. */
     89    ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
     90    return VINF_SUCCESS;
     91}
     92
     93/**
     94 * Sets up SVM for the specified VM
    4695 *
    4796 * @returns VBox status code.
    4897 * @param   pVM         The VM to operate on.
    4998 */
    50 HWACCMR0DECL(int) SVMR0Setup(PVM pVM)
     99HWACCMR0DECL(int) SVMR0SetupVM(PVM pVM)
    51100{
    52101    int         rc = VINF_SUCCESS;
     
    14131462
    14141463/**
    1415  * Enable SVM
     1464 * Enters the AMD-V session
    14161465 *
    14171466 * @returns VBox status code.
    14181467 * @param   pVM         The VM to operate on.
    14191468 */
    1420 HWACCMR0DECL(int) SVMR0Enable(PVM pVM)
     1469HWACCMR0DECL(int) SVMR0Enter(PVM pVM)
    14211470{
    14221471    uint64_t val;
     
    14241473    Assert(pVM->hwaccm.s.svm.fSupported);
    14251474
    1426     /* We must turn on SVM and setup the host state physical address, as those MSRs are per-cpu/core. */
    1427 
    1428     /* Turn on SVM in the EFER MSR. */
    1429     val = ASMRdMsr(MSR_K6_EFER);
    1430     if (!(val & MSR_K6_EFER_SVME))
    1431         ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
    1432 
    1433     /* Write the physical page address where the CPU will store the host state while executing the VM. */
    1434     ASMWrMsr(MSR_K8_VM_HSAVE_PA, pVM->hwaccm.s.svm.pHStatePhys);
    1435 
    14361475    /* Force a TLB flush on VM entry. */
    14371476    pVM->hwaccm.s.svm.fResumeVM = false;
     
    14451484
    14461485/**
    1447  * Disable SVM
     1486 * Leaves the AMD-V session
    14481487 *
    14491488 * @returns VBox status code.
    14501489 * @param   pVM         The VM to operate on.
    14511490 */
    1452 HWACCMR0DECL(int) SVMR0Disable(PVM pVM)
     1491HWACCMR0DECL(int) SVMR0Leave(PVM pVM)
    14531492{
    1454     /** @todo hopefully this is not very expensive. */
    1455 
    1456     /* Turn off SVM in the EFER MSR. */
    1457     uint64_t val = ASMRdMsr(MSR_K6_EFER);
    1458     ASMWrMsr(MSR_K6_EFER, val & ~MSR_K6_EFER_SVME);
    1459 
    1460     /* Invalidate host state physical address. */
    1461     ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
    1462 
    14631493    Assert(pVM->hwaccm.s.svm.fSupported);
    14641494    return VINF_SUCCESS;
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.h

    r5999 r7471  
    11/* $Id$ */
    22/** @file
    3  * HWACCM SVM - Internal header file.
     3 * HWACCM AMD-V - Internal header file.
    44 */
    55
     
    3939
    4040/**
    41  * Enable SVM
     41 * Enters the AMD-V session
    4242 *
    4343 * @returns VBox status code.
    4444 * @param   pVM         The VM to operate on.
    4545 */
    46 HWACCMR0DECL(int) SVMR0Enable(PVM pVM);
     46HWACCMR0DECL(int) SVMR0Enter(PVM pVM);
    4747
    4848/**
    49  * Disable SVM
     49 * Leaves the AMD-V session
    5050 *
    5151 * @returns VBox status code.
    5252 * @param   pVM         The VM to operate on.
    5353 */
    54 HWACCMR0DECL(int) SVMR0Disable(PVM pVM);
     54HWACCMR0DECL(int) SVMR0Leave(PVM pVM);
    5555
    5656/**
    57  * Sets up and activates SVM
     57 * Sets up and activates AMD-V on the current CPU
     58 *
     59 * @returns VBox status code.
     60 * @param   idCpu           The identifier for the CPU the function is called on.
     61 * @param   pVM             The VM to operate on.
     62 * @param   pvPageCpu       Pointer to the global cpu page
     63 * @param   pPageCpuPhys    Physical address of the global cpu page
     64 */
     65HWACCMR0DECL(int) SVMR0EnableCpu(RTCPUID idCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     66
     67/**
     68 * Deactivates AMD-V on the current CPU
     69 *
     70 * @returns VBox status code.
     71 * @param   idCpu           The identifier for the CPU the function is called on.
     72 * @param   pvPageCpu       Pointer to the global cpu page
     73 * @param   pPageCpuPhys    Physical address of the global cpu page
     74 */
     75HWACCMR0DECL(int) SVMR0DisableCpu(RTCPUID idCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     76
     77/**
     78 * Sets up AMD-V for the specified VM
    5879 *
    5980 * @returns VBox status code.
    6081 * @param   pVM         The VM to operate on.
    6182 */
    62 HWACCMR0DECL(int) SVMR0Setup(PVM pVM);
     83HWACCMR0DECL(int) SVMR0SetupVM(PVM pVM);
    6384
    6485
    6586/**
    66  * Runs guest code in a SVM VM.
     87 * Runs guest code in an AMD-V VM.
    6788 *
    6889 * @note NEVER EVER turn on interrupts here. Due to our illegal entry into the kernel, it might mess things up. (XP kernel traps have been frequently observed)
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r5999 r7471  
    4949
    5050        VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
    51         Log(("VMXR0CheckError -> generic error %x\n", instrError));
    52 
    5351        pVM->hwaccm.s.vmx.ulLastInstrError = instrError;
    54     }
    55     else
    56     {
    57         Log(("VMXR0CheckError failed with %Vrc\n", rc));
    5852    }
    5953    pVM->hwaccm.s.lLastError = rc;
     
    6155
    6256/**
    63  * Sets up and activates VMX
     57 * Sets up and activates VT-x on the current CPU
     58 *
     59 * @returns VBox status code.
     60 * @param   idCpu           The identifier for the CPU the function is called on.
     61 * @param   pVM             The VM to operate on.
     62 * @param   pvPageCpu       Pointer to the global cpu page
     63 * @param   pPageCpuPhys    Physical address of the global cpu page
     64 */
     65HWACCMR0DECL(int) VMXR0EnableCpu(RTCPUID idCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
     66{
     67    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
     68    AssertReturn(pVM, VERR_INVALID_PARAMETER);
     69    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);
     70
     71    /* Setup Intel VMX. */
     72    Assert(pVM->hwaccm.s.vmx.fSupported);
     73
     74    /* Set revision dword at the beginning of the VMXON structure. */
     75    *(uint32_t *)pvPageCpu = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
     76
     77    /* @todo we should unmap the two pages from the virtual address space in order to prevent accidental corruption.
     78     * (which can have very bad consequences!!!)
     79     */
     80
     81    /* Make sure the VMX instructions don't cause #UD faults. */
     82    ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
     83
     84    /* Enter VMX Root Mode */
     85    int rc = VMXEnable(pPageCpuPhys);
     86    if (VBOX_FAILURE(rc))
     87    {
     88        VMXR0CheckError(pVM, rc);
     89        ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
     90        return VERR_VMX_VMXON_FAILED;
     91    }
     92
     93    return VINF_SUCCESS;
     94}
     95
     96/**
     97 * Deactivates VT-x on the current CPU
     98 *
     99 * @returns VBox status code.
     100 * @param   idCpu           The identifier for the CPU the function is called on.
     101 * @param   pvPageCpu       Pointer to the global cpu page
     102 * @param   pPageCpuPhys    Physical address of the global cpu page
     103 */
     104HWACCMR0DECL(int) VMXR0DisableCpu(RTCPUID idCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
     105{
     106    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
     107    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);
     108
     109    /* Leave VMX Root Mode. */
     110    VMXDisable();
     111
     112    /* And clear the X86_CR4_VMXE bit */
     113    ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
     114    return VINF_SUCCESS;
     115}
     116
     117/**
     118 * Sets up VT-x for the specified VM
    64119 *
    65120 * @returns VBox status code.
    66121 * @param   pVM         The VM to operate on.
    67122 */
    68 HWACCMR0DECL(int) VMXR0Setup(PVM pVM)
     123HWACCMR0DECL(int) VMXR0SetupVM(PVM pVM)
    69124{
    70125    int rc = VINF_SUCCESS;
     
    74129        return VERR_INVALID_PARAMETER;
    75130
    76     /* Setup Intel VMX. */
    77     Assert(pVM->hwaccm.s.vmx.fSupported);
    78 
    79     /* Set revision dword at the beginning of both structures. */
     131    /* Set revision dword at the beginning of the VMCS structure. */
    80132    *(uint32_t *)pVM->hwaccm.s.vmx.pVMCS  = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
    81     *(uint32_t *)pVM->hwaccm.s.vmx.pVMXON = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
    82 
    83     /* @todo we should unmap the two pages from the virtual address space in order to prevent accidental corruption.
    84      * (which can have very bad consequences!!!)
    85      */
    86 
    87     /* Make sure the VMX instructions don't cause #UD faults. */
    88     ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
    89 
    90     /* Enter VMX Root Mode */
    91     Log(("pVMXONPhys = %VHp\n", pVM->hwaccm.s.vmx.pVMXONPhys));
    92     rc = VMXEnable(pVM->hwaccm.s.vmx.pVMXONPhys);
    93     if (VBOX_FAILURE(rc))
    94     {
    95         VMXR0CheckError(pVM, rc);
    96         return VERR_VMX_VMXON_FAILED;
    97     }
    98133
    99134    /* Clear VM Control Structure. */
     
    274309vmx_end:
    275310    VMXR0CheckError(pVM, rc);
    276     /* Leave VMX Root Mode. */
    277     VMXDisable();
    278311    return rc;
    279312}
     
    19391972
    19401973/**
    1941  * Enable VMX
     1974 * Enters the VT-x session
    19421975 *
    19431976 * @returns VBox status code.
    19441977 * @param   pVM         The VM to operate on.
    19451978 */
    1946 HWACCMR0DECL(int) VMXR0Enable(PVM pVM)
     1979HWACCMR0DECL(int) VMXR0Enter(PVM pVM)
    19471980{
    19481981    Assert(pVM->hwaccm.s.vmx.fSupported);
    19491982
    1950     /* Make sure the VMX instructions don't cause #UD faults. */
    1951     ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
    1952 
    1953     /* Enter VMX Root Mode */
    1954     int rc = VMXEnable(pVM->hwaccm.s.vmx.pVMXONPhys);
     1983    unsigned cr4 = ASMGetCR4();
     1984    if (!(cr4 & X86_CR4_VMXE))
     1985    {
     1986        AssertMsgFailed(("X86_CR4_VMXE should be set!\n"));
     1987        return VERR_VMX_X86_CR4_VMXE_CLEARED;
     1988    }
     1989
     1990    /* Activate the VM Control Structure. */
     1991    int rc = VMXActivateVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
    19551992    if (VBOX_FAILURE(rc))
    19561993        return rc;
    19571994
    1958     /* Activate the VM Control Structure. */
    1959     rc = VMXActivateVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
    1960     if (VBOX_FAILURE(rc))
    1961     {
    1962         /* Leave VMX Root Mode. */
    1963         VMXDisable();
    1964         return rc;
    1965     }
    19661995    pVM->hwaccm.s.vmx.fResumeVM = false;
    19671996    return VINF_SUCCESS;
     
    19701999
    19712000/**
    1972  * Disable VMX
     2001 * Leaves the VT-x session
    19732002 *
    19742003 * @returns VBox status code.
    19752004 * @param   pVM         The VM to operate on.
    19762005 */
    1977 HWACCMR0DECL(int) VMXR0Disable(PVM pVM)
     2006HWACCMR0DECL(int) VMXR0Leave(PVM pVM)
    19782007{
    19792008    Assert(pVM->hwaccm.s.vmx.fSupported);
     
    19832012    AssertRC(rc);
    19842013
    1985     /* Leave VMX Root Mode. */
    1986     VMXDisable();
    1987 
    19882014    return VINF_SUCCESS;
    19892015}
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.h

    r5999 r7471  
    11/* $Id$ */
    22/** @file
    3  * HWACCM SVM - Internal header file.
     3 * HWACCM VT-x - Internal header file.
    44 */
    55
     
    3939
    4040/**
    41  * Enable VMX
     41 * Enters the VT-x session
    4242 *
    4343 * @returns VBox status code.
    4444 * @param   pVM         The VM to operate on.
    4545 */
    46 HWACCMR0DECL(int) VMXR0Enable(PVM pVM);
     46HWACCMR0DECL(int) VMXR0Enter(PVM pVM);
    4747
    4848/**
    49  * Disable VMX
     49 * Leaves the VT-x session
    5050 *
    5151 * @returns VBox status code.
    5252 * @param   pVM         The VM to operate on.
    5353 */
    54 HWACCMR0DECL(int) VMXR0Disable(PVM pVM);
     54HWACCMR0DECL(int) VMXR0Leave(PVM pVM);
     55
    5556
    5657/**
    57  * Sets up and activates VMX
     58 * Sets up and activates VT-x on the current CPU
     59 *
     60 * @returns VBox status code.
     61 * @param   idCpu           The identifier for the CPU the function is called on.
     62 * @param   pVM             The VM to operate on.
     63 * @param   pvPageCpu       Pointer to the global cpu page
     64 * @param   pPageCpuPhys    Physical address of the global cpu page
     65 */
     66HWACCMR0DECL(int) VMXR0EnableCpu(RTCPUID idCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     67
     68/**
     69 * Deactivates VT-x on the current CPU
     70 *
     71 * @returns VBox status code.
     72 * @param   idCpu           The identifier for the CPU the function is called on.
     73 * @param   pvPageCpu       Pointer to the global cpu page
     74 * @param   pPageCpuPhys    Physical address of the global cpu page
     75 */
     76HWACCMR0DECL(int) VMXR0DisableCpu(RTCPUID idCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     77
     78/**
     79 * Sets up VT-x for the specified VM
    5880 *
    5981 * @returns VBox status code.
    6082 * @param   pVM         The VM to operate on.
    6183 */
    62 HWACCMR0DECL(int) VMXR0Setup(PVM pVM);
     84HWACCMR0DECL(int) VMXR0SetupVM(PVM pVM);
    6385
    6486
     
    82104
    83105/**
    84  * Runs guest code in a VMX VM.
     106 * Runs guest code in a VT-x VM.
    85107 *
    86108 * @note NEVER EVER turn on interrupts here. Due to our illegal entry into the kernel, it might mess things up. (XP kernel traps have been frequently observed)
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r6801 r7471  
    4040#include <iprt/assert.h>
    4141#include <iprt/stdarg.h>
     42#include <iprt/mp.h>
    4243
    4344#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
     
    6566#endif
    6667
     68/*******************************************************************************
     69*   Local Variables                                                            *
     70*******************************************************************************/
    6771
    6872/**
     
    7882
    7983    /*
    80      * Initialize the GVMM and GMM.
     84     * Initialize the GVMM, GMM.& HWACCM
    8185     */
    8286    int rc = GVMMR0Init();
     
    8690        if (RT_SUCCESS(rc))
    8791        {
     92            rc = HWACCMR0Init();
     93            if (RT_SUCCESS(rc))
     94            {
    8895#ifdef VBOX_WITH_INTERNAL_NETWORKING
    89             LogFlow(("ModuleInit: g_pIntNet=%p\n", g_pIntNet));
    90             g_pIntNet = NULL;
    91             LogFlow(("ModuleInit: g_pIntNet=%p should be NULL now...\n", g_pIntNet));
    92             rc = INTNETR0Create(&g_pIntNet);
    93             if (VBOX_SUCCESS(rc))
    94             {
    95                 LogFlow(("ModuleInit: returns success. g_pIntNet=%p\n", g_pIntNet));
     96                LogFlow(("ModuleInit: g_pIntNet=%p\n", g_pIntNet));
     97                g_pIntNet = NULL;
     98                LogFlow(("ModuleInit: g_pIntNet=%p should be NULL now...\n", g_pIntNet));
     99                rc = INTNETR0Create(&g_pIntNet);
     100                if (VBOX_SUCCESS(rc))
     101                {
     102                    LogFlow(("ModuleInit: returns success. g_pIntNet=%p\n", g_pIntNet));
     103                    return VINF_SUCCESS;
     104                }
     105                g_pIntNet = NULL;
     106                LogFlow(("ModuleTerm: returns %Vrc\n", rc));
     107#else
     108                LogFlow(("ModuleInit: returns success.\n"));
    96109                return VINF_SUCCESS;
     110#endif
    97111            }
    98             g_pIntNet = NULL;
    99             LogFlow(("ModuleTerm: returns %Vrc\n", rc));
    100 #else
    101             LogFlow(("ModuleInit: returns success.\n"));
    102             return VINF_SUCCESS;
    103 #endif
    104112        }
    105113    }
     
    128136    }
    129137#endif
     138
     139    /* Global HWACCM cleanup */
     140    HWACCMR0Term();
    130141
    131142    /*
     
    212223         * Init HWACCM.
    213224         */
    214         RTCCUINTREG fFlags = ASMIntDisableFlags();
    215         rc = HWACCMR0Init(pVM);
    216         ASMSetFlags(fFlags);
     225        rc = HWACCMR0InitVM(pVM);
    217226        if (RT_SUCCESS(rc))
    218227        {
     
    609618            RTCCUINTREG uFlags = ASMIntDisableFlags();
    610619#endif
    611             int rc = HWACCMR0Enable(pVM);
     620            int rc = HWACCMR0Enter(pVM);
    612621            if (VBOX_SUCCESS(rc))
    613622            {
    614623                rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM); /* this may resume code. */
    615                 int rc2 = HWACCMR0Disable(pVM);
     624                int rc2 = HWACCMR0Leave(pVM);
    616625                AssertRC(rc2);
    617626            }
     
    732741
    733742        /*
     743         * Attempt to enable hwacc mode and check the current setting.
     744         *
     745         */
     746        case VMMR0_DO_HWACC_ENABLE:
     747            return HWACCMR0EnableAllCpus(pVM, (HWACCMSTATE)u64Arg);
     748
     749        /*
    734750         * Setup the hardware accelerated raw-mode session.
    735751         */
     
    737753        {
    738754            RTCCUINTREG fFlags = ASMIntDisableFlags();
    739             int rc = HWACCMR0SetupVMX(pVM);
     755            int rc = HWACCMR0SetupVM(pVM);
    740756            ASMSetFlags(fFlags);
    741757            return rc;
     
    747763        case VMMR0_DO_CALL_HYPERVISOR:
    748764        {
    749             /* Safety precaution as VMX disables the switcher. */
     765            /* Safety precaution as HWACCM can disable the switcher. */
    750766            Assert(!pVM->vmm.s.fSwitcherDisabled);
    751             if (pVM->vmm.s.fSwitcherDisabled)
     767            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
    752768                return VERR_NOT_SUPPORTED;
    753769