VirtualBox

Changeset 43387 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp: Sep 21, 2012 9:40:25 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 80859
Message:

VMM: HM cleanup.
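
This is chiefly a mechanical rename of the ring-0 hardware-assisted execution manager from the old HWACCM naming to the shorter HM prefix: headers (VBox/vmm/hwaccm.h -> VBox/vmm/hm.h, hwacc_vmx/hwacc_svm -> hm_vmx/hm_svm), entry points (HWACCMR0* -> HMR0*), status codes (VERR_HWACCM_* -> VERR_HM_*), context flags (HWACCM_CHANGED_* -> HM_CHANGED_*), and the per-VM/per-VCPU substructure (pVM->hwaccm.s -> pVM->hm.s). As a minimal sketch of what this means for a ring-0 call site (the wrapper function below is hypothetical; the renamed identifiers themselves all appear in the diff):

    #include <VBox/vmm/hm.h>   /* was <VBox/vmm/hwaccm.h> before r43387 */
    #include <VBox/vmm/vm.h>
    #include <VBox/err.h>

    /* Illustrative only: module init followed by per-VM init, using the
       renamed HM entry points and status code. */
    static int exampleInitHm(PVM pVM)
    {
        int rc = HMR0Init();                /* formerly HWACCMR0Init()   */
        if (RT_SUCCESS(rc))
            rc = HMR0InitVM(pVM);           /* formerly HWACCMR0InitVM() */
        if (rc == VERR_HM_SUSPEND_PENDING)  /* formerly VERR_HWACCM_SUSPEND_PENDING */
            return rc;                      /* host suspend in progress  */
        return rc;
    }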

Location: trunk/src/VBox/VMM/VMMR0
Files: 7 edited, 3 moved

Legend:

  ' ' unchanged
  '-' removed
  '+' added
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

r42464 → r43387

 #include <VBox/err.h>
 #include <VBox/log.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
 #include <iprt/assert.h>
 #include <iprt/asm-amd64-x86.h>

         if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
         {
-            HWACCMR0SaveFPUState(pVM, pVCpu, pCtx);
+            HMR0SaveFPUState(pVM, pVCpu, pCtx);
             cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
         }

             uint64_t dr6 = pCtx->dr[6];
 
-            HWACCMR0SaveDebugState(pVM, pVCpu, pCtx);
+            HMR0SaveDebugState(pVM, pVCpu, pCtx);
             if (!fDR6) /* dr6 was already up-to-date */
                 pCtx->dr[6] = dr6;
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

r43373 → r43387

 *   Header Files                                                               *
 *******************************************************************************/
-#define LOG_GROUP LOG_GROUP_HWACCM
-#include <VBox/vmm/hwaccm.h>
+#define LOG_GROUP LOG_GROUP_HM
+#include <VBox/vmm/hm.h>
 #include <VBox/vmm/pgm.h>
-#include "HWACCMInternal.h"
+#include "HMInternal.h"
 #include <VBox/vmm/vm.h>
-#include <VBox/vmm/hwacc_vmx.h>
-#include <VBox/vmm/hwacc_svm.h>
+#include <VBox/vmm/hm_vmx.h>
+#include <VBox/vmm/hm_svm.h>
 #include <VBox/err.h>
 #include <VBox/log.h>

      *          simpler and hopefully easier to understand. */
     bool                            fEnabled;
-    /** Serialize initialization in HWACCMR0EnableAllCpus. */
+    /** Serialize initialization in HMR0EnableAllCpus. */
     RTONCE                          EnableAllCpusOnce;
 } g_HvmR0;

  * @returns VBox status code.
  */
-VMMR0DECL(int) HWACCMR0Init(void)
+VMMR0DECL(int) HMR0Init(void)
 {
     /*

             hmR0InitAmd(u32FeaturesEDX);
         else
-            g_HvmR0.lLastError = VERR_HWACCM_UNKNOWN_CPU;
+            g_HvmR0.lLastError = VERR_HM_UNKNOWN_CPU;
     }
     else
-        g_HvmR0.lLastError = VERR_HWACCM_NO_CPUID;
+        g_HvmR0.lLastError = VERR_HM_NO_CPUID;
 
     /*

  * @returns VBox status code.
  */
-VMMR0DECL(int) HWACCMR0Term(void)
+VMMR0DECL(int) HMR0Term(void)
 {
     int rc;

 
 /**
- * Worker function used by hmR0PowerCallback  and HWACCMR0Init to initalize
+ * Worker function used by hmR0PowerCallback  and HMR0Init to initalize
  * VT-x on a CPU.
  *

 
 /**
- * Worker function used by hmR0PowerCallback  and HWACCMR0Init to initalize
+ * Worker function used by hmR0PowerCallback  and HMR0Init to initalize
  * VT-x / AMD-V on a CPU.
  *

 
 /**
- * RTOnce callback employed by HWACCMR0EnableAllCpus.
+ * RTOnce callback employed by HMR0EnableAllCpus.
  *
  * @returns VBox status code.

      * The global init variable is set by the first VM.
      */
-    g_HvmR0.fGlobalInit = pVM->hwaccm.s.fGlobalInit;
+    g_HvmR0.fGlobalInit = pVM->hm.s.fGlobalInit;
 
     for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)

         if (RT_SUCCESS(rc))
             /* If the host provides a VT-x init API, then we'll rely on that for global init. */
-            g_HvmR0.fGlobalInit = pVM->hwaccm.s.fGlobalInit = true;
+            g_HvmR0.fGlobalInit = pVM->hm.s.fGlobalInit = true;
         else
             AssertMsgFailed(("hmR0EnableAllCpuOnce/SUPR0EnableVTx: rc=%Rrc\n", rc));

 
 /**
- * Sets up HWACCM on all cpus.
+ * Sets up HM on all cpus.
  *
  * @returns VBox status code.
  * @param   pVM                 Pointer to the VM.
  */
-VMMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM)
-{
-    /* Make sure we don't touch hwaccm after we've disabled hwaccm in
+VMMR0DECL(int) HMR0EnableAllCpus(PVM pVM)
+{
+    /* Make sure we don't touch hm after we've disabled hm in
        preparation of a suspend. */
     if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
-        return VERR_HWACCM_SUSPEND_PENDING;
+        return VERR_HM_SUSPEND_PENDING;
 
     return RTOnce(&g_HvmR0.EnableAllCpusOnce, hmR0EnableAllCpuOnce, pVM, NULL);

     /*
      * We only care about uninitializing a CPU that is going offline. When a
-     * CPU comes online, the initialization is done lazily in HWACCMR0Enter().
+     * CPU comes online, the initialization is done lazily in HMR0Enter().
      */
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

  * @param   pVM         Pointer to the VM.
  */
-VMMR0DECL(int) HWACCMR0InitVM(PVM pVM)
+VMMR0DECL(int) HMR0InitVM(PVM pVM)
 {
     AssertReturn(pVM, VERR_INVALID_PARAMETER);
 
 #ifdef LOG_ENABLED
-    SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
+    SUPR0Printf("HMR0InitVM: %p\n", pVM);
 #endif
 
-    /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
+    /* Make sure we don't touch hm after we've disabled hm in preparation of a suspend. */
     if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
-        return VERR_HWACCM_SUSPEND_PENDING;
+        return VERR_HM_SUSPEND_PENDING;
 
     /*
      * Copy globals to the VM structure.
      */
-    pVM->hwaccm.s.vmx.fSupported            = g_HvmR0.vmx.fSupported;
-    pVM->hwaccm.s.svm.fSupported            = g_HvmR0.svm.fSupported;
-
-    pVM->hwaccm.s.vmx.fUsePreemptTimer      = g_HvmR0.vmx.fUsePreemptTimer;
-    pVM->hwaccm.s.vmx.cPreemptTimerShift    = g_HvmR0.vmx.cPreemptTimerShift;
-    pVM->hwaccm.s.vmx.msr.feature_ctrl      = g_HvmR0.vmx.msr.feature_ctrl;
-    pVM->hwaccm.s.vmx.hostCR4               = g_HvmR0.vmx.hostCR4;
-    pVM->hwaccm.s.vmx.hostEFER              = g_HvmR0.vmx.hostEFER;
-    pVM->hwaccm.s.vmx.msr.vmx_basic_info    = g_HvmR0.vmx.msr.vmx_basic_info;
-    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls      = g_HvmR0.vmx.msr.vmx_pin_ctls;
-    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls     = g_HvmR0.vmx.msr.vmx_proc_ctls;
-    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2    = g_HvmR0.vmx.msr.vmx_proc_ctls2;
-    pVM->hwaccm.s.vmx.msr.vmx_exit          = g_HvmR0.vmx.msr.vmx_exit;
-    pVM->hwaccm.s.vmx.msr.vmx_entry         = g_HvmR0.vmx.msr.vmx_entry;
-    pVM->hwaccm.s.vmx.msr.vmx_misc          = g_HvmR0.vmx.msr.vmx_misc;
-    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0    = g_HvmR0.vmx.msr.vmx_cr0_fixed0;
-    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1    = g_HvmR0.vmx.msr.vmx_cr0_fixed1;
-    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0    = g_HvmR0.vmx.msr.vmx_cr4_fixed0;
-    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1    = g_HvmR0.vmx.msr.vmx_cr4_fixed1;
-    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum     = g_HvmR0.vmx.msr.vmx_vmcs_enum;
-    pVM->hwaccm.s.vmx.msr.vmx_eptcaps       = g_HvmR0.vmx.msr.vmx_eptcaps;
-    pVM->hwaccm.s.svm.msrHWCR               = g_HvmR0.svm.msrHWCR;
-    pVM->hwaccm.s.svm.u32Rev                = g_HvmR0.svm.u32Rev;
-    pVM->hwaccm.s.svm.u32Features           = g_HvmR0.svm.u32Features;
-    pVM->hwaccm.s.cpuid.u32AMDFeatureECX    = g_HvmR0.cpuid.u32AMDFeatureECX;
-    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX    = g_HvmR0.cpuid.u32AMDFeatureEDX;
-    pVM->hwaccm.s.lLastError                = g_HvmR0.lLastError;
-
-    pVM->hwaccm.s.uMaxASID                  = g_HvmR0.uMaxASID;
-
-
-    if (!pVM->hwaccm.s.cMaxResumeLoops) /* allow ring-3 overrides */
-    {
-        pVM->hwaccm.s.cMaxResumeLoops       = 1024;
+    pVM->hm.s.vmx.fSupported            = g_HvmR0.vmx.fSupported;
+    pVM->hm.s.svm.fSupported            = g_HvmR0.svm.fSupported;
+
+    pVM->hm.s.vmx.fUsePreemptTimer      = g_HvmR0.vmx.fUsePreemptTimer;
+    pVM->hm.s.vmx.cPreemptTimerShift    = g_HvmR0.vmx.cPreemptTimerShift;
+    pVM->hm.s.vmx.msr.feature_ctrl      = g_HvmR0.vmx.msr.feature_ctrl;
+    pVM->hm.s.vmx.hostCR4               = g_HvmR0.vmx.hostCR4;
+    pVM->hm.s.vmx.hostEFER              = g_HvmR0.vmx.hostEFER;
+    pVM->hm.s.vmx.msr.vmx_basic_info    = g_HvmR0.vmx.msr.vmx_basic_info;
+    pVM->hm.s.vmx.msr.vmx_pin_ctls      = g_HvmR0.vmx.msr.vmx_pin_ctls;
+    pVM->hm.s.vmx.msr.vmx_proc_ctls     = g_HvmR0.vmx.msr.vmx_proc_ctls;
+    pVM->hm.s.vmx.msr.vmx_proc_ctls2    = g_HvmR0.vmx.msr.vmx_proc_ctls2;
+    pVM->hm.s.vmx.msr.vmx_exit          = g_HvmR0.vmx.msr.vmx_exit;
+    pVM->hm.s.vmx.msr.vmx_entry         = g_HvmR0.vmx.msr.vmx_entry;
+    pVM->hm.s.vmx.msr.vmx_misc          = g_HvmR0.vmx.msr.vmx_misc;
+    pVM->hm.s.vmx.msr.vmx_cr0_fixed0    = g_HvmR0.vmx.msr.vmx_cr0_fixed0;
+    pVM->hm.s.vmx.msr.vmx_cr0_fixed1    = g_HvmR0.vmx.msr.vmx_cr0_fixed1;
+    pVM->hm.s.vmx.msr.vmx_cr4_fixed0    = g_HvmR0.vmx.msr.vmx_cr4_fixed0;
+    pVM->hm.s.vmx.msr.vmx_cr4_fixed1    = g_HvmR0.vmx.msr.vmx_cr4_fixed1;
+    pVM->hm.s.vmx.msr.vmx_vmcs_enum     = g_HvmR0.vmx.msr.vmx_vmcs_enum;
+    pVM->hm.s.vmx.msr.vmx_eptcaps       = g_HvmR0.vmx.msr.vmx_eptcaps;
+    pVM->hm.s.svm.msrHWCR               = g_HvmR0.svm.msrHWCR;
+    pVM->hm.s.svm.u32Rev                = g_HvmR0.svm.u32Rev;
+    pVM->hm.s.svm.u32Features           = g_HvmR0.svm.u32Features;
+    pVM->hm.s.cpuid.u32AMDFeatureECX    = g_HvmR0.cpuid.u32AMDFeatureECX;
+    pVM->hm.s.cpuid.u32AMDFeatureEDX    = g_HvmR0.cpuid.u32AMDFeatureEDX;
+    pVM->hm.s.lLastError                = g_HvmR0.lLastError;
+
+    pVM->hm.s.uMaxASID                  = g_HvmR0.uMaxASID;
+
+
+    if (!pVM->hm.s.cMaxResumeLoops) /* allow ring-3 overrides */
+    {
+        pVM->hm.s.cMaxResumeLoops       = 1024;
 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
         if (RTThreadPreemptIsPendingTrusty())
-            pVM->hwaccm.s.cMaxResumeLoops   = 8192;
+            pVM->hm.s.cMaxResumeLoops   = 8192;
 #endif
     }

         PVMCPU pVCpu = &pVM->aCpus[i];
 
-        pVCpu->hwaccm.s.idEnteredCpu        = NIL_RTCPUID;
+        pVCpu->hm.s.idEnteredCpu        = NIL_RTCPUID;
 
         /* Invalidate the last cpu we were running on. */
-        pVCpu->hwaccm.s.idLastCpu           = NIL_RTCPUID;
+        pVCpu->hm.s.idLastCpu           = NIL_RTCPUID;
 
         /* We'll aways increment this the first time (host uses ASID 0) */
-        pVCpu->hwaccm.s.uCurrentASID        = 0;
+        pVCpu->hm.s.uCurrentASID        = 0;
     }
 

      */
     RTCCUINTREG     fFlags = ASMIntDisableFlags();
-    PHMGLOBLCPUINFO pCpu   = HWACCMR0GetCurrentCpu();
+    PHMGLOBLCPUINFO pCpu   = HMR0GetCurrentCpu();
     ASMAtomicWriteBool(&pCpu->fInUse, true);
     ASMSetFlags(fFlags);

  * @param   pVM         Pointer to the VM.
  */
-VMMR0DECL(int) HWACCMR0TermVM(PVM pVM)
-{
-    Log(("HWACCMR0TermVM: %p\n", pVM));
+VMMR0DECL(int) HMR0TermVM(PVM pVM)
+{
+    Log(("HMR0TermVM: %p\n", pVM));
     AssertReturn(pVM, VERR_INVALID_PARAMETER);
 
-    /* Make sure we don't touch hm after we've disabled hwaccm in preparation
+    /* Make sure we don't touch hm after we've disabled hm in preparation
        of a suspend. */
     /** @todo r=bird: This cannot be right, the termination functions are
      *        just freeing memory and resetting pVM/pVCpu members...
      *  ==> memory leak. */
-    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
+    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
 
     /*

      */
     RTCCUINTREG     fFlags = ASMIntDisableFlags();
-    PHMGLOBLCPUINFO pCpu   = HWACCMR0GetCurrentCpu();
+    PHMGLOBLCPUINFO pCpu   = HMR0GetCurrentCpu();
     ASMAtomicWriteBool(&pCpu->fInUse, true);
     ASMSetFlags(fFlags);

  * @param   pVM         Pointer to the VM.
  */
-VMMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
-{
-    Log(("HWACCMR0SetupVM: %p\n", pVM));
+VMMR0DECL(int) HMR0SetupVM(PVM pVM)
+{
+    Log(("HMR0SetupVM: %p\n", pVM));
     AssertReturn(pVM, VERR_INVALID_PARAMETER);
 
-    /* Make sure we don't touch hwaccm after we've disabled hwaccm in
+    /* Make sure we don't touch hm after we've disabled hm in
        preparation of a suspend. */
-    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
+    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
 
 

     /* On first entry we'll sync everything. */
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
-        pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
+        pVM->aCpus[i].hm.s.fContextUseFlags = HM_CHANGED_ALL;
 
     /* Enable VT-x or AMD-V if local init is required. */

  * @remarks This is called with preemption disabled.
  */
-VMMR0DECL(int) HWACCMR0Enter(PVM pVM, PVMCPU pVCpu)
+VMMR0DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu)
 {
     RTCPUID         idCpu = RTMpCpuId();

 
     /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
-    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
+    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
     ASMAtomicWriteBool(&pCpu->fInUse, true);
 
-    AssertMsg(pVCpu->hwaccm.s.idEnteredCpu == NIL_RTCPUID, ("%d", (int)pVCpu->hwaccm.s.idEnteredCpu));
-    pVCpu->hwaccm.s.idEnteredCpu = idCpu;
+    AssertMsg(pVCpu->hm.s.idEnteredCpu == NIL_RTCPUID, ("%d", (int)pVCpu->hm.s.idEnteredCpu));
+    pVCpu->hm.s.idEnteredCpu = idCpu;
 
     PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);

 
     /* Always reload the host context and the guest's CR0 register. (!!!!) */
-    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;
+    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_HOST_CONTEXT;
 
     /* Setup the register and mask according to the current execution mode. */
     if (pCtx->msrEFER & MSR_K6_EFER_LMA)
-        pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFFFFFFFFFF);
+        pVM->hm.s.u64RegisterMask = UINT64_C(0xFFFFFFFFFFFFFFFF);
     else
-        pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);
+        pVM->hm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);
 
     /* Enable VT-x or AMD-V if local init is required, or enable if it's a

        and ring-3 calls. */
     if (RT_FAILURE(rc))
-        pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
+        pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
     return rc;
 }

  * @param   pVCpu      Pointer to the VMCPU.
  *
- * @remarks Called with preemption disabled just like HWACCMR0Enter, our
+ * @remarks Called with preemption disabled just like HMR0Enter, our
  *          counterpart.
  */
-VMMR0DECL(int) HWACCMR0Leave(PVM pVM, PVMCPU pVCpu)
+VMMR0DECL(int) HMR0Leave(PVM pVM, PVMCPU pVCpu)
 {
     int             rc;

 
     /** @todo r=bird: This can't be entirely right? */
-    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
+    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
 
     /*

         CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
 
-        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
         Assert(!CPUMIsGuestFPUStateActive(pVCpu));
     }

        guests, so we must make sure the recompiler flushes its TLB the next
        time it executes code. */
-    if (    pVM->hwaccm.s.fNestedPaging
+    if (    pVM->hm.s.fNestedPaging
         &&  CPUMIsGuestInPagedProtectedModeEx(pCtx))
         CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);

     /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
        and ring-3 calls. */
-    AssertMsgStmt(   pVCpu->hwaccm.s.idEnteredCpu == idCpu
+    AssertMsgStmt(   pVCpu->hm.s.idEnteredCpu == idCpu
                   || RT_FAILURE_NP(rc),
-                  ("Owner is %u, I'm %u", pVCpu->hwaccm.s.idEnteredCpu, idCpu),
+                  ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu),
                   rc = VERR_HM_WRONG_CPU_1);
-    pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
+    pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
 
     /*

 
         /* Reset these to force a TLB flush for the next entry. (-> EXPENSIVE) */
-        pVCpu->hwaccm.s.idLastCpu    = NIL_RTCPUID;
-        pVCpu->hwaccm.s.uCurrentASID = 0;
+        pVCpu->hm.s.idLastCpu    = NIL_RTCPUID;
+        pVCpu->hm.s.uCurrentASID = 0;
         VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
     }

  *
  * @remarks Called with preemption disabled and after first having called
- *          HWACCMR0Enter.
- */
-VMMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
+ *          HMR0Enter.
+ */
+VMMR0DECL(int) HMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
 {
 #ifdef VBOX_STRICT

     Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
     Assert(pCpu->fConfigured);
-    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
+    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
     Assert(ASMAtomicReadBool(&pCpu->fInUse) == true);
 #endif

  * @param   pCtx        Pointer to the guest CPU context.
  */
-VMMR0DECL(int)   HWACCMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFpu64SwitchBack);
-    if (pVM->hwaccm.s.vmx.fSupported)
-        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
-    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
+VMMR0DECL(int)   HMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatFpu64SwitchBack);
+    if (pVM->hm.s.vmx.fSupported)
+        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestFPU64, 0, NULL);
+    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestFPU64, 0, NULL);
 }
 

  * @param   pCtx        Pointer to the guest CPU context.
  */
-VMMR0DECL(int)   HWACCMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDebug64SwitchBack);
-    if (pVM->hwaccm.s.vmx.fSupported)
-        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
-    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
+VMMR0DECL(int)   HMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatDebug64SwitchBack);
+    if (pVM->hm.s.vmx.fSupported)
+        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestDebug64, 0, NULL);
+    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestDebug64, 0, NULL);
 }
 

  * @param   pVM         Pointer to the VM.
  */
-VMMR0DECL(int)   HWACCMR0TestSwitcher3264(PVM pVM)
+VMMR0DECL(int)   HMR0TestSwitcher3264(PVM pVM)
 {
     PVMCPU   pVCpu = &pVM->aCpus[0];

     int      rc;
 
-    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
-    if (pVM->hwaccm.s.vmx.fSupported)
-        rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
+    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
+    if (pVM->hm.s.vmx.fSupported)
+        rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnTest64, 5, &aParam[0]);
     else
-        rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
-    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
+        rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnTest64, 5, &aParam[0]);
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
 
     return rc;

  * @returns Suspend pending or not.
  */
-VMMR0DECL(bool) HWACCMR0SuspendPending(void)
+VMMR0DECL(bool) HMR0SuspendPending(void)
 {
     return ASMAtomicReadBool(&g_HvmR0.fSuspended);

  * @returns The cpu structure pointer.
  */
-VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpu(void)
+VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpu(void)
 {
     RTCPUID idCpu = RTMpCpuId();

  * @param   idCpu       id of the VCPU.
  */
-VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu)
+VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu)
 {
     Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));

  * @param   cbSize          Read size.
  */
-VMMR0DECL(void) HWACCMR0SavePendingIOPortRead(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext, unsigned uPort, unsigned uAndVal, unsigned cbSize)
-{
-    pVCpu->hwaccm.s.PendingIO.enmType         = HWACCMPENDINGIO_PORT_READ;
-    pVCpu->hwaccm.s.PendingIO.GCPtrRip        = GCPtrRip;
-    pVCpu->hwaccm.s.PendingIO.GCPtrRipNext    = GCPtrRipNext;
-    pVCpu->hwaccm.s.PendingIO.s.Port.uPort    = uPort;
-    pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal  = uAndVal;
-    pVCpu->hwaccm.s.PendingIO.s.Port.cbSize   = cbSize;
+VMMR0DECL(void) HMR0SavePendingIOPortRead(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext, unsigned uPort, unsigned uAndVal, unsigned cbSize)
+{
+    pVCpu->hm.s.PendingIO.enmType         = HMPENDINGIO_PORT_READ;
+    pVCpu->hm.s.PendingIO.GCPtrRip        = GCPtrRip;
+    pVCpu->hm.s.PendingIO.GCPtrRipNext    = GCPtrRipNext;
+    pVCpu->hm.s.PendingIO.s.Port.uPort    = uPort;
+    pVCpu->hm.s.PendingIO.s.Port.uAndVal  = uAndVal;
+    pVCpu->hm.s.PendingIO.s.Port.cbSize   = cbSize;
     return;
 }

  * @param   cbSize          Read size.
  */
-VMMR0DECL(void) HWACCMR0SavePendingIOPortWrite(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext, unsigned uPort, unsigned uAndVal, unsigned cbSize)
-{
-    pVCpu->hwaccm.s.PendingIO.enmType         = HWACCMPENDINGIO_PORT_WRITE;
-    pVCpu->hwaccm.s.PendingIO.GCPtrRip        = GCPtrRip;
-    pVCpu->hwaccm.s.PendingIO.GCPtrRipNext    = GCPtrRipNext;
-    pVCpu->hwaccm.s.PendingIO.s.Port.uPort    = uPort;
-    pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal  = uAndVal;
-    pVCpu->hwaccm.s.PendingIO.s.Port.cbSize   = cbSize;
+VMMR0DECL(void) HMR0SavePendingIOPortWrite(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext, unsigned uPort, unsigned uAndVal, unsigned cbSize)
+{
+    pVCpu->hm.s.PendingIO.enmType         = HMPENDINGIO_PORT_WRITE;
+    pVCpu->hm.s.PendingIO.GCPtrRip        = GCPtrRip;
+    pVCpu->hm.s.PendingIO.GCPtrRipNext    = GCPtrRipNext;
+    pVCpu->hm.s.PendingIO.s.Port.uPort    = uPort;
+    pVCpu->hm.s.PendingIO.s.Port.uAndVal  = uAndVal;
+    pVCpu->hm.s.PendingIO.s.Port.cbSize   = cbSize;
     return;
 }

  * @returns VBox status code.
  * @param   pVM             Pointer to the VM.
+ * @param   enmSwitcher     The switcher we're about to use.
  * @param   pfVTxDisabled   Where to store whether VT-x was disabled or not.
  */
-VMMR0DECL(int) HWACCMR0EnterSwitcher(PVM pVM, bool *pfVTxDisabled)
+VMMR0DECL(int) HMR0EnterSwitcher(PVM pVM, VMMSWITCHER enmSwitcher, bool *pfVTxDisabled)
 {
     Assert(!(ASMGetFlags() & X86_EFL_IF) || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));

     *pfVTxDisabled = false;
 
-    if (   !g_HvmR0.fEnabled
-        || !g_HvmR0.vmx.fSupported /* no such issues with AMD-V */
-        || !g_HvmR0.fGlobalInit    /* Local init implies the CPU is currently not in VMX root mode. */)
-        return VINF_SUCCESS;    /* nothing to do */
-
-    switch (VMMGetSwitcher(pVM))
+    /* No such issues with AMD-V */
+    if (!g_HvmR0.vmx.fSupported)
+        return VINF_SUCCESS;
+
+    /* Check if the swithcing we're up to is safe. */
+    switch (enmSwitcher)
     {
         case VMMSWITCHER_32_TO_32:

     }
 
-    PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
+    /* When using SUPR0EnableVTx we must let the host suspend and resume VT-x,
+       regardless of whether we're currently using VT-x or not. */
+    if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+    {
+        *pfVTxDisabled = SUPR0SuspendVTxOnCpu();
+        return VINF_SUCCESS;
+    }
+
+    /** @todo Check if this code is presumtive wrt other VT-x users on the
+    *        system... */
+
+    /* Nothing to do if we haven't enabled VT-x. */
+    if (!g_HvmR0.fEnabled)
+        return VINF_SUCCESS;
+
+    /* Local init implies the CPU is currently not in VMX root mode. */
+    if (!g_HvmR0.fGlobalInit)
+        return VINF_SUCCESS;
+
+    /* Ok, disable VT-x. */
+    PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
     AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_HM_IPE_2);

  * switcher turned off paging.
  *
- * @returns VBox status code.
  * @param   pVM             Pointer to the VM.
  * @param   fVTxDisabled    Whether VT-x was disabled or not.
  */
-VMMR0DECL(int) HWACCMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
+VMMR0DECL(void) HMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
 {
     Assert(!(ASMGetFlags() & X86_EFL_IF));
 
     if (!fVTxDisabled)
-        return VINF_SUCCESS;    /* nothing to do */
-
-    Assert(g_HvmR0.fEnabled);
+        return;         /* nothing to do */
+
     Assert(g_HvmR0.vmx.fSupported);
-    Assert(g_HvmR0.fGlobalInit);
-
-    PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
-    AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_HM_IPE_2);
-
-    void           *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
-    RTHCPHYS        HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
-    return VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false);
+    if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
+        SUPR0ResumeVTxOnCpu(fVTxDisabled);
+    else
+    {
+        Assert(g_HvmR0.fEnabled);
+        Assert(g_HvmR0.fGlobalInit);
+
+        PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
+        AssertReturnVoid(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ);
+
+        void           *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
+        RTHCPHYS        HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
+        VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false);
+    }
 }
 

  * @param   pszMsg   Message to prepend the log entry with.
  */
-VMMR0DECL(void) HWACCMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg)
+VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg)
 {
     /*

  * @param   pCtx        Pointer to the CPU context.
  */
-VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     NOREF(pVM);
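
Beyond the rename, this file carries one functional change: HMR0EnterSwitcher() now takes the switcher type as an explicit VMMSWITCHER parameter instead of calling VMMGetSwitcher(pVM), parks VT-x through SUPR0SuspendVTxOnCpu() when the host's SUPR0EnableVTx API is in use, and its counterpart HMR0LeaveSwitcher() becomes void. A minimal sketch of a caller bracketing a world switch under the new signatures (the wrapper and the pfnWorldSwitch callback are hypothetical; the HM names and types are taken from the diff above):

    /* Hypothetical helper: run a raw-mode world switch with VT-x parked. */
    static int exampleWorldSwitch(PVM pVM, VMMSWITCHER enmSwitcher,
                                  int (*pfnWorldSwitch)(PVM pVM))
    {
        bool fVTxDisabled = false;
        int rc = HMR0EnterSwitcher(pVM, enmSwitcher, &fVTxDisabled);
        if (RT_FAILURE(rc))
            return rc;                         /* e.g. an unsafe switcher type */

        rc = pfnWorldSwitch(pVM);              /* VT-x is off here if it had to be */

        HMR0LeaveSwitcher(pVM, fVTxDisabled);  /* re-enables VT-x if we disabled it */
        return rc;
    }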
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

r43373 → r43387

 %include "VBox/asmdefs.mac"
 %include "VBox/err.mac"
-%include "VBox/vmm/hwacc_vmx.mac"
+%include "VBox/vmm/hm_vmx.mac"
 %include "VBox/vmm/cpum.mac"
 %include "iprt/x86.mac"
-%include "HWACCMInternal.mac"
+%include "HMInternal.mac"
 
 %ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.

    ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
    ; risk loading a stale LDT value or something invalid.
-   %define HWACCM_64_BIT_USE_NULL_SEL
+   %define HM_64_BIT_USE_NULL_SEL
   %endif
  %endif

 ; trashes, rax, rdx & rcx
 %macro MYPUSHSEGS64 2
- %ifndef HWACCM_64_BIT_USE_NULL_SEL
+ %ifndef HM_64_BIT_USE_NULL_SEL
    mov     %2, es
    push    %1

    push    rdx
    push    rax
- %ifndef HWACCM_64_BIT_USE_NULL_SEL
+ %ifndef HM_64_BIT_USE_NULL_SEL
    push    fs
  %endif

    push    rdx
    push    rax
- %ifndef HWACCM_64_BIT_USE_NULL_SEL
+ %ifndef HM_64_BIT_USE_NULL_SEL
    push    gs
  %endif

 %macro MYPOPSEGS64 2
    ; Note: do not step through this code with a debugger!
- %ifndef HWACCM_64_BIT_USE_NULL_SEL
+ %ifndef HM_64_BIT_USE_NULL_SEL
    xor     eax, eax
    mov     ds, ax

  %endif
 
- %ifndef HWACCM_64_BIT_USE_NULL_SEL
+ %ifndef HM_64_BIT_USE_NULL_SEL
    pop     gs
  %endif

    wrmsr
 
- %ifndef HWACCM_64_BIT_USE_NULL_SEL
+ %ifndef HM_64_BIT_USE_NULL_SEL
    pop     fs
  %endif

    ; Now it's safe to step again
 
- %ifndef HWACCM_64_BIT_USE_NULL_SEL
+ %ifndef HM_64_BIT_USE_NULL_SEL
    pop     %1
    mov     ds, %2

 ; * @param  pIdtr        Where to store the 64-bit IDTR.
 ; */
-;DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
-ALIGNCODE(16)
-BEGINPROC hwaccmR0Get64bitGDTRandIDTR
+;DECLASM(void) hmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
+ALIGNCODE(16)
+BEGINPROC hmR0Get64bitGDTRandIDTR
     db      0xea                        ; jmp far .sixtyfourbit_mode
     dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)

     dd      .the_end, NAME(SUPR0AbsKernelCS)
 BITS 32
-ENDPROC   hwaccmR0Get64bitGDTRandIDTR
+ENDPROC   hmR0Get64bitGDTRandIDTR
 
 

 ; * @returns CR3
 ; */
-;DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
-ALIGNCODE(16)
-BEGINPROC hwaccmR0Get64bitCR3
+;DECLASM(uint64_t) hmR0Get64bitCR3(void);
+ALIGNCODE(16)
+BEGINPROC hmR0Get64bitCR3
     db      0xea                        ; jmp far .sixtyfourbit_mode
     dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)

     dd      .the_end, NAME(SUPR0AbsKernelCS)
 BITS 32
-ENDPROC   hwaccmR0Get64bitCR3
+ENDPROC   hmR0Get64bitCR3
 
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

 ; load the guest ones when necessary.
 ;
-; @cproto       DECLASM(int) hwaccmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
+; @cproto       DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
 ;
 ; @returns      eax

 ; @param        pfnStartVM      msc:[rbp+38h]
 ;
-; @remarks      This is essentially the same code as hwaccmR0SVMRunWrapXMM, only the parameters differ a little bit.
+; @remarks      This is essentially the same code as hmR0SVMRunWrapXMM, only the parameters differ a little bit.
 ;
 ; ASSUMING 64-bit and windows for now.
 ALIGNCODE(16)
-BEGINPROC hwaccmR0VMXStartVMWrapXMM
+BEGINPROC hmR0VMXStartVMWrapXMM
         push    xBP
         mov     xBP, xSP

         leave
         ret
-ENDPROC   hwaccmR0VMXStartVMWrapXMM
+ENDPROC   hmR0VMXStartVMWrapXMM
 
 ;;

 ; load the guest ones when necessary.
 ;
-; @cproto       DECLASM(int) hwaccmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHWACCMSVMVMRUN pfnVMRun);
+; @cproto       DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
 ;
 ; @returns      eax

 ; @param        pfnVMRun        msc:[rbp+38h]
 ;
-; @remarks      This is essentially the same code as hwaccmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
+; @remarks      This is essentially the same code as hmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
 ;
 ; ASSUMING 64-bit and windows for now.
 ALIGNCODE(16)
-BEGINPROC hwaccmR0SVMRunWrapXMM
+BEGINPROC hmR0SVMRunWrapXMM
         push    xBP
         mov     xBP, xSP

         leave
         ret
-ENDPROC   hwaccmR0SVMRunWrapXMM
+ENDPROC   hmR0SVMRunWrapXMM
 
 %endif ; VBOX_WITH_KERNEL_USING_XMM

 %endif
 
-%include "HWACCMR0Mixed.mac"
+%include "HMR0Mixed.mac"
 
 

  %define MYPOPSEGS      MYPOPSEGS64
 
- %include "HWACCMR0Mixed.mac"
+ %include "HMR0Mixed.mac"
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
  • trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac

r43373 → r43387

 ; $Id$
 ;; @file
-; HWACCMR0Mixed.mac - Stuff that darwin needs to build two versions of.
+; HMR0Mixed.mac - Stuff that darwin needs to build two versions of.
 ;
-; Included by HWACCMR0A.asm with RT_ARCH_AMD64 defined or or undefined.
+; Included by HMR0A.asm with RT_ARCH_AMD64 defined or or undefined.
 ;
 
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r43353 r43387  
    1919*   Header Files                                                               *
    2020*******************************************************************************/
    21 #define LOG_GROUP LOG_GROUP_HWACCM
    22 #include <VBox/vmm/hwaccm.h>
     21#define LOG_GROUP LOG_GROUP_HM
     22#include <VBox/vmm/hm.h>
    2323#include <VBox/vmm/pgm.h>
    2424#include <VBox/vmm/selm.h>
     
    2828#include <VBox/vmm/tm.h>
    2929#include <VBox/vmm/pdmapi.h>
    30 #include "HWACCMInternal.h"
     30#include "HMInternal.h"
    3131#include <VBox/vmm/vm.h>
    32 #include <VBox/vmm/hwacc_svm.h>
     32#include <VBox/vmm/hm_svm.h>
    3333#include <VBox/err.h>
    3434#include <VBox/log.h>
     
    9292         */
    9393        if (    pVM
    94             &&  pVM->hwaccm.s.svm.fIgnoreInUseError)
     94            &&  pVM->hm.s.svm.fIgnoreInUseError)
    9595        {
    9696            pCpu->fIgnoreAMDVInUseError = true;
     
    159159    int rc;
    160160
    161     pVM->hwaccm.s.svm.pMemObjIOBitmap = NIL_RTR0MEMOBJ;
     161    pVM->hm.s.svm.pMemObjIOBitmap = NIL_RTR0MEMOBJ;
    162162
    163163    /* Allocate 12 KB for the IO bitmap (doesn't seem to be a way to convince SVM not to use it) */
    164     rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjIOBitmap, 3 << PAGE_SHIFT, false /* executable R0 mapping */);
     164    rc = RTR0MemObjAllocCont(&pVM->hm.s.svm.pMemObjIOBitmap, 3 << PAGE_SHIFT, false /* executable R0 mapping */);
    165165    if (RT_FAILURE(rc))
    166166        return rc;
    167167
    168     pVM->hwaccm.s.svm.pIOBitmap     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjIOBitmap);
    169     pVM->hwaccm.s.svm.pIOBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjIOBitmap, 0);
     168    pVM->hm.s.svm.pIOBitmap     = RTR0MemObjAddress(pVM->hm.s.svm.pMemObjIOBitmap);
     169    pVM->hm.s.svm.pIOBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hm.s.svm.pMemObjIOBitmap, 0);
    170170    /* Set all bits to intercept all IO accesses. */
    171     ASMMemFill32(pVM->hwaccm.s.svm.pIOBitmap, 3 << PAGE_SHIFT, 0xffffffff);
     171    ASMMemFill32(pVM->hm.s.svm.pIOBitmap, 3 << PAGE_SHIFT, 0xffffffff);
    172172
    173173    /*
     
    199199    {
    200200        Log(("SVMR0InitVM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
    201         pVM->hwaccm.s.svm.fAlwaysFlushTLB = true;
     201        pVM->hm.s.svm.fAlwaysFlushTLB = true;
    202202    }
    203203
     
    207207        PVMCPU pVCpu = &pVM->aCpus[i];
    208208
    209         pVCpu->hwaccm.s.svm.pMemObjVMCBHost  = NIL_RTR0MEMOBJ;
    210         pVCpu->hwaccm.s.svm.pMemObjVMCB      = NIL_RTR0MEMOBJ;
    211         pVCpu->hwaccm.s.svm.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
     209        pVCpu->hm.s.svm.pMemObjVMCBHost  = NIL_RTR0MEMOBJ;
     210        pVCpu->hm.s.svm.pMemObjVMCB      = NIL_RTR0MEMOBJ;
     211        pVCpu->hm.s.svm.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
    212212
    213213        /* Allocate one page for the host context */
    214         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.svm.pMemObjVMCBHost, 1 << PAGE_SHIFT, false /* executable R0 mapping */);
     214        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.pMemObjVMCBHost, 1 << PAGE_SHIFT, false /* executable R0 mapping */);
    215215        if (RT_FAILURE(rc))
    216216            return rc;
    217217
    218         pVCpu->hwaccm.s.svm.pVMCBHost     = RTR0MemObjAddress(pVCpu->hwaccm.s.svm.pMemObjVMCBHost);
    219         pVCpu->hwaccm.s.svm.pVMCBHostPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.svm.pMemObjVMCBHost, 0);
    220         Assert(pVCpu->hwaccm.s.svm.pVMCBHostPhys < _4G);
    221         ASMMemZeroPage(pVCpu->hwaccm.s.svm.pVMCBHost);
     218        pVCpu->hm.s.svm.pVMCBHost     = RTR0MemObjAddress(pVCpu->hm.s.svm.pMemObjVMCBHost);
     219        pVCpu->hm.s.svm.pVMCBHostPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.pMemObjVMCBHost, 0);
     220        Assert(pVCpu->hm.s.svm.pVMCBHostPhys < _4G);
     221        ASMMemZeroPage(pVCpu->hm.s.svm.pVMCBHost);
    222222
    223223        /* Allocate one page for the VM control block (VMCB). */
    224         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.svm.pMemObjVMCB, 1 << PAGE_SHIFT, false /* executable R0 mapping */);
     224        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.pMemObjVMCB, 1 << PAGE_SHIFT, false /* executable R0 mapping */);
    225225        if (RT_FAILURE(rc))
    226226            return rc;
    227227
    228         pVCpu->hwaccm.s.svm.pVMCB     = RTR0MemObjAddress(pVCpu->hwaccm.s.svm.pMemObjVMCB);
    229         pVCpu->hwaccm.s.svm.pVMCBPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.svm.pMemObjVMCB, 0);
    230         Assert(pVCpu->hwaccm.s.svm.pVMCBPhys < _4G);
    231         ASMMemZeroPage(pVCpu->hwaccm.s.svm.pVMCB);
     228        pVCpu->hm.s.svm.pVMCB     = RTR0MemObjAddress(pVCpu->hm.s.svm.pMemObjVMCB);
     229        pVCpu->hm.s.svm.pVMCBPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.pMemObjVMCB, 0);
     230        Assert(pVCpu->hm.s.svm.pVMCBPhys < _4G);
     231        ASMMemZeroPage(pVCpu->hm.s.svm.pVMCB);
    232232
    233233        /* Allocate 8 KB for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it) */
    234         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.svm.pMemObjMSRBitmap, 2 << PAGE_SHIFT, false /* executable R0 mapping */);
     234        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.pMemObjMSRBitmap, 2 << PAGE_SHIFT, false /* executable R0 mapping */);
    235235        if (RT_FAILURE(rc))
    236236            return rc;
    237237
    238         pVCpu->hwaccm.s.svm.pMSRBitmap     = RTR0MemObjAddress(pVCpu->hwaccm.s.svm.pMemObjMSRBitmap);
    239         pVCpu->hwaccm.s.svm.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.svm.pMemObjMSRBitmap, 0);
     238        pVCpu->hm.s.svm.pMSRBitmap     = RTR0MemObjAddress(pVCpu->hm.s.svm.pMemObjMSRBitmap);
     239        pVCpu->hm.s.svm.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.pMemObjMSRBitmap, 0);
    240240        /* Set all bits to intercept all MSR accesses. */
    241         ASMMemFill32(pVCpu->hwaccm.s.svm.pMSRBitmap, 2 << PAGE_SHIFT, 0xffffffff);
     241        ASMMemFill32(pVCpu->hm.s.svm.pMSRBitmap, 2 << PAGE_SHIFT, 0xffffffff);
    242242    }
    243243
     
    258258        PVMCPU pVCpu = &pVM->aCpus[i];
    259259
    260         if (pVCpu->hwaccm.s.svm.pMemObjVMCBHost != NIL_RTR0MEMOBJ)
    261         {
    262             RTR0MemObjFree(pVCpu->hwaccm.s.svm.pMemObjVMCBHost, false);
    263             pVCpu->hwaccm.s.svm.pVMCBHost       = 0;
    264             pVCpu->hwaccm.s.svm.pVMCBHostPhys   = 0;
    265             pVCpu->hwaccm.s.svm.pMemObjVMCBHost = NIL_RTR0MEMOBJ;
    266         }
    267 
    268         if (pVCpu->hwaccm.s.svm.pMemObjVMCB != NIL_RTR0MEMOBJ)
    269         {
    270             RTR0MemObjFree(pVCpu->hwaccm.s.svm.pMemObjVMCB, false);
    271             pVCpu->hwaccm.s.svm.pVMCB       = 0;
    272             pVCpu->hwaccm.s.svm.pVMCBPhys   = 0;
    273             pVCpu->hwaccm.s.svm.pMemObjVMCB = NIL_RTR0MEMOBJ;
    274         }
    275         if (pVCpu->hwaccm.s.svm.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
    276         {
    277             RTR0MemObjFree(pVCpu->hwaccm.s.svm.pMemObjMSRBitmap, false);
    278             pVCpu->hwaccm.s.svm.pMSRBitmap       = 0;
    279             pVCpu->hwaccm.s.svm.pMSRBitmapPhys   = 0;
    280             pVCpu->hwaccm.s.svm.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
    281         }
    282     }
    283     if (pVM->hwaccm.s.svm.pMemObjIOBitmap != NIL_RTR0MEMOBJ)
    284     {
    285         RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjIOBitmap, false);
    286         pVM->hwaccm.s.svm.pIOBitmap       = 0;
    287         pVM->hwaccm.s.svm.pIOBitmapPhys   = 0;
    288         pVM->hwaccm.s.svm.pMemObjIOBitmap = NIL_RTR0MEMOBJ;
     260        if (pVCpu->hm.s.svm.pMemObjVMCBHost != NIL_RTR0MEMOBJ)
     261        {
     262            RTR0MemObjFree(pVCpu->hm.s.svm.pMemObjVMCBHost, false);
     263            pVCpu->hm.s.svm.pVMCBHost       = 0;
     264            pVCpu->hm.s.svm.pVMCBHostPhys   = 0;
     265            pVCpu->hm.s.svm.pMemObjVMCBHost = NIL_RTR0MEMOBJ;
     266        }
     267
     268        if (pVCpu->hm.s.svm.pMemObjVMCB != NIL_RTR0MEMOBJ)
     269        {
     270            RTR0MemObjFree(pVCpu->hm.s.svm.pMemObjVMCB, false);
     271            pVCpu->hm.s.svm.pVMCB       = 0;
     272            pVCpu->hm.s.svm.pVMCBPhys   = 0;
     273            pVCpu->hm.s.svm.pMemObjVMCB = NIL_RTR0MEMOBJ;
     274        }
     275        if (pVCpu->hm.s.svm.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
     276        {
     277            RTR0MemObjFree(pVCpu->hm.s.svm.pMemObjMSRBitmap, false);
     278            pVCpu->hm.s.svm.pMSRBitmap       = 0;
     279            pVCpu->hm.s.svm.pMSRBitmapPhys   = 0;
     280            pVCpu->hm.s.svm.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
     281        }
     282    }
     283    if (pVM->hm.s.svm.pMemObjIOBitmap != NIL_RTR0MEMOBJ)
     284    {
     285        RTR0MemObjFree(pVM->hm.s.svm.pMemObjIOBitmap, false);
     286        pVM->hm.s.svm.pIOBitmap       = 0;
     287        pVM->hm.s.svm.pIOBitmapPhys   = 0;
     288        pVM->hm.s.svm.pMemObjIOBitmap = NIL_RTR0MEMOBJ;
    289289    }
    290290    return VINF_SUCCESS;
     
    303303
    304304    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    305     Assert(pVM->hwaccm.s.svm.fSupported);
     305    Assert(pVM->hm.s.svm.fSupported);
    306306
    307307    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    308308    {
    309309        PVMCPU    pVCpu = &pVM->aCpus[i];
    310         SVM_VMCB *pVMCB = (SVM_VMCB *)pVM->aCpus[i].hwaccm.s.svm.pVMCB;
     310        SVM_VMCB *pVMCB = (SVM_VMCB *)pVM->aCpus[i].hm.s.svm.pVMCB;
    311311
    312312        AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB);
     
    382382
    383383        /* Set IO and MSR bitmap addresses. */
    384         pVMCB->ctrl.u64IOPMPhysAddr  = pVM->hwaccm.s.svm.pIOBitmapPhys;
    385         pVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hwaccm.s.svm.pMSRBitmapPhys;
     384        pVMCB->ctrl.u64IOPMPhysAddr  = pVM->hm.s.svm.pIOBitmapPhys;
     385        pVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.pMSRBitmapPhys;
    386386
    387387        /* No LBR virtualization. */
     
    399399
    400400        /* If nested paging is not in use, additional intercepts have to be set up. */
    401         if (!pVM->hwaccm.s.fNestedPaging)
     401        if (!pVM->hm.s.fNestedPaging)
    402402        {
    403403            /* CR3 reads/writes must be intercepted; our shadow values are different from guest's. */
     
    448448{
    449449    unsigned ulBit;
    450     uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hwaccm.s.svm.pMSRBitmap;
     450    uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hm.s.svm.pMSRBitmap;
    451451
    452452    if (ulMSR <= 0x00001FFF)
     
    498498{
    499499#ifdef VBOX_WITH_STATISTICS
    500     STAM_COUNTER_INC(&pVCpu->hwaccm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
     500    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
    501501#endif
    502502
     
    539539     * Dispatch any pending interrupts (injected before, but a VM-exit occurred prematurely).
    540540     */
    541     if (pVCpu->hwaccm.s.Event.fPending)
     541    if (pVCpu->hm.s.Event.fPending)
    542542    {
    543543        SVM_EVENT Event;
    544544
    545         Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode,
     545        Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hm.s.Event.intInfo, pVCpu->hm.s.Event.errCode,
    546546             (RTGCPTR)pCtx->rip));
    547         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntReinject);
    548         Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
     547        STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject);
     548        Event.au64[0] = pVCpu->hm.s.Event.intInfo;
    549549        hmR0SvmInjectEvent(pVCpu, pVMCB, pCtx, &Event);
    550550
    551         pVCpu->hwaccm.s.Event.fPending = false;
     551        pVCpu->hm.s.Event.fPending = false;
    552552        return VINF_SUCCESS;
    553553    }
     
    614614                    /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
    615615                    Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)));
    616                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchGuestIrq);
     616                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
    617617                    /* Just continue */
    618618                }
     
    681681            Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
    682682
    683         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntInject);
     683        STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
    684684        hmR0SvmInjectEvent(pVCpu, pVMCB, pCtx, &Event);
    685685    } /* if (interrupts can be dispatched) */
     
    724724
    725725    /* Setup AMD SVM. */
    726     Assert(pVM->hwaccm.s.svm.fSupported);
    727 
    728     pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
     726    Assert(pVM->hm.s.svm.fSupported);
     727
     728    pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB;
    729729    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB);
    730730
    731731    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    732     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
     732    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
    733733    {
    734734        SVM_WRITE_SELREG(CS, cs);
     
    741741
    742742    /* Guest CPU context: LDTR. */
    743     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
     743    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
    744744    {
    745745        SVM_WRITE_SELREG(LDTR, ldtr);
     
    747747
    748748    /* Guest CPU context: TR. */
    749     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
     749    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
    750750    {
    751751        SVM_WRITE_SELREG(TR, tr);
     
    753753
    754754    /* Guest CPU context: GDTR. */
    755     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
     755    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
    756756    {
    757757        pVMCB->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
     
    760760
    761761    /* Guest CPU context: IDTR. */
    762     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
     762    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
    763763    {
    764764        pVMCB->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
     
    774774
    775775    /* Control registers */
    776     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
     776    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
    777777    {
    778778        val = pCtx->cr0;
     
    790790
    791791                /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
    792                 if (!pVCpu->hwaccm.s.fFPUOldStyleOverride)
     792                if (!pVCpu->hm.s.fFPUOldStyleOverride)
    793793                {
    794794                    pVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
    795                     pVCpu->hwaccm.s.fFPUOldStyleOverride = true;
     795                    pVCpu->hm.s.fFPUOldStyleOverride = true;
    796796                }
    797797            }
     
    806806         * translation will remain active.
    807807         */
    808         if (!pVM->hwaccm.s.fNestedPaging)
     808        if (!pVM->hm.s.fNestedPaging)
    809809        {
    810810            val |= X86_CR0_PG;  /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */
     
    816816    pVMCB->guest.u64CR2 = pCtx->cr2;
    817817
    818     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
     818    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
    819819    {
    820820        /* Save our shadow CR3 register. */
    821         if (pVM->hwaccm.s.fNestedPaging)
     821        if (pVM->hm.s.fNestedPaging)
    822822        {
    823823            PGMMODE enmShwPagingMode;
     
    841841    }
    842842
    843     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
     843    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
    844844    {
    845845        val = pCtx->cr4;
    846         if (!pVM->hwaccm.s.fNestedPaging)
    847         {
    848             switch (pVCpu->hwaccm.s.enmShadowMode)
     846        if (!pVM->hm.s.fNestedPaging)
     847        {
     848            switch (pVCpu->hm.s.enmShadowMode)
    849849            {
    850850                case PGMMODE_REAL:
     
    881881
    882882    /* Debug registers. */
    883     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
     883    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
    884884    {
    885885        pCtx->dr[6] |= X86_DR6_INIT_VAL;                                          /* set all reserved bits to 1. */
     
    916916            && !DBGFIsStepping(pVCpu))
    917917        {
    918             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxArmed);
     918            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
    919919
    920920            /* Disable drx move intercepts. */
     
    948948        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    949949#elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    950         pVCpu->hwaccm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
     950        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
    951951#else
    952952# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    953         if (!pVM->hwaccm.s.fAllow64BitGuests)
     953        if (!pVM->hm.s.fAllow64BitGuests)
    954954            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    955955# endif
    956         pVCpu->hwaccm.s.svm.pfnVMRun = SVMR0VMRun64;
    957 #endif
    958         /* Unconditionally update these as wrmsr might have changed them. (HWACCM_CHANGED_GUEST_SEGMENT_REGS will not be set) */
     956        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun64;
     957#endif
     958        /* Unconditionally update these as wrmsr might have changed them. (HM_CHANGED_GUEST_SEGMENT_REGS will not be set) */
    959959        pVMCB->guest.FS.u64Base    = pCtx->fs.u64Base;
    960960        pVMCB->guest.GS.u64Base    = pCtx->gs.u64Base;
     
    965965        pVMCB->guest.u64EFER &= ~MSR_K6_EFER_LME;
    966966
    967         pVCpu->hwaccm.s.svm.pfnVMRun = SVMR0VMRun;
     967        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
    968968    }
    969969
     
    976976            pVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
    977977            pVMCB->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
    978             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCOffset);
     978            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCOffset);
    979979        }
    980980        else
     
    986986            pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
    987987            pVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
    988             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow);
     988            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCInterceptOverFlow);
    989989        }
    990990    }
     
    993993        pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
    994994        pVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
    995         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCIntercept);
     995        STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCIntercept);
    996996    }
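The hunk above captures the two TSC strategies side by side: when the timekeeping code can tolerate it, the guest runs on a hardware TSC offset with no intercepts (StatTSCOffset); otherwise RDTSC and RDTSCP are intercepted so every read goes through the VMM. A minimal standalone sketch of that decision follows; every sketch* identifier is invented for illustration and is not a VirtualBox symbol.

    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_INTERCEPT_RDTSC   (1u << 0)  /* stands in for SVM_CTRL1_INTERCEPT_RDTSC  */
    #define SKETCH_INTERCEPT_RDTSCP  (1u << 1)  /* stands in for SVM_CTRL2_INTERCEPT_RDTSCP */

    typedef struct SKETCHTSCCTRL
    {
        uint32_t fIntercepts;   /* intercept control bits */
        uint64_t u64TSCOffset;  /* added by hardware to every guest TSC read */
    } SKETCHTSCCTRL;

    /* Offset when we safely can; intercept when we must. */
    static void sketchSetupTsc(SKETCHTSCCTRL *pCtrl, bool fCanOffsetTsc, uint64_t u64Offset)
    {
        if (fCanOffsetTsc)
        {
            pCtrl->u64TSCOffset = u64Offset;
            pCtrl->fIntercepts &= ~(SKETCH_INTERCEPT_RDTSC | SKETCH_INTERCEPT_RDTSCP);
        }
        else
            pCtrl->fIntercepts |= SKETCH_INTERCEPT_RDTSC | SKETCH_INTERCEPT_RDTSCP;
    }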
    997997
     
    10131013
    10141014    /* Done. */
    1015     pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
     1015    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST;
    10161016
    10171017    return VINF_SUCCESS;
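The HM_CHANGED_* bits cleared here implement a dirty-flag scheme: each bit marks a slice of guest state that must be re-encoded into the VMCB before the next entry, and clearing the whole mask records that everything is in sync until the next change. A standalone sketch of the pattern, with all sketch* names invented for illustration:

    #include <stdint.h>

    #define SKETCH_CHANGED_GUEST_CR0    (1u << 0)
    #define SKETCH_CHANGED_GUEST_CR3    (1u << 1)
    #define SKETCH_CHANGED_GUEST_CR4    (1u << 2)
    #define SKETCH_CHANGED_GUEST_DEBUG  (1u << 3)
    #define SKETCH_CHANGED_ALL_GUEST    (  SKETCH_CHANGED_GUEST_CR0 \
                                         | SKETCH_CHANGED_GUEST_CR3 \
                                         | SKETCH_CHANGED_GUEST_CR4 \
                                         | SKETCH_CHANGED_GUEST_DEBUG)

    /* Re-encode only the state whose dirty bit is set, then clear the mask. */
    static void sketchLoadGuestState(uint32_t *pfContextUseFlags)
    {
        if (*pfContextUseFlags & SKETCH_CHANGED_GUEST_CR0)
        { /* ...write CR0 and the dependent exception intercepts... */ }
        if (*pfContextUseFlags & SKETCH_CHANGED_GUEST_CR3)
        { /* ...write the shadow or guest CR3... */ }
        if (*pfContextUseFlags & SKETCH_CHANGED_GUEST_CR4)
        { /* ...write CR4, adjusted for the shadow paging mode... */ }
        if (*pfContextUseFlags & SKETCH_CHANGED_GUEST_DEBUG)
        { /* ...write DR6/DR7 and arm the DRx intercepts... */ }
        *pfContextUseFlags &= ~SKETCH_CHANGED_ALL_GUEST;
    }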
     
    10321032    AssertPtr(pVCpu);
    10331033
    1034     SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
    1035     pCpu = HWACCMR0GetCurrentCpu();
     1034    SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB;
     1035    pCpu = HMR0GetCurrentCpu();
    10361036
    10371037    /*
     
    10421042     */
    10431043    bool fNewASID = false;
    1044     if (    pVCpu->hwaccm.s.idLastCpu   != pCpu->idCpu
    1045         ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    1046     {
    1047         pVCpu->hwaccm.s.fForceTLBFlush = true;
     1044    if (    pVCpu->hm.s.idLastCpu   != pCpu->idCpu
     1045        ||  pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     1046    {
     1047        pVCpu->hm.s.fForceTLBFlush = true;
    10481048        fNewASID = true;
    10491049    }
     
    10521052     * Set TLB flush state as checked until we return from the world switch.
    10531053     */
    1054     ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, true);
     1054    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
    10551055
    10561056    /*
     
    10581058     */
    10591059    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    1060         pVCpu->hwaccm.s.fForceTLBFlush = true;
    1061 
    1062     pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
     1060        pVCpu->hm.s.fForceTLBFlush = true;
     1061
     1062    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
    10631063    pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
    10641064
    1065     if (RT_UNLIKELY(pVM->hwaccm.s.svm.fAlwaysFlushTLB))
     1065    if (RT_UNLIKELY(pVM->hm.s.svm.fAlwaysFlushTLB))
    10661066    {
    10671067        /*
     
    10691069         */
    10701070        pCpu->uCurrentASID               = 1;
    1071         pVCpu->hwaccm.s.uCurrentASID     = 1;
    1072         pVCpu->hwaccm.s.cTLBFlushes      = pCpu->cTLBFlushes;
     1071        pVCpu->hm.s.uCurrentASID     = 1;
     1072        pVCpu->hm.s.cTLBFlushes      = pCpu->cTLBFlushes;
    10731073        pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
    10741074    }
    1075     else if (pVCpu->hwaccm.s.fForceTLBFlush)
     1075    else if (pVCpu->hm.s.fForceTLBFlush)
    10761076    {
    10771077        if (fNewASID)
     
    10791079            ++pCpu->uCurrentASID;
    10801080            bool fHitASIDLimit = false;
    1081             if (pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID)
     1081            if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
    10821082            {
    10831083                pCpu->uCurrentASID        = 1;  /* start at 1; host uses 0 */
     
    10851085                fHitASIDLimit             = true;
    10861086
    1087                 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     1087                if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
    10881088                {
    10891089                    pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
     
    11001100                && pCpu->fFlushASIDBeforeUse)
    11011101            {
    1102                 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     1102                if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
    11031103                    pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    11041104                else
     
    11091109            }
    11101110
    1111             pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
    1112             pVCpu->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
     1111            pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID;
     1112            pVCpu->hm.s.cTLBFlushes  = pCpu->cTLBFlushes;
    11131113        }
    11141114        else
    11151115        {
    1116             if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     1116            if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
    11171117                pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    11181118            else
     
    11201120        }
    11211121
    1122         pVCpu->hwaccm.s.fForceTLBFlush = false;
     1122        pVCpu->hm.s.fForceTLBFlush = false;
    11231123    }
    11241124    else
    11251125    {
    11261126        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
    1127          *        not be executed. See hwaccmQueueInvlPage() where it is commented
     1127         *        not be executed. See hmQueueInvlPage() where it is commented
    11281128         *        out. Support individual entry flushing someday. */
    11291129        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
    11301130        {
    11311131            /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
    1132             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
    1133             for (unsigned i = 0; i < pVCpu->hwaccm.s.TlbShootdown.cPages; i++)
    1134                 SVMR0InvlpgA(pVCpu->hwaccm.s.TlbShootdown.aPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID);
    1135         }
    1136     }
    1137 
    1138     pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
     1132            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
     1133            for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
     1134                SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID);
     1135        }
     1136    }
     1137
     1138    pVCpu->hm.s.TlbShootdown.cPages = 0;
    11391139    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    11401140
    11411141    /* Update VMCB with the ASID. */
    1142     pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hwaccm.s.uCurrentASID;
    1143 
    1144     AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes,
    1145               ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    1146     AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID,
     1142    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentASID;
     1143
     1144    AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes,
     1145              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
     1146    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID,
    11471147              ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    1148     AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,
    1149               ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
     1148    AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID,
     1149              ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID));
    11501150
    11511151#ifdef VBOX_WITH_STATISTICS
    11521152    if (pVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
    1153         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
     1153        STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch);
    11541154    else if (   pVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
    11551155             || pVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
    11561156    {
    1157         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushASID);
     1157        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID);
    11581158    }
    11591159    else
    1160         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch);
     1160        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch);
    11611161#endif
    11621162}
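The ASID bookkeeping in this function is easier to follow in isolation. Below is a simplified standalone sketch of the flush decision; it deliberately drops the fAlwaysFlushTLB, flush-ASID-before-first-use and TLB-shootdown paths, and every sketch* identifier is invented for illustration rather than taken from the sources.

    #include <stdbool.h>
    #include <stdint.h>

    enum { SKETCH_TLB_FLUSH_NOTHING, SKETCH_TLB_FLUSH_ENTIRE, SKETCH_TLB_FLUSH_SINGLE_CONTEXT };

    typedef struct SKETCHHOSTCPU
    {
        uint32_t idCpu;
        uint32_t cTLBFlushes;   /* bumped whenever this CPU's entire TLB goes */
        uint32_t uCurrentASID;  /* last ASID handed out on this CPU */
    } SKETCHHOSTCPU;

    typedef struct SKETCHVCPU
    {
        uint32_t idLastCpu;     /* host CPU we last ran on */
        uint32_t cTLBFlushes;   /* host flush count we last observed */
        uint32_t uCurrentASID;  /* ASID our VMCB currently carries */
        bool     fForceTLBFlush;
    } SKETCHVCPU;

    /* Return the flush type for the next entry and update the bookkeeping.
       A fresh ASID is needed when the vCPU migrated to another host CPU or
       missed a host-side flush (cTLBFlushes mismatch). */
    static int sketchSetupTLB(SKETCHVCPU *pVCpu, SKETCHHOSTCPU *pCpu,
                              uint32_t uMaxASID, bool fFlushByASID)
    {
        int  iFlush   = SKETCH_TLB_FLUSH_NOTHING;
        bool fNewASID =    pVCpu->idLastCpu   != pCpu->idCpu
                        || pVCpu->cTLBFlushes != pCpu->cTLBFlushes;
        pVCpu->idLastCpu = pCpu->idCpu;

        if (fNewASID || pVCpu->fForceTLBFlush)
        {
            if (fNewASID)
            {
                if (++pCpu->uCurrentASID >= uMaxASID)
                {
                    pCpu->uCurrentASID = 1;  /* ASID 0 belongs to the host */
                    pCpu->cTLBFlushes++;     /* invalidates every vCPU's ASID */
                    iFlush = SKETCH_TLB_FLUSH_ENTIRE;
                }
                else
                    iFlush = fFlushByASID ? SKETCH_TLB_FLUSH_SINGLE_CONTEXT
                                          : SKETCH_TLB_FLUSH_ENTIRE;
                pVCpu->uCurrentASID = pCpu->uCurrentASID;
                pVCpu->cTLBFlushes  = pCpu->cTLBFlushes;
            }
            else /* same ASID but stale entries: flush just this context */
                iFlush = fFlushByASID ? SKETCH_TLB_FLUSH_SINGLE_CONTEXT
                                      : SKETCH_TLB_FLUSH_ENTIRE;
            pVCpu->fForceTLBFlush = false;
        }
        return iFlush;
    }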
     
    11731173VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    11741174{
    1175     STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatEntry, x);
    1176     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hwaccm.s.StatExit1);
    1177     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hwaccm.s.StatExit2);
     1175    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
     1176    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
     1177    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
    11781178
    11791179    VBOXSTRICTRC    rc = VINF_SUCCESS;
     
    11941194#endif
    11951195
    1196     pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
     1196    pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB;
    11971197    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB);
    11981198
     
    12011201     */
    12021202ResumeExecution:
    1203     if (!STAM_PROFILE_ADV_IS_RUNNING(&pVCpu->hwaccm.s.StatEntry))
    1204         STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatExit2, &pVCpu->hwaccm.s.StatEntry, x);
    1205     Assert(!HWACCMR0SuspendPending());
     1203    if (!STAM_PROFILE_ADV_IS_RUNNING(&pVCpu->hm.s.StatEntry))
     1204        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit2, &pVCpu->hm.s.StatEntry, x);
     1205    Assert(!HMR0SuspendPending());
    12061206
    12071207    /*
    12081208     * Safety precaution; looping for too long here can have a very bad effect on the host.
    12091209     */
    1210     if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
    1211     {
    1212         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMaxResume);
     1210    if (RT_UNLIKELY(++cResume > pVM->hm.s.cMaxResumeLoops))
     1211    {
     1212        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
    12131213        rc = VINF_EM_RAW_INTERRUPT;
    12141214        goto end;
     
    12561256     * Check for pending actions that force us to go back to ring-3.
    12571257     */
    1258     if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
     1258    if (    VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
    12591259        ||  VMCPU_FF_ISPENDING(pVCpu,
    1260                                  VMCPU_FF_HWACCM_TO_R3_MASK
     1260                                 VMCPU_FF_HM_TO_R3_MASK
    12611261                               | VMCPU_FF_PGM_SYNC_CR3
    12621262                               | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
     
    12801280#endif
    12811281        {
    1282             if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK)
    1283                 ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK))
     1282            if (    VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK)
     1283                ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    12841284            {
    1285                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchToR3);
     1285                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchToR3);
    12861286                rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
    12871287                goto end;
     
    13261326    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
    13271327    {
    1328         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
     1328        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending);
    13291329        rc = VINF_EM_RAW_INTERRUPT;
    13301330        goto end;
     
    13481348    /** @todo query and update the TPR only when it could have been changed (mmio access)
    13491349     */
    1350     if (pVM->hwaccm.s.fHasIoApic)
     1350    if (pVM->hm.s.fHasIoApic)
    13511351    {
    13521352        /* TPR caching in CR8 */
     
    13551355        AssertRC(rc2);
    13561356
    1357         if (pVM->hwaccm.s.fTPRPatchingActive)
     1357        if (pVM->hm.s.fTPRPatchingActive)
    13581358        {
    13591359            /* Our patch code uses LSTAR for TPR caching. */
     
    13991399
    14001400    /* Enable nested paging if necessary (disabled each time after #VMEXIT). */
    1401     pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hwaccm.s.fNestedPaging;
     1401    pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
    14021402
    14031403#ifdef LOG_ENABLED
    1404     pCpu = HWACCMR0GetCurrentCpu();
    1405     if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
    1406         LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu, pCpu->idCpu));
    1407     else if (pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    1408         LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
     1404    pCpu = HMR0GetCurrentCpu();
     1405    if (pVCpu->hm.s.idLastCpu != pCpu->idCpu)
     1406        LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu, pCpu->idCpu));
     1407    else if (pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     1408        LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
    14091409    else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
    14101410        LogFlow(("Manual TLB flush\n"));
     
    14381438    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
    14391439#endif
    1440     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatEntry, &pVCpu->hwaccm.s.StatInGC, x);
     1440    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
    14411441
    14421442    /* Setup TLB control and ASID in the VMCB. */
     
    14441444
    14451445    /* In case we execute a goto ResumeExecution later on. */
    1446     pVCpu->hwaccm.s.fResumeVM      = true;
    1447     pVCpu->hwaccm.s.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;
    1448 
    1449     Assert(sizeof(pVCpu->hwaccm.s.svm.pVMCBPhys) == 8);
     1446    pVCpu->hm.s.fResumeVM      = true;
     1447    pVCpu->hm.s.fForceTLBFlush = pVM->hm.s.svm.fAlwaysFlushTLB;
     1448
     1449    Assert(sizeof(pVCpu->hm.s.svm.pVMCBPhys) == 8);
    14501450    Assert(pVMCB->ctrl.IntCtrl.n.u1VIrqMasking);
    1451     Assert(pVMCB->ctrl.u64IOPMPhysAddr  == pVM->hwaccm.s.svm.pIOBitmapPhys);
    1452     Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hwaccm.s.svm.pMSRBitmapPhys);
     1451    Assert(pVMCB->ctrl.u64IOPMPhysAddr  == pVM->hm.s.svm.pIOBitmapPhys);
     1452    Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.pMSRBitmapPhys);
    14531453    Assert(pVMCB->ctrl.u64LBRVirt == 0);
    14541454
     
    14621462     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
    14631463     */
    1464     u32HostExtFeatures = pVM->hwaccm.s.cpuid.u32AMDFeatureEDX;
     1464    u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
    14651465    if (    (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
    14661466        && !(pVMCB->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
    14671467    {
    1468         pVCpu->hwaccm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
     1468        pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
    14691469        uint64_t u64GuestTSCAux = 0;
    14701470        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux);
     
    14741474
    14751475#ifdef VBOX_WITH_KERNEL_USING_XMM
    1476     hwaccmR0SVMRunWrapXMM(pVCpu->hwaccm.s.svm.pVMCBHostPhys, pVCpu->hwaccm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu,
    1477                           pVCpu->hwaccm.s.svm.pfnVMRun);
     1476    hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.pVMCBHostPhys, pVCpu->hm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu,
     1477                          pVCpu->hm.s.svm.pfnVMRun);
    14781478#else
    1479     pVCpu->hwaccm.s.svm.pfnVMRun(pVCpu->hwaccm.s.svm.pVMCBHostPhys, pVCpu->hwaccm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu);
    1480 #endif
    1481     ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, false);
    1482     ASMAtomicIncU32(&pVCpu->hwaccm.s.cWorldSwitchExits);
     1479    pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.pVMCBHostPhys, pVCpu->hm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu);
     1480#endif
     1481    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);
     1482    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);
    14831483    /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */
    14841484    if (!(pVMCB->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
     
    14861486        /* Restore host's TSC_AUX. */
    14871487        if (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
    1488             ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hwaccm.s.u64HostTSCAux);
     1488            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux);
    14891489
    14901490        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() +
     
    14931493    TMNotifyEndOfExecution(pVCpu);
    14941494    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
    1495     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatInGC, &pVCpu->hwaccm.s.StatExit1, x);
     1495    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
    14961496    ASMSetFlags(uOldEFlags);
    14971497#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     
    15101510    if (RT_UNLIKELY(exitCode == (uint64_t)SVM_EXIT_INVALID))      /* Invalid guest state. */
    15111511    {
    1512         HWACCMDumpRegs(pVM, pVCpu, pCtx);
     1512        HMDumpRegs(pVM, pVCpu, pCtx);
    15131513#ifdef DEBUG
    15141514        Log(("ctrl.u16InterceptRdCRx            %x\n",      pVMCB->ctrl.u16InterceptRdCRx));
     
    17131713     * unless in the nested paging case where CR3 can be changed by the guest.
    17141714     */
    1715     if (   pVM->hwaccm.s.fNestedPaging
     1715    if (   pVM->hm.s.fNestedPaging
    17161716        && pCtx->cr3 != pVMCB->guest.u64CR3)
    17171717    {
     
    17401740
    17411741    /* Check if an injected event was interrupted prematurely. */
    1742     pVCpu->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
     1742    pVCpu->hm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
    17431743    if (    pVMCB->ctrl.ExitIntInfo.n.u1Valid
    17441744            /* we don't care about 'int xx' as the instruction will be restarted. */
    17451745        &&  pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT)
    17461746    {
    1747         Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));
     1747        Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));
    17481748
    17491749#ifdef LOG_ENABLED
    17501750        SVM_EVENT Event;
    1751         Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
     1751        Event.au64[0] = pVCpu->hm.s.Event.intInfo;
    17521752
    17531753        if (    exitCode == SVM_EXIT_EXCEPTION_E
     
    17581758#endif
    17591759
    1760         pVCpu->hwaccm.s.Event.fPending = true;
     1760        pVCpu->hm.s.Event.fPending = true;
    17611761        /* Error code present? (redundant) */
    17621762        if (pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid)
    1763             pVCpu->hwaccm.s.Event.errCode  = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
     1763            pVCpu->hm.s.Event.errCode  = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
    17641764        else
    1765             pVCpu->hwaccm.s.Event.errCode  = 0;
     1765            pVCpu->hm.s.Event.errCode  = 0;
    17661766    }
    17671767#ifdef VBOX_WITH_STATISTICS
    17681768    if (exitCode == SVM_EXIT_NPF)
    1769         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitReasonNPF);
     1769        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNPF);
    17701770    else
    1771         STAM_COUNTER_INC(&pVCpu->hwaccm.s.paStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]);
     1771        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]);
    17721772#endif
    17731773
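The EXITINTINFO handling a few lines up latches an event whose injection was cut short by the VM-exit so it can be re-injected on the next entry; software interrupts are deliberately skipped because the INTn instruction is simply restarted. A standalone sketch of that capture step (sketch* names are illustrative only):

    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_EVENT_SOFTWARE_INT  4u  /* SVM event type for INTn */

    typedef struct SKETCHEXITINTINFO
    {
        uint64_t u64Raw;           /* raw EXITINTINFO field from the VMCB */
        bool     fValid;
        uint8_t  u3Type;
        bool     fErrorCodeValid;
        uint32_t u32ErrorCode;
    } SKETCHEXITINTINFO;

    typedef struct SKETCHPENDINGEVENT
    {
        bool     fPending;
        uint64_t u64IntInfo;
        uint32_t u32ErrCode;
    } SKETCHPENDINGEVENT;

    /* Remember an interrupted injection for re-delivery on the next entry. */
    static void sketchCaptureInterruptedEvent(const SKETCHEXITINTINFO *pInfo,
                                              SKETCHPENDINGEVENT *pEvent)
    {
        pEvent->u64IntInfo = pInfo->u64Raw;
        if (pInfo->fValid && pInfo->u3Type != SKETCH_EVENT_SOFTWARE_INT)
        {
            pEvent->fPending   = true;
            pEvent->u32ErrCode = pInfo->fErrorCodeValid ? pInfo->u32ErrorCode : 0;
        }
    }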
     
    17751775    if (fSyncTPR)
    17761776    {
    1777         if (pVM->hwaccm.s.fTPRPatchingActive)
     1777        if (pVM->hm.s.fTPRPatchingActive)
    17781778        {
    17791779            if ((pCtx->msrLSTAR & 0xff) != u8LastTPR)
     
    18041804                            pVMCB->ctrl.ExitIntInfo.au64[0], UINT64_MAX);
    18051805#endif
    1806     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatExit1, &pVCpu->hwaccm.s.StatExit2, x);
     1806    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
    18071807
    18081808    /* Deal with the reason of the VM-exit. */
     
    18271827        case X86_XCPT_DB:
    18281828        {
    1829             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDB);
     1829            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
    18301830
    18311831            /* Note that we don't support guest and host-initiated debugging at the same time. */
     
    18611861            {
    18621862                Assert(CPUMIsGuestFPUStateActive(pVCpu));
    1863                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowNM);
     1863                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
    18641864
    18651865                /* Continue execution. */
    1866                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     1866                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    18671867
    18681868                goto ResumeExecution;
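This #NM path is the heart of lazy FPU switching: the guest runs with CR0.TS forced and #NM intercepted, and the first FPU touch loads the guest FPU state and resumes transparently (StatExitShadowNM); only when the guest state is already active is the fault really the guest's (StatExitGuestNM, forwarded below). A reduced sketch of the decision, with invented sketch* names:

    #include <stdbool.h>

    typedef struct SKETCHFPU
    {
        bool fGuestFpuActive;  /* guest FPU state loaded on this host CPU? */
    } SKETCHFPU;

    /* Returns true to resume the guest silently, false to inject #NM. */
    static bool sketchHandleXcptNM(SKETCHFPU *pFpu)
    {
        if (!pFpu->fGuestFpuActive)
        {
            pFpu->fGuestFpuActive = true;  /* load guest FXSAVE image, clear TS */
            return true;                   /* fault was ours, guest never sees it */
        }
        return false;                      /* genuine guest #NM: forward it */
    }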
     
    18701870
    18711871            Log(("Forward #NM fault to the guest\n"));
    1872             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNM);
     1872            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
    18731873
    18741874            Event.au64[0]    = 0;
     
    18871887
    18881888#ifdef VBOX_ALWAYS_TRAP_PF
    1889             if (pVM->hwaccm.s.fNestedPaging)
     1889            if (pVM->hm.s.fNestedPaging)
    18901890            {
    18911891                /*
     
    18941894                Log(("Guest page fault at %04X:%RGv cr2=%RGv error code %x rsp=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip,
    18951895                     uFaultAddress, errCode, (RTGCPTR)pCtx->rsp));
    1896                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestPF);
     1896                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
    18971897
    18981898                /* Now we must update CR2. */
     
    19101910            }
    19111911#endif
    1912             Assert(!pVM->hwaccm.s.fNestedPaging);
    1913 
    1914 #ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
     1912            Assert(!pVM->hm.s.fNestedPaging);
     1913
     1914#ifdef VBOX_HM_WITH_GUEST_PATCHING
    19151915            /* Shortcut for APIC TPR reads and writes; 32 bits guests only */
    1916             if (    pVM->hwaccm.s.fTRPPatchingAllowed
     1916            if (    pVM->hm.s.fTRPPatchingAllowed
    19171917                &&  (uFaultAddress & 0xfff) == 0x080
    19181918                &&  !(errCode & X86_TRAP_PF_P)  /* not present */
    19191919                &&  CPUMGetGuestCPL(pVCpu) == 0
    19201920                &&  !CPUMIsGuestInLongModeEx(pCtx)
    1921                 &&  pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))
     1921                &&  pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
    19221922            {
    19231923                RTGCPHYS GCPhysApicBase, GCPhys;
     
    19301930                {
    19311931                    /* Only attempt to patch the instruction once. */
    1932                     PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
     1932                    PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    19331933                    if (!pPatch)
    19341934                    {
    1935                         rc = VINF_EM_HWACCM_PATCH_TPR_INSTR;
     1935                        rc = VINF_EM_HM_PATCH_TPR_INSTR;
    19361936                        break;
    19371937                    }
     
    19531953                /* We've successfully synced our shadow pages, so let's just continue execution. */
    19541954                Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
    1955                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPF);
     1955                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
    19561956
    19571957                TRPMResetTrap(pVCpu);
     
    19641964                 */
    19651965                Log2(("Forward page fault to the guest\n"));
    1966                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestPF);
     1966                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
    19671967                /* The error code might have been changed. */
    19681968                errCode = TRPMGetErrorCode(pVCpu);
     
    19941994        case X86_XCPT_MF: /* Floating point exception. */
    19951995        {
    1996             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestMF);
     1996            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
    19971997            if (!(pCtx->cr0 & X86_CR0_NE))
    19981998            {
     
    20292029            {
    20302030                case X86_XCPT_GP:
    2031                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestGP);
     2031                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
    20322032                    Event.n.u1ErrorCodeValid    = 1;
    20332033                    Event.n.u32ErrorCode        = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
     
    20372037                    break;
    20382038                case X86_XCPT_DE:
    2039                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE);
     2039                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
    20402040                    break;
    20412041                case X86_XCPT_UD:
    2042                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD);
     2042                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
    20432043                    break;
    20442044                case X86_XCPT_SS:
    2045                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS);
     2045                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
    20462046                    Event.n.u1ErrorCodeValid    = 1;
    20472047                    Event.n.u32ErrorCode        = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
    20482048                    break;
    20492049                case X86_XCPT_NP:
    2050                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP);
     2050                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
    20512051                    Event.n.u1ErrorCodeValid    = 1;
    20522052                    Event.n.u32ErrorCode        = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
     
    20742074        PGMMODE     enmShwPagingMode;
    20752075
    2076         Assert(pVM->hwaccm.s.fNestedPaging);
     2076        Assert(pVM->hm.s.fNestedPaging);
    20772077        LogFlow(("Nested page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode));
    20782078
    2079 #ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
     2079#ifdef VBOX_HM_WITH_GUEST_PATCHING
    20802080        /* Shortcut for APIC TPR reads and writes; 32 bits guests only */
    2081         if (    pVM->hwaccm.s.fTRPPatchingAllowed
     2081        if (    pVM->hm.s.fTRPPatchingAllowed
    20822082            &&  (GCPhysFault & PAGE_OFFSET_MASK) == 0x080
    20832083            &&  (   !(errCode & X86_TRAP_PF_P)  /* not present */
     
    20852085            &&  CPUMGetGuestCPL(pVCpu) == 0
    20862086            &&  !CPUMIsGuestInLongModeEx(pCtx)
    2087             &&  pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))
     2087            &&  pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
    20882088        {
    20892089            RTGCPHYS GCPhysApicBase;
     
    20942094            {
    20952095                /* Only attempt to patch the instruction once. */
    2096                 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
     2096                PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    20972097                if (!pPatch)
    20982098                {
    2099                     rc = VINF_EM_HWACCM_PATCH_TPR_INSTR;
     2099                    rc = VINF_EM_HM_PATCH_TPR_INSTR;
    21002100                    break;
    21012101                }
     
    21532153            /* We've successfully synced our shadow pages, so let's just continue execution. */
    21542154            Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode));
    2155             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPF);
     2155            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
    21562156
    21572157            TRPMResetTrap(pVCpu);
     
    21862186    case SVM_EXIT_WBINVD:
    21872187    case SVM_EXIT_INVD:                 /* Guest software attempted to execute INVD. */
    2188         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInvd);
     2188        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
    21892189        /* Skip instruction and continue directly. */
    21902190        pCtx->rip += 2;     /* Note! hardcoded opcode size! */
     
    21952195    {
    21962196        Log2(("SVM: Cpuid at %RGv for %x\n", (RTGCPTR)pCtx->rip, pCtx->eax));
    2197         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCpuid);
     2197        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
    21982198        rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    21992199        if (rc == VINF_SUCCESS)
     
    22112211    {
    22122212        Log2(("SVM: Rdtsc\n"));
    2213         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtsc);
     2213        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
    22142214        rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    22152215        if (rc == VINF_SUCCESS)
     
    22262226    {
    22272227        Log2(("SVM: Rdpmc %x\n", pCtx->ecx));
    2228         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdpmc);
     2228        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
    22292229        rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    22302230        if (rc == VINF_SUCCESS)
     
    22412241    {
    22422242        Log2(("SVM: Rdtscp\n"));
    2243         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtscp);
     2243        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
    22442244        rc = EMInterpretRdtscp(pVM, pVCpu, pCtx);
    22452245        if (rc == VINF_SUCCESS)
     
    22572257    {
    22582258        Log2(("SVM: invlpg\n"));
    2259         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInvlpg);
    2260 
    2261         Assert(!pVM->hwaccm.s.fNestedPaging);
     2259        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
     2260
     2261        Assert(!pVM->hm.s.fNestedPaging);
    22622262
    22632263        /* Truly a pita. Why can't SVM give the same information as VT-x? */
     
    22652265        if (rc == VINF_SUCCESS)
    22662266        {
    2267             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageInvlpg);
     2267            STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageInvlpg);
    22682268            goto ResumeExecution;   /* eip already updated */
    22692269        }
     
    22772277    {
    22782278        Log2(("SVM: %RGv mov cr%d, \n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_CR0));
    2279         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxWrite[exitCode - SVM_EXIT_WRITE_CR0]);
     2279        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[exitCode - SVM_EXIT_WRITE_CR0]);
    22802280        rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
    22812281
     
    22832283        {
    22842284            case 0:
    2285                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     2285                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    22862286                break;
    22872287            case 2:
    22882288                break;
    22892289            case 3:
    2290                 Assert(!pVM->hwaccm.s.fNestedPaging);
    2291                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
     2290                Assert(!pVM->hm.s.fNestedPaging);
     2291                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
    22922292                break;
    22932293            case 4:
    2294                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
     2294                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
    22952295                break;
    22962296            case 8:
     
    23152315    {
    23162316        Log2(("SVM: %RGv mov x, cr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_CR0));
    2317         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxRead[exitCode - SVM_EXIT_READ_CR0]);
     2317        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[exitCode - SVM_EXIT_READ_CR0]);
    23182318        rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
    23192319        if (rc == VINF_SUCCESS)
     
    23332333    {
    23342334        Log2(("SVM: %RGv mov dr%d, x\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_DR0));
    2335         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxWrite);
     2335        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    23362336
    23372337        if (   !DBGFIsStepping(pVCpu)
    23382338            && !CPUMIsHyperDebugStateActive(pVCpu))
    23392339        {
    2340             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxContextSwitch);
     2340            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
    23412341
    23422342            /* Disable drx move intercepts. */
     
    23542354        {
    23552355            /* EIP has been updated already. */
    2356             pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     2356            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    23572357
    23582358            /* Only resume if successful. */
     
    23692369    {
    23702370        Log2(("SVM: %RGv mov x, dr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_DR0));
    2371         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxRead);
     2371        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    23722372
    23732373        if (!DBGFIsStepping(pVCpu))
    23742374        {
    2375             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxContextSwitch);
     2375            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
    23762376
    23772377            /* Disable DRx move intercepts. */
     
    24152415        {
    24162416            /* ins/outs */
    2417             PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
     2417            PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
    24182418
    24192419            /* Disassemble manually to deal with segment prefixes. */
     
    24242424                {
    24252425                    Log2(("IOMInterpretOUTSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
    2426                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOStringWrite);
     2426                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
    24272427                    rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
    24282428                                            (DISCPUMODE)pDis->uAddrMode, uIOSize);
     
    24312431                {
    24322432                    Log2(("IOMInterpretINSEx  %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
    2433                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOStringRead);
     2433                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
    24342434                    rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
    24352435                                           (DISCPUMODE)pDis->uAddrMode, uIOSize);
     
    24482448                Log2(("IOMIOPortWrite %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal,
    24492449                      uIOSize));
    2450                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOWrite);
     2450                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
    24512451                rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
    24522452                if (rc == VINF_IOM_R3_IOPORT_WRITE)
    24532453                {
    2454                     HWACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
     2454                    HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
    24552455                                                   uAndVal, uIOSize);
    24562456                }
     
    24602460                uint32_t u32Val = 0;
    24612461
    2462                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIORead);
     2462                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
    24632463                rc = IOMIOPortRead(pVM, IoExitInfo.n.u16Port, &u32Val, uIOSize);
    24642464                if (IOM_SUCCESS(rc))
     
    24712471                else if (rc == VINF_IOM_R3_IOPORT_READ)
    24722472                {
    2473                     HWACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
     2473                    HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
    24742474                                                  uAndVal, uIOSize);
    24752475                }
     
    24932493                    static uint32_t const aIOSize[4] = { 1, 2, 0, 4 };
    24942494
    2495                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxIOCheck);
     2495                    STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIOCheck);
    24962496                    for (unsigned i = 0; i < 4; i++)
    24972497                    {
     
    25662566    case SVM_EXIT_HLT:
    25672567        /* Check if external interrupts are pending; if so, don't switch back. */
    2568         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitHlt);
     2568        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
    25692569        pCtx->rip++;    /* skip hlt */
    25702570        if (EMShouldContinueAfterHalt(pVCpu, pCtx))
     
    25762576    case SVM_EXIT_MWAIT_UNCOND:
    25772577        Log2(("SVM: mwait\n"));
    2578         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMwait);
     2578        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
    25792579        rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    25802580        if (    rc == VINF_EM_HALT
     
    25982598        Log2(("SVM: monitor\n"));
    25992599
    2600         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMonitor);
     2600        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
    26012601        rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    26022602        if (rc == VINF_SUCCESS)
     
    26442644    {
    26452645        /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */
    2646         if (    pVM->hwaccm.s.fTPRPatchingActive
     2646        if (    pVM->hm.s.fTPRPatchingActive
    26472647            &&  pCtx->ecx == MSR_K8_LSTAR
    26482648            &&  pVMCB->ctrl.u64ExitInfo1 == 1 /* wrmsr */)
     
    26682668         * so we play safe by completely disassembling the instruction.
    26692669         */
    2670         STAM_COUNTER_INC((pVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hwaccm.s.StatExitRdmsr : &pVCpu->hwaccm.s.StatExitWrmsr);
     2670        STAM_COUNTER_INC((pVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr);
    26712671        Log(("SVM: %s\n", (pVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr"));
    26722672        rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
     
    26852685        Log(("SVM_EXIT_TASK_SWITCH: exit2=%RX64\n", pVMCB->ctrl.u64ExitInfo2));
    26862686        if (    !(pVMCB->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP))
    2687             &&  pVCpu->hwaccm.s.Event.fPending)
     2687            &&  pVCpu->hm.s.Event.fPending)
    26882688        {
    26892689            SVM_EVENT Event;
    2690             Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
     2690            Event.au64[0] = pVCpu->hm.s.Event.intInfo;
    26912691
    26922692            /* Caused by an injected interrupt. */
    2693             pVCpu->hwaccm.s.Event.fPending = false;
     2693            pVCpu->hm.s.Event.fPending = false;
    26942694            switch (Event.n.u3Type)
    26952695            {
     
    27582758    if (exitCode == SVM_EXIT_INTR)
    27592759    {
    2760         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatPendingHostIrq);
     2760        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
    27612761        /* On the next entry we'll only sync the host context. */
    2762         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
     2762        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
    27632763    }
    27642764    else
     
    27672767        /** @todo we can do better than this */
    27682768        /* Not in the VINF_PGM_CHANGE_MODE though! */
    2769         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
     2769        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL;
    27702770    }
    27712771
     
    27832783#endif
    27842784
    2785     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2, x);
    2786     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
    2787     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
     2785    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
     2786    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
     2787    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
    27882788    return VBOXSTRICTRC_TODO(rc);
    27892789}
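SVMR0RunGuestCode above is, structurally, a capped resume loop around a world switch and a large exit-code dispatch. The shape is easier to see stripped of all state handling; the following sketch keeps only that skeleton (sketch* names and status values are illustrative, not VBox definitions):

    #include <stdint.h>

    typedef int SKETCHRC;
    #define SKETCH_VINF_SUCCESS           0
    #define SKETCH_VINF_EM_RAW_INTERRUPT  2   /* illustrative status value */

    enum { SKETCH_EXIT_INTR = 0x60, SKETCH_EXIT_CPUID = 0x72 };

    /* Enter the guest, dispatch on the exit code; handlers either keep
       looping or return a status for ring-3. */
    static SKETCHRC sketchRunGuestCode(uint32_t cMaxResumeLoops,
                                       uint64_t (*pfnEnterGuest)(void),
                                       SKETCHRC (*pfnHandleCpuid)(void))
    {
        for (uint32_t cResume = 0; ++cResume <= cMaxResumeLoops; )
        {
            uint64_t const uExitCode = pfnEnterGuest();  /* world switch */
            switch (uExitCode)
            {
                case SKETCH_EXIT_CPUID:
                {
                    SKETCHRC rc = pfnHandleCpuid();      /* emulate, then loop */
                    if (rc != SKETCH_VINF_SUCCESS)
                        return rc;
                    break;
                }
                case SKETCH_EXIT_INTR:                   /* host interrupt */
                default:
                    return SKETCH_VINF_EM_RAW_INTERRUPT; /* back to ring-3 */
            }
        }
        return SKETCH_VINF_EM_RAW_INTERRUPT;             /* resume cap hit */
    }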
     
    28092809        uint8_t u8Tpr;
    28102810
    2811         PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
     2811        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    28122812        if (!pPatch)
    28132813            break;
     
    28152815        switch (pPatch->enmType)
    28162816        {
    2817             case HWACCMTPRINSTR_READ:
     2817            case HMTPRINSTR_READ:
    28182818                /* TPR caching in CR8 */
    28192819                rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending);
     
    28272827                break;
    28282828
    2829             case HWACCMTPRINSTR_WRITE_REG:
    2830             case HWACCMTPRINSTR_WRITE_IMM:
     2829            case HMTPRINSTR_WRITE_REG:
     2830            case HMTPRINSTR_WRITE_IMM:
    28312831                /* Fetch the new TPR value */
    2832                 if (pPatch->enmType == HWACCMTPRINSTR_WRITE_REG)
     2832                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
    28332833                {
    28342834                    uint32_t val;
     
    28652865VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
    28662866{
    2867     Assert(pVM->hwaccm.s.svm.fSupported);
    2868 
    2869     LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hwaccm.s.idLastCpu, pVCpu->hwaccm.s.uCurrentASID));
    2870     pVCpu->hwaccm.s.fResumeVM = false;
     2867    Assert(pVM->hm.s.svm.fSupported);
     2868
     2869    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.uCurrentASID));
     2870    pVCpu->hm.s.fResumeVM = false;
    28712871
    28722872    /* Force to reload LDTR, so we'll execute VMLoad to load additional guest state. */
    2873     pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;
     2873    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_LDTR;
    28742874
    28752875    return VINF_SUCCESS;
     
    28872887VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    28882888{
    2889     SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
    2890 
    2891     Assert(pVM->hwaccm.s.svm.fSupported);
     2889    SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB;
     2890
     2891    Assert(pVM->hm.s.svm.fSupported);
    28922892
    28932893#ifdef DEBUG
     
    29082908
    29092909        /* Resync the debug registers the next time. */
    2910         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     2910        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    29112911    }
    29122912    else
     
    29792979    if (CPUMGetGuestCodeBits(pVCpu) != 16)
    29802980    {
    2981         PDISSTATE pDis = &pVCpu->hwaccm.s.DisState;
     2981        PDISSTATE pDis = &pVCpu->hm.s.DisState;
    29822982        int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
    29832983        if (RT_SUCCESS(rc) && pDis->pCurInstr->uOpcode == OP_INVLPG)
     
    30033003VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
    30043004{
    3005     bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH);
     3005    bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB | VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH);
    30063006
    30073007    /* Skip it if a TLB flush is already pending. */
     
    30123012        Log2(("SVMR0InvalidatePage %RGv\n", GCVirt));
    30133013        AssertReturn(pVM, VERR_INVALID_PARAMETER);
    3014         Assert(pVM->hwaccm.s.svm.fSupported);
    3015 
    3016         pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
     3014        Assert(pVM->hm.s.svm.fSupported);
     3015
     3016        pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB;
    30173017        AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB);
    30183018
     
    30403040VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
    30413041{
    3042     Assert(pVM->hwaccm.s.fNestedPaging);
     3042    Assert(pVM->hm.s.fNestedPaging);
    30433043    /* invlpga only invalidates TLB entries for guest virtual addresses; we have no choice but to force a TLB flush here. */
    30443044    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    3045     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBInvlpga);
     3045    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBInvlpga);
    30463046    return VINF_SUCCESS;
    30473047}
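SVMR0InvalidatePhysPage works around a hardware limitation: INVLPGA takes guest-virtual addresses only, so a guest-physical page under nested paging cannot be invalidated selectively and the code queues a full TLB flush instead. The whole function reduces to this sketch (sketch* names invented):

    #include <stdbool.h>

    typedef struct SKETCHVCPUTLB
    {
        bool     fTlbFlushPending;  /* stands in for VMCPU_FF_TLB_FLUSH */
        unsigned cFlushTlbInvlpga;  /* statistics counter, as above */
    } SKETCHVCPUTLB;

    /* No selective guest-physical invalidation: flush everything next entry. */
    static void sketchInvalidatePhysPage(SKETCHVCPUTLB *pVCpu)
    {
        pVCpu->fTlbFlushPending = true;
        pVCpu->cFlushTlbInvlpga++;
    }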
     
    30693069    aParam[3] = (uint32_t)(pVMCBPhys >> 32);                /* Param 2: pVMCBPhys - Hi. */
    30703070
    3071     return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSVMGCVMRun64, 4, &aParam[0]);
     3071    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSVMGCVMRun64, 4, &aParam[0]);
    30723072}
    30733073
     
    31053105        CPUMPushHyper(pVCpu, paParam[i]);
    31063106
    3107     STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     3107    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
    31083108    /* Call switcher. */
    3109     rc = pVM->hwaccm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
    3110     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     3109    rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
     3110    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
    31113111
    31123112    ASMSetFlags(uOldEFlags);
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.h

    r43307 r43387  
    2424#include <VBox/vmm/stam.h>
    2525#include <VBox/dis.h>
    26 #include <VBox/vmm/hwaccm.h>
     26#include <VBox/vmm/hm.h>
    2727#include <VBox/vmm/pgm.h>
    28 #include <VBox/vmm/hwacc_svm.h>
     28#include <VBox/vmm/hm_svm.h>
    2929
    3030RT_C_DECLS_BEGIN
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r43379 r43387  
    2020*   Header Files                                                               *
    2121*******************************************************************************/
    22 #define LOG_GROUP LOG_GROUP_HWACCM
     22#define LOG_GROUP LOG_GROUP_HM
    2323#include <iprt/asm-amd64-x86.h>
    24 #include <VBox/vmm/hwaccm.h>
     24#include <VBox/vmm/hm.h>
    2525#include <VBox/vmm/pgm.h>
    2626#include <VBox/vmm/dbgf.h>
     
    3232#endif
    3333#include <VBox/vmm/tm.h>
    34 #include "HWACCMInternal.h"
     34#include "HMInternal.h"
    3535#include <VBox/vmm/vm.h>
    3636#include <VBox/vmm/pdmapi.h>
     
    7070
    7171#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    72 /** See HWACCMR0A.asm. */
     72/** See HMR0A.asm. */
    7373extern "C" uint32_t g_fVMXIs64bitHost;
    7474#endif
     
    9090
    9191/**
    92  * Updates error from VMCS to HWACCMCPU's lasterror record.
     92 * Updates error from VMCS to HMCPU's lasterror record.
    9393 *
    9494 * @param    pVM            Pointer to the VM.
     
    103103
    104104        VMXReadVMCS(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError);
    105         pVCpu->hwaccm.s.vmx.lasterror.ulInstrError = instrError;
    106     }
    107     pVM->hwaccm.s.lLastError = rc;
     105        pVCpu->hm.s.vmx.lasterror.ulInstrError = instrError;
     106    }
     107    pVM->hm.s.lLastError = rc;
    108108}
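The helper above reads the 32-bit VM-instruction error field of the current VMCS (field encoding 0x4400 in the Intel SDM) whenever a VMX instruction failed, and latches it next to the last status code. A sketch of that latch, with sketchVmxReadVmcs32 as a hypothetical VMREAD wrapper and the failure code passed in rather than hard-wired:

    #include <stdint.h>

    #define SKETCH_VMCS32_RO_VM_INSTR_ERROR  UINT32_C(0x4400)

    extern int sketchVmxReadVmcs32(uint32_t uFieldEnc, uint32_t *pu32);  /* hypothetical */

    typedef struct SKETCHVMXLASTERROR
    {
        uint32_t u32InstrError;  /* VM-instruction error number */
        int      lLastError;     /* last status code */
    } SKETCHVMXLASTERROR;

    /* Only read the error field when a VMX instruction actually failed. */
    static void sketchUpdateLastError(SKETCHVMXLASTERROR *pErr, int rc, int rcVmxFailed)
    {
        if (rc == rcVmxFailed)
            sketchVmxReadVmcs32(SKETCH_VMCS32_RO_VM_INSTR_ERROR, &pErr->u32InstrError);
        pErr->lLastError = rc;
    }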
    109109
     
    130130        {
    131131            /* Set revision dword at the beginning of the VMXON structure. */
    132             *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
     132            *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
    133133        }
    134134
     
    165165     */
    166166    if (   pVM
    167         && pVM->hwaccm.s.vmx.fVPID
    168         && (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS))
     167        && pVM->hm.s.vmx.fVPID
     168        && (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS))
    169169    {
    170170        hmR0VmxFlushVPID(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
     
    224224#endif
    225225
    226     pVM->hwaccm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;
    227 
    228     if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     226    pVM->hm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;
     227
     228    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    229229    {
    230230        /* Allocate one page for the APIC physical page (serves for filtering accesses). */
    231         rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjAPIC, PAGE_SIZE, false /* executable R0 mapping */);
     231        rc = RTR0MemObjAllocCont(&pVM->hm.s.vmx.pMemObjAPIC, PAGE_SIZE, false /* executable R0 mapping */);
    232232        AssertRC(rc);
    233233        if (RT_FAILURE(rc))
    234234            return rc;
    235235
    236         pVM->hwaccm.s.vmx.pAPIC     = (uint8_t *)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjAPIC);
    237         pVM->hwaccm.s.vmx.pAPICPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjAPIC, 0);
    238         ASMMemZero32(pVM->hwaccm.s.vmx.pAPIC, PAGE_SIZE);
     236        pVM->hm.s.vmx.pAPIC     = (uint8_t *)RTR0MemObjAddress(pVM->hm.s.vmx.pMemObjAPIC);
     237        pVM->hm.s.vmx.pAPICPhys = RTR0MemObjGetPagePhysAddr(pVM->hm.s.vmx.pMemObjAPIC, 0);
     238        ASMMemZero32(pVM->hm.s.vmx.pAPIC, PAGE_SIZE);
    239239    }
    240240    else
    241241    {
    242         pVM->hwaccm.s.vmx.pMemObjAPIC = 0;
    243         pVM->hwaccm.s.vmx.pAPIC       = 0;
    244         pVM->hwaccm.s.vmx.pAPICPhys   = 0;
     242        pVM->hm.s.vmx.pMemObjAPIC = 0;
     243        pVM->hm.s.vmx.pAPIC       = 0;
     244        pVM->hm.s.vmx.pAPICPhys   = 0;
    245245    }
    246246
    247247#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    248248    {
    249         rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjScratch, PAGE_SIZE, false /* executable R0 mapping */);
     249        rc = RTR0MemObjAllocCont(&pVM->hm.s.vmx.pMemObjScratch, PAGE_SIZE, false /* executable R0 mapping */);
    250250        AssertRC(rc);
    251251        if (RT_FAILURE(rc))
    252252            return rc;
    253253
    254         pVM->hwaccm.s.vmx.pScratch     = (uint8_t *)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjScratch);
    255         pVM->hwaccm.s.vmx.pScratchPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjScratch, 0);
    256 
    257         ASMMemZero32(pVM->hwaccm.s.vmx.pScratch, PAGE_SIZE);
    258         strcpy((char *)pVM->hwaccm.s.vmx.pScratch, "SCRATCH Magic");
    259         *(uint64_t *)(pVM->hwaccm.s.vmx.pScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);
     254        pVM->hm.s.vmx.pScratch     = (uint8_t *)RTR0MemObjAddress(pVM->hm.s.vmx.pMemObjScratch);
     255        pVM->hm.s.vmx.pScratchPhys = RTR0MemObjGetPagePhysAddr(pVM->hm.s.vmx.pMemObjScratch, 0);
     256
     257        ASMMemZero32(pVM->hm.s.vmx.pScratch, PAGE_SIZE);
     258        strcpy((char *)pVM->hm.s.vmx.pScratch, "SCRATCH Magic");
     259        *(uint64_t *)(pVM->hm.s.vmx.pScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);
    260260    }
    261261#endif
     
    266266        PVMCPU pVCpu = &pVM->aCpus[i];
    267267
    268         pVCpu->hwaccm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;
     268        pVCpu->hm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;
    269269
    270270        /* Allocate one page for the VM control structure (VMCS). */
    271         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.hMemObjVMCS, PAGE_SIZE, false /* executable R0 mapping */);
     271        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjVMCS, PAGE_SIZE, false /* executable R0 mapping */);
    272272        AssertRC(rc);
    273273        if (RT_FAILURE(rc))
    274274            return rc;
    275275
    276         pVCpu->hwaccm.s.vmx.pvVMCS     = RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.hMemObjVMCS);
    277         pVCpu->hwaccm.s.vmx.HCPhysVMCS = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.hMemObjVMCS, 0);
    278         ASMMemZeroPage(pVCpu->hwaccm.s.vmx.pvVMCS);
    279 
    280         pVCpu->hwaccm.s.vmx.cr0_mask = 0;
    281         pVCpu->hwaccm.s.vmx.cr4_mask = 0;
     276        pVCpu->hm.s.vmx.pvVMCS     = RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjVMCS);
     277        pVCpu->hm.s.vmx.HCPhysVMCS = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjVMCS, 0);
     278        ASMMemZeroPage(pVCpu->hm.s.vmx.pvVMCS);
     279
     280        pVCpu->hm.s.vmx.cr0_mask = 0;
     281        pVCpu->hm.s.vmx.cr4_mask = 0;
    282282
    283283        /* Allocate one page for the virtual APIC page for TPR caching. */
    284         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.hMemObjVAPIC, PAGE_SIZE, false /* executable R0 mapping */);
     284        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjVAPIC, PAGE_SIZE, false /* executable R0 mapping */);
    285285        AssertRC(rc);
    286286        if (RT_FAILURE(rc))
    287287            return rc;
    288288
    289         pVCpu->hwaccm.s.vmx.pbVAPIC     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.hMemObjVAPIC);
    290         pVCpu->hwaccm.s.vmx.HCPhysVAPIC = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.hMemObjVAPIC, 0);
    291         ASMMemZeroPage(pVCpu->hwaccm.s.vmx.pbVAPIC);
     289        pVCpu->hm.s.vmx.pbVAPIC     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjVAPIC);
     290        pVCpu->hm.s.vmx.HCPhysVAPIC = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjVAPIC, 0);
     291        ASMMemZeroPage(pVCpu->hm.s.vmx.pbVAPIC);
    292292
    293293        /* Allocate the MSR bitmap if this feature is supported. */
    294         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    295         {
    296             rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, PAGE_SIZE, false /* executable R0 mapping */);
     294        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     295        {
     296            rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.pMemObjMSRBitmap, PAGE_SIZE, false /* executable R0 mapping */);
    297297            AssertRC(rc);
    298298            if (RT_FAILURE(rc))
    299299                return rc;
    300300
    301             pVCpu->hwaccm.s.vmx.pMSRBitmap     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap);
    302             pVCpu->hwaccm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, 0);
    303             memset(pVCpu->hwaccm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE);
     301            pVCpu->hm.s.vmx.pMSRBitmap     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.pMemObjMSRBitmap);
     302            pVCpu->hm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.pMemObjMSRBitmap, 0);
     303            memset(pVCpu->hm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE);
    304304        }
    305305
    306306#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    307307        /* Allocate one page for the guest MSR load area (for preloading guest MSRs during the world switch). */
    308         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, PAGE_SIZE, false /* executable R0 mapping */);
     308        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.pMemObjGuestMSR, PAGE_SIZE, false /* executable R0 mapping */);
    309309        AssertRC(rc);
    310310        if (RT_FAILURE(rc))
    311311            return rc;
    312312
    313         pVCpu->hwaccm.s.vmx.pGuestMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR);
    314         pVCpu->hwaccm.s.vmx.pGuestMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, 0);
    315         Assert(!(pVCpu->hwaccm.s.vmx.pGuestMSRPhys & 0xf));
    316         memset(pVCpu->hwaccm.s.vmx.pGuestMSR, 0, PAGE_SIZE);
     313        pVCpu->hm.s.vmx.pGuestMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.pMemObjGuestMSR);
     314        pVCpu->hm.s.vmx.pGuestMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.pMemObjGuestMSR, 0);
     315        Assert(!(pVCpu->hm.s.vmx.pGuestMSRPhys & 0xf));
     316        memset(pVCpu->hm.s.vmx.pGuestMSR, 0, PAGE_SIZE);
    317317
    318318        /* Allocate one page for the host MSR load area (for restoring host MSRs after the world switch back). */
    319         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjHostMSR, PAGE_SIZE, false /* executable R0 mapping */);
     319        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.pMemObjHostMSR, PAGE_SIZE, false /* executable R0 mapping */);
    320320        AssertRC(rc);
    321321        if (RT_FAILURE(rc))
    322322            return rc;
    323323
    324         pVCpu->hwaccm.s.vmx.pHostMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjHostMSR);
    325         pVCpu->hwaccm.s.vmx.pHostMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, 0);
    326         Assert(!(pVCpu->hwaccm.s.vmx.pHostMSRPhys & 0xf));
    327         memset(pVCpu->hwaccm.s.vmx.pHostMSR, 0, PAGE_SIZE);
     324        pVCpu->hm.s.vmx.pHostMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.pMemObjHostMSR);
     325        pVCpu->hm.s.vmx.pHostMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.pMemObjHostMSR, 0);
     326        Assert(!(pVCpu->hm.s.vmx.pHostMSRPhys & 0xf));
     327        memset(pVCpu->hm.s.vmx.pHostMSR, 0, PAGE_SIZE);
    328328#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    329329
    330330        /* Current guest paging mode. */
    331         pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
     331        pVCpu->hm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
    332332
    333333#ifdef LOG_ENABLED
    334         SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVCpu->hwaccm.s.vmx.pvVMCS, (uint32_t)pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     334        SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVCpu->hm.s.vmx.pvVMCS, (uint32_t)pVCpu->hm.s.vmx.HCPhysVMCS);
    335335#endif
    336336    }
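
Every page allocated in the hunks above follows one pattern: grab a physically contiguous page, record both its ring-0 virtual address and its host-physical address (the VMCS fields want the latter), and zero it. A minimal sketch of that pattern, assuming the IPRT ring-0 memory API used above is in scope; the VmxPage struct and vmxPageAlloc name are illustrative, not from the source:

    /* Hedged sketch of the per-page allocation pattern above; VmxPage and
     * vmxPageAlloc are illustrative names, the IPRT calls are the real ones. */
    #include <iprt/memobj.h>
    #include <iprt/asm.h>
    #include <iprt/param.h>
    #include <iprt/err.h>

    typedef struct VmxPage
    {
        RTR0MEMOBJ hMemObj;   /* Allocation handle; NIL_RTR0MEMOBJ when unused. */
        uint8_t   *pbPage;    /* Ring-0 mapping of the page. */
        RTHCPHYS   HCPhys;    /* Host-physical address for the VMCS fields. */
    } VmxPage;

    static int vmxPageAlloc(VmxPage *pPage)
    {
        int rc = RTR0MemObjAllocCont(&pPage->hMemObj, PAGE_SIZE, false /* executable R0 mapping */);
        if (RT_FAILURE(rc))
            return rc;
        pPage->pbPage = (uint8_t *)RTR0MemObjAddress(pPage->hMemObj);
        pPage->HCPhys = RTR0MemObjGetPagePhysAddr(pPage->hMemObj, 0 /* iPage */);
        ASMMemZeroPage(pPage->pbPage);   /* VT-x expects zero-initialized pages. */
        return VINF_SUCCESS;
    }

The termination hunks below are the mirror image: RTR0MemObjFree() releases each object and every cached pointer is reset to NIL/0 so a repeated call stays harmless.
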
     
    352352        PVMCPU pVCpu = &pVM->aCpus[i];
    353353
    354         if (pVCpu->hwaccm.s.vmx.hMemObjVMCS != NIL_RTR0MEMOBJ)
    355         {
    356             RTR0MemObjFree(pVCpu->hwaccm.s.vmx.hMemObjVMCS, false);
    357             pVCpu->hwaccm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;
    358             pVCpu->hwaccm.s.vmx.pvVMCS      = 0;
    359             pVCpu->hwaccm.s.vmx.HCPhysVMCS  = 0;
    360         }
    361         if (pVCpu->hwaccm.s.vmx.hMemObjVAPIC != NIL_RTR0MEMOBJ)
    362         {
    363             RTR0MemObjFree(pVCpu->hwaccm.s.vmx.hMemObjVAPIC, false);
    364             pVCpu->hwaccm.s.vmx.hMemObjVAPIC = NIL_RTR0MEMOBJ;
    365             pVCpu->hwaccm.s.vmx.pbVAPIC      = 0;
    366             pVCpu->hwaccm.s.vmx.HCPhysVAPIC  = 0;
    367         }
    368         if (pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
    369         {
    370             RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, false);
    371             pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
    372             pVCpu->hwaccm.s.vmx.pMSRBitmap       = 0;
    373             pVCpu->hwaccm.s.vmx.pMSRBitmapPhys   = 0;
     354        if (pVCpu->hm.s.vmx.hMemObjVMCS != NIL_RTR0MEMOBJ)
     355        {
     356            RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjVMCS, false);
     357            pVCpu->hm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;
     358            pVCpu->hm.s.vmx.pvVMCS      = 0;
     359            pVCpu->hm.s.vmx.HCPhysVMCS  = 0;
     360        }
     361        if (pVCpu->hm.s.vmx.hMemObjVAPIC != NIL_RTR0MEMOBJ)
     362        {
     363            RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjVAPIC, false);
     364            pVCpu->hm.s.vmx.hMemObjVAPIC = NIL_RTR0MEMOBJ;
     365            pVCpu->hm.s.vmx.pbVAPIC      = 0;
     366            pVCpu->hm.s.vmx.HCPhysVAPIC  = 0;
     367        }
     368        if (pVCpu->hm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
     369        {
     370            RTR0MemObjFree(pVCpu->hm.s.vmx.pMemObjMSRBitmap, false);
     371            pVCpu->hm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
     372            pVCpu->hm.s.vmx.pMSRBitmap       = 0;
     373            pVCpu->hm.s.vmx.pMSRBitmapPhys   = 0;
    374374        }
    375375#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    376         if (pVCpu->hwaccm.s.vmx.pMemObjHostMSR != NIL_RTR0MEMOBJ)
    377         {
    378             RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, false);
    379             pVCpu->hwaccm.s.vmx.pMemObjHostMSR = NIL_RTR0MEMOBJ;
    380             pVCpu->hwaccm.s.vmx.pHostMSR       = 0;
    381             pVCpu->hwaccm.s.vmx.pHostMSRPhys   = 0;
    382         }
    383         if (pVCpu->hwaccm.s.vmx.pMemObjGuestMSR != NIL_RTR0MEMOBJ)
    384         {
    385             RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, false);
    386             pVCpu->hwaccm.s.vmx.pMemObjGuestMSR = NIL_RTR0MEMOBJ;
    387             pVCpu->hwaccm.s.vmx.pGuestMSR       = 0;
    388             pVCpu->hwaccm.s.vmx.pGuestMSRPhys   = 0;
     376        if (pVCpu->hm.s.vmx.pMemObjHostMSR != NIL_RTR0MEMOBJ)
     377        {
     378            RTR0MemObjFree(pVCpu->hm.s.vmx.pMemObjHostMSR, false);
     379            pVCpu->hm.s.vmx.pMemObjHostMSR = NIL_RTR0MEMOBJ;
     380            pVCpu->hm.s.vmx.pHostMSR       = 0;
     381            pVCpu->hm.s.vmx.pHostMSRPhys   = 0;
     382        }
     383        if (pVCpu->hm.s.vmx.pMemObjGuestMSR != NIL_RTR0MEMOBJ)
     384        {
     385            RTR0MemObjFree(pVCpu->hm.s.vmx.pMemObjGuestMSR, false);
     386            pVCpu->hm.s.vmx.pMemObjGuestMSR = NIL_RTR0MEMOBJ;
     387            pVCpu->hm.s.vmx.pGuestMSR       = 0;
     388            pVCpu->hm.s.vmx.pGuestMSRPhys   = 0;
    389389        }
    390390#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    391391    }
    392     if (pVM->hwaccm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ)
    393     {
    394         RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjAPIC, false);
    395         pVM->hwaccm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;
    396         pVM->hwaccm.s.vmx.pAPIC       = 0;
    397         pVM->hwaccm.s.vmx.pAPICPhys   = 0;
     392    if (pVM->hm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ)
     393    {
     394        RTR0MemObjFree(pVM->hm.s.vmx.pMemObjAPIC, false);
     395        pVM->hm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;
     396        pVM->hm.s.vmx.pAPIC       = 0;
     397        pVM->hm.s.vmx.pAPICPhys   = 0;
    398398    }
    399399#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    400     if (pVM->hwaccm.s.vmx.pMemObjScratch != NIL_RTR0MEMOBJ)
    401     {
    402         ASMMemZero32(pVM->hwaccm.s.vmx.pScratch, PAGE_SIZE);
    403         RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjScratch, false);
    404         pVM->hwaccm.s.vmx.pMemObjScratch = NIL_RTR0MEMOBJ;
    405         pVM->hwaccm.s.vmx.pScratch       = 0;
    406         pVM->hwaccm.s.vmx.pScratchPhys   = 0;
     400    if (pVM->hm.s.vmx.pMemObjScratch != NIL_RTR0MEMOBJ)
     401    {
     402        ASMMemZero32(pVM->hm.s.vmx.pScratch, PAGE_SIZE);
     403        RTR0MemObjFree(pVM->hm.s.vmx.pMemObjScratch, false);
     404        pVM->hm.s.vmx.pMemObjScratch = NIL_RTR0MEMOBJ;
     405        pVM->hm.s.vmx.pScratch       = 0;
     406        pVM->hm.s.vmx.pScratchPhys   = 0;
    407407    }
    408408#endif
     
    424424    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    425425
    426     /* Initialize these always, see hwaccmR3InitFinalizeR0().*/
    427     pVM->hwaccm.s.vmx.enmFlushEPT  = VMX_FLUSH_EPT_NONE;
    428     pVM->hwaccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NONE;
     426    /* Initialize these always, see hmR3InitFinalizeR0().*/
     427    pVM->hm.s.vmx.enmFlushEPT  = VMX_FLUSH_EPT_NONE;
     428    pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NONE;
    429429
    430430    /* Determine optimal flush type for EPT. */
    431     if (pVM->hwaccm.s.fNestedPaging)
    432     {
    433         if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
    434         {
    435             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)
    436                 pVM->hwaccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT;
    437             else if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)
    438                 pVM->hwaccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS;
     431    if (pVM->hm.s.fNestedPaging)
     432    {
     433        if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
     434        {
     435            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)
     436                pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT;
     437            else if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)
     438                pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS;
    439439            else
    440440            {
     
     443443                 * We cannot ignore EPT at this point as we've already set up Unrestricted Guest execution.
    444444                 */
    445                 pVM->hwaccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;
     445                pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;
    446446                return VERR_VMX_GENERIC;
    447447            }
     
     452452             * Should never really happen: EPT is supported, but the INVEPT instruction is not.
    453453             */
    454             pVM->hwaccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;
     454            pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;
    455455            return VERR_VMX_GENERIC;
    456456        }
     
    458458
    459459    /* Determine optimal flush type for VPID. */
    460     if (pVM->hwaccm.s.vmx.fVPID)
    461     {
    462         if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
    463         {
    464             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
    465                 pVM->hwaccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT;
    466             else if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)
    467                 pVM->hwaccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS;
     460    if (pVM->hm.s.vmx.fVPID)
     461    {
     462        if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
     463        {
     464            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
     465                pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT;
     466            else if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)
     467                pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS;
    468468            else
    469469            {
     
    472472                 * We do not handle other flush type combinations, ignore VPID capabilities.
    473473                 */
    474                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
     474                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
    475475                    Log(("VMXR0SetupVM: Only VMX_FLUSH_VPID_INDIV_ADDR supported. Ignoring VPID.\n"));
    476                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS)
     476                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS)
    477477                    Log(("VMXR0SetupVM: Only VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
    478                 pVM->hwaccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;
    479                 pVM->hwaccm.s.vmx.fVPID = false;
     478                pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;
     479                pVM->hm.s.vmx.fVPID = false;
    480480            }
    481481        }
     
    487487             */
    488488            Log(("VMXR0SetupVM: VPID supported without INVEPT support. Ignoring VPID.\n"));
    489             pVM->hwaccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;
    490             pVM->hwaccm.s.vmx.fVPID = false;
     489            pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;
     490            pVM->hm.s.vmx.fVPID = false;
    491491        }
    492492    }
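
Both capability walks above share one shape: read the EPT/VPID capability MSR once, prefer the cheapest single-context flush, fall back to flushing all contexts, and disable the feature (or fail setup, in the EPT case) when neither variant is usable. Sketched for the EPT side, assuming the constants used above are in scope; the helper name and plain int return are illustrative:

    /* Hedged sketch of the EPT flush-type decision above. */
    static int vmxPickEptFlushType(uint64_t u64EptVpidCaps)
    {
        if (!(u64EptVpidCaps & MSR_IA32_VMX_EPT_CAPS_INVEPT))
            return VMX_FLUSH_EPT_NOT_SUPPORTED;      /* Caller must fail setup. */
        if (u64EptVpidCaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)
            return VMX_FLUSH_EPT_SINGLE_CONTEXT;     /* Cheapest: flush one EPT context. */
        if (u64EptVpidCaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)
            return VMX_FLUSH_EPT_ALL_CONTEXTS;       /* Fallback: flush them all. */
        return VMX_FLUSH_EPT_NOT_SUPPORTED;          /* INVEPT without a usable type. */
    }
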
     
    496496        PVMCPU pVCpu = &pVM->aCpus[i];
    497497
    498         AssertPtr(pVCpu->hwaccm.s.vmx.pvVMCS);
     498        AssertPtr(pVCpu->hm.s.vmx.pvVMCS);
    499499
    500500        /* Set revision dword at the beginning of the VMCS structure. */
    501         *(uint32_t *)pVCpu->hwaccm.s.vmx.pvVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
     501        *(uint32_t *)pVCpu->hm.s.vmx.pvVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
    502502
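
That store satisfies a hard VT-x rule: the first dword of every VMCS region must carry the revision identifier from bits 30:0 of the IA32_VMX_BASIC MSR before VMCLEAR/VMPTRLD (VMXClearVMCS/VMXActivateVMCS below) will accept the region. A hedged sketch of just that extraction; the MSR_IA32_VMX_BASIC_INFO_VMCS_ID macro above performs the same masking:

    #include <stdint.h>

    /* Stamp a zeroed VMCS region with the revision ID from IA32_VMX_BASIC. */
    static void vmxStampVmcsRevision(void *pvVmcs, uint64_t u64VmxBasicMsr)
    {
        *(uint32_t *)pvVmcs = (uint32_t)(u64VmxBasicMsr & UINT32_C(0x7fffffff));
    }
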
    503503        /*
    504504         * Clear and activate the VMCS.
    505505         */
    506         Log(("HCPhysVMCS  = %RHp\n", pVCpu->hwaccm.s.vmx.HCPhysVMCS));
    507         rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     506        Log(("HCPhysVMCS  = %RHp\n", pVCpu->hm.s.vmx.HCPhysVMCS));
     507        rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    508508        if (RT_FAILURE(rc))
    509509            goto vmx_end;
    510510
    511         rc = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     511        rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    512512        if (RT_FAILURE(rc))
    513513            goto vmx_end;
     
    517517         * Set required bits to one and zero according to the MSR capabilities.
    518518         */
    519         val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
     519        val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
    520520        val |=    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT      /* External interrupts */
    521521                | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;         /* Non-maskable interrupts */
     
    524524         * Enable the VMX preemption timer.
    525525         */
    526         if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
     526        if (pVM->hm.s.vmx.fUsePreemptTimer)
    527527            val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER;
    528         val &= pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
     528        val &= pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
    529529
    530530        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, val);
     
    535535         * Set required bits to one and zero according to the MSR capabilities.
    536536         */
    537         val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
     537        val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
     538538        /* Program which events cause VM-exits and which features we want to use. */
    539539        val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT
     
    547547
    548548        /* Without nested paging we should intercept invlpg and cr3 mov instructions. */
    549         if (!pVM->hwaccm.s.fNestedPaging)
     549        if (!pVM->hm.s.fNestedPaging)
    550550        {
    551551            val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
     
    558558         * failure with an invalid control fields error. (combined with some other exit reasons)
    559559         */
    560         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     560        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    561561        {
     562562            /* CR8 reads from the APIC shadow page; writes cause an exit if they lower the TPR below the threshold. */
    563563            val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW;
    564             Assert(pVM->hwaccm.s.vmx.pAPIC);
     564            Assert(pVM->hm.s.vmx.pAPIC);
    565565        }
    566566        else
     
    568568            val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT;
    569569
    570         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    571         {
    572             Assert(pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
     570        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     571        {
     572            Assert(pVCpu->hm.s.vmx.pMSRBitmapPhys);
    573573            val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
    574574        }
     
    579579        /* Mask away the bits that the CPU doesn't support */
    580580        /** @todo make sure they don't conflict with the above requirements. */
    581         val &= pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
    582         pVCpu->hwaccm.s.vmx.proc_ctls = val;
     581        val &= pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
     582        pVCpu->hm.s.vmx.proc_ctls = val;
    583583
    584584        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, val);
    585585        AssertRC(rc);
    586586
    587         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     587        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    588588        {
    589589            /*
     
    591591             * Set required bits to one and zero according to the MSR capabilities.
    592592             */
    593             val  = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
     593            val  = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
    594594            val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;
    595595
    596             if (pVM->hwaccm.s.fNestedPaging)
     596            if (pVM->hm.s.fNestedPaging)
    597597                val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;
    598598
    599             if (pVM->hwaccm.s.vmx.fVPID)
     599            if (pVM->hm.s.vmx.fVPID)
    600600                val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;
    601601
    602             if (pVM->hwaccm.s.fHasIoApic)
     602            if (pVM->hm.s.fHasIoApic)
    603603                val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;
    604604
    605             if (pVM->hwaccm.s.vmx.fUnrestrictedGuest)
     605            if (pVM->hm.s.vmx.fUnrestrictedGuest)
    606606                val |= VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE;
    607607
    608             if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     608            if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    609609                val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;
    610610
    611611            /* Mask away the bits that the CPU doesn't support */
    612612            /** @todo make sure they don't conflict with the above requirements. */
    613             val &= pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
    614             pVCpu->hwaccm.s.vmx.proc_ctls2 = val;
     613            val &= pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
     614            pVCpu->hm.s.vmx.proc_ctls2 = val;
    615615            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2, val);
    616616            AssertRC(rc);
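
Every control field in this setup path is computed the same way: start from the must-be-one bits (disallowed0), OR in the features we want, then AND with allowed1 so nothing the CPU rejects ever reaches the VMCS. As a self-contained sketch (helper name illustrative):

    #include <stdint.h>

    /* disallowed0 = bits the CPU forces to one; allowed1 = bits it permits. */
    static uint32_t vmxApplyCtlCaps(uint32_t fDesired, uint32_t fDisallowed0, uint32_t fAllowed1)
    {
        uint32_t val = fDisallowed0;   /* Required-one bits first. */
        val |= fDesired;               /* Requested features, e.g. HLT exiting. */
        val &= fAllowed1;              /* Drop anything unsupported on this CPU. */
        return val;
    }
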
     
    656656         * Set the MSR bitmap address.
    657657         */
    658         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    659         {
    660             Assert(pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
    661 
    662             rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
     658        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     659        {
     660            Assert(pVCpu->hm.s.vmx.pMSRBitmapPhys);
     661
     662            rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.pMSRBitmapPhys);
    663663            AssertRC(rc);
    664664
     
    676676            hmR0VmxSetMSRPermission(pVCpu, MSR_K8_GS_BASE, true, true);
    677677            hmR0VmxSetMSRPermission(pVCpu, MSR_K8_FS_BASE, true, true);
    678             if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     678            if (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    679679                hmR0VmxSetMSRPermission(pVCpu, MSR_K8_TSC_AUX, true, true);
    680680        }
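
These hmR0VmxSetMSRPermission() calls clear bits in the 4 KB MSR bitmap so the hot sysenter/syscall MSRs stop causing VM-exits. The bitmap holds four 1 KB quadrants (read-low, read-high, write-low, write-high) with one bit per MSR; a set bit means intercept. A hedged, standalone sketch of the indexing the real function performs (name and exact signature are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    static void vmxSetMsrPermission(uint8_t *pbMsrBitmap, uint32_t idMsr,
                                    bool fAllowRead, bool fAllowWrite)
    {
        uint32_t iBit;
        if (idMsr <= UINT32_C(0x00001fff))
            iBit = idMsr;                          /* Low range, quadrant offset 0. */
        else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        {
            iBit = idMsr - UINT32_C(0xc0000000);   /* High range quadrants. */
            pbMsrBitmap += 0x400;
        }
        else
            return;                                /* Outside both ranges: always intercepted. */

        if (fAllowRead)
            pbMsrBitmap[iBit / 8] &= ~(uint8_t)(1 << (iBit % 8));
        else
            pbMsrBitmap[iBit / 8] |=  (uint8_t)(1 << (iBit % 8));

        if (fAllowWrite)
            pbMsrBitmap[0x800 + iBit / 8] &= ~(uint8_t)(1 << (iBit % 8));
        else
            pbMsrBitmap[0x800 + iBit / 8] |=  (uint8_t)(1 << (iBit % 8));
    }
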
     
    684684         * Set the guest & host MSR load/store physical addresses.
    685685         */
    686         Assert(pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
    687         rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
     686        Assert(pVCpu->hm.s.vmx.pGuestMSRPhys);
     687        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.pGuestMSRPhys);
    688688        AssertRC(rc);
    689         rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
     689        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.pGuestMSRPhys);
    690690        AssertRC(rc);
    691         Assert(pVCpu->hwaccm.s.vmx.pHostMSRPhys);
    692         rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL,  pVCpu->hwaccm.s.vmx.pHostMSRPhys);
     691        Assert(pVCpu->hm.s.vmx.pHostMSRPhys);
     692        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL,  pVCpu->hm.s.vmx.pHostMSRPhys);
    693693        AssertRC(rc);
    694694#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
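
The three VMXWriteVMCS64() calls above wire up the automatic MSR areas: the guest list is loaded on VM-entry and stored back on VM-exit, and the host list is reloaded on VM-exit. Each area is an array of 16-byte records the CPU walks; PVMXMSR in this file points at this layout (the struct name below is illustrative):

    #include <stdint.h>

    /* One entry of a VMX MSR auto-load/store area (16 bytes). */
    typedef struct VMXMSRENTRY
    {
        uint32_t u32IndexMSR;   /* ECX value of the MSR, e.g. MSR_K8_LSTAR. */
        uint32_t u32Reserved;   /* Must be zero. */
        uint64_t u64Value;      /* Loaded into / captured from the MSR. */
    } VMXMSRENTRY;

This is also why the allocation code asserts that the physical addresses have their low four bits clear: the areas must be aligned to the 16-byte entry size.
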
     
    701701        AssertRC(rc);
    702702
    703         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    704         {
    705             Assert(pVM->hwaccm.s.vmx.pMemObjAPIC);
     703        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     704        {
     705            Assert(pVM->hm.s.vmx.pMemObjAPIC);
    706706            /* Optional */
    707707            rc  = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_THRESHOLD, 0);
    708             rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hwaccm.s.vmx.HCPhysVAPIC);
    709 
    710             if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    711                 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL, pVM->hwaccm.s.vmx.pAPICPhys);
     708            rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVAPIC);
     709
     710            if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
     711                rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.pAPICPhys);
    712712
    713713            AssertRC(rc);
     
    722722         * VMCS data back to memory.
    723723         */
    724         rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     724        rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    725725        AssertRC(rc);
    726726
     
    728728         * Configure the VMCS read cache.
    729729         */
    730         PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
     730        PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
    731731
    732732        VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_RIP);
     
    769769        VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_RO_IDT_ERRCODE);
    770770
    771         if (pVM->hwaccm.s.fNestedPaging)
     771        if (pVM->hm.s.fNestedPaging)
    772772        {
    773773            VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_CR3);
     
     782782     * Set up the right TLB function based on CPU capabilities.
    783783     */
    784     if (pVM->hwaccm.s.fNestedPaging && pVM->hwaccm.s.vmx.fVPID)
    785         pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth;
    786     else if (pVM->hwaccm.s.fNestedPaging)
    787         pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBEPT;
    788     else if (pVM->hwaccm.s.vmx.fVPID)
    789         pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBVPID;
     784    if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVPID)
     785        pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth;
     786    else if (pVM->hm.s.fNestedPaging)
     787        pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBEPT;
     788    else if (pVM->hm.s.vmx.fVPID)
     789        pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBVPID;
    790790    else
    791         pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBDummy;
     791        pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBDummy;
    792792
    793793vmx_end:
     
    808808{
    809809    unsigned ulBit;
    810     uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hwaccm.s.vmx.pMSRBitmap;
     810    uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hm.s.vmx.pMSRBitmap;
    811811
    812812    /*
     
    867867
    868868#ifdef VBOX_WITH_STATISTICS
    869     STAM_COUNTER_INC(&pVCpu->hwaccm.s.paStatInjectedIrqsR0[iGate & MASK_INJECT_IRQ_STAT]);
     869    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[iGate & MASK_INJECT_IRQ_STAT]);
    870870#endif
    871871
     
    892892
    893893    if (    CPUMIsGuestInRealModeEx(pCtx)
    894         &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     894        &&  pVM->hm.s.vmx.pRealModeTSS)
    895895    {
    896896        RTGCPHYS GCPhysHandler;
     
    967967        pCtx->eflags.u     &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
    968968
    969         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_SEGMENT_REGS;
     969        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS;
    970970        return VINF_SUCCESS;
    971971    }
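
This branch hand-crafts a real-mode interrupt dispatch because VT-x without unrestricted guest execution cannot inject through a real-mode IDT: the handler address comes from the legacy interrupt vector table, where entry N is a 4-byte CS:IP pair at linear address N*4, and IF/TF/RF/AC are cleared just as the CPU would. A tiny sketch of the IVT lookup (type and names illustrative):

    #include <stdint.h>

    typedef struct IVTENTRY
    {
        uint16_t offHandler;   /* New IP. */
        uint16_t selHandler;   /* New CS. */
    } IVTENTRY;

    /* Real-mode IVT: the entry for vector bVector sits at linear address bVector * 4. */
    static uint32_t ivtEntryAddr(uint8_t bVector)
    {
        return (uint32_t)bVector * sizeof(IVTENTRY);
    }
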
     
    998998     * Dispatch any pending interrupts (injected before, but a VM exit occurred prematurely).
    999999     */
    1000     if (pVCpu->hwaccm.s.Event.fPending)
    1001     {
    1002         Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->hwaccm.s.Event.intInfo,
    1003              pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));
    1004         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntReinject);
    1005         rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, pVCpu->hwaccm.s.Event.intInfo, 0, pVCpu->hwaccm.s.Event.errCode);
     1000    if (pVCpu->hm.s.Event.fPending)
     1001    {
     1002        Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->hm.s.Event.intInfo,
     1003             pVCpu->hm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));
     1004        STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject);
     1005        rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, pVCpu->hm.s.Event.intInfo, 0, pVCpu->hm.s.Event.errCode);
    10061006        AssertRC(rc);
    10071007
    1008         pVCpu->hwaccm.s.Event.fPending = false;
     1008        pVCpu->hm.s.Event.fPending = false;
    10091009        return VINF_SUCCESS;
    10101010    }
     
    10401040            if (!(pCtx->eflags.u32 & X86_EFL_IF))
    10411041            {
    1042                 if (!(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT))
     1042                if (!(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT))
    10431043                {
    10441044                    LogFlow(("Enable irq window exit!\n"));
    1045                     pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
    1046                     rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     1045                    pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
     1046                    rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    10471047                    AssertRC(rc);
    10481048                }
     
    10651065                    /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
    10661066                    Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)));
    1067                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchGuestIrq);
     1067                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
    10681068                    /* Just continue */
    10691069                }
     
    11421142            intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
    11431143
    1144         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntInject);
     1144        STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
    11451145        rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo, 0, errCode);
    11461146        AssertRC(rc);
     
    11661166     * Host CPU Context.
    11671167     */
    1168     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
     1168    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT)
    11691169    {
    11701170        RTIDTR      idtr;
     
    11851185        if (VMX_IS_64BIT_HOST_MODE())
    11861186        {
    1187             cr3 = hwaccmR0Get64bitCR3();
     1187            cr3 = hmR0Get64bitCR3();
    11881188            rc |= VMXWriteVMCS64(VMX_VMCS_HOST_CR3,     cr3);
    11891189        }
     
    12501250        {
    12511251            X86XDTR64 gdtr64, idtr64;
    1252             hwaccmR0Get64bitGDTRandIDTR(&gdtr64, &idtr64);
     1252            hmR0Get64bitGDTRandIDTR(&gdtr64, &idtr64);
    12531253            rc  = VMXWriteVMCS64(VMX_VMCS_HOST_GDTR_BASE, gdtr64.uAddr);
    12541254            rc |= VMXWriteVMCS64(VMX_VMCS_HOST_IDTR_BASE, gdtr64.uAddr);
     
    13561356         * the world switch back to the host.
    13571357         */
    1358         PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pHostMSR;
     1358        PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pHostMSR;
    13591359        unsigned idxMsr = 0;
    13601360
     
    14041404# endif
    14051405
    1406         if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     1406        if (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    14071407        {
    14081408            pMsr->u32IndexMSR = MSR_K8_TSC_AUX;
     
    14181418#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    14191419
    1420         pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
     1420        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
    14211421    }
    14221422    return rc;
     
    15151515     */
    15161516    /** @todo NP state won't change so maybe we should build the initial trap mask up front? */
    1517     if (!pVM->hwaccm.s.fNestedPaging)
     1517    if (!pVM->hm.s.fNestedPaging)
    15181518        u32TrapMask |= RT_BIT(X86_XCPT_PF);
    15191519
     
    15311531    /** @todo Despite the claim to intercept everything, with NP we do not intercept #PF. Should we? */
    15321532    if (    CPUMIsGuestInRealModeEx(pCtx)
    1533         &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     1533        &&  pVM->hm.s.vmx.pRealModeTSS)
    15341534    {
    15351535        u32TrapMask |=   RT_BIT(X86_XCPT_DE)
     
    15721572    X86EFLAGS   eflags;
    15731573
    1574     Assert(!(pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_ALL_GUEST));
     1574    Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST));
    15751575
    15761576    /*
     
    15921592     */
    15931593    if (    CPUMIsGuestInRealModeEx(pCtx)
    1594         &&  pVM->hwaccm.s.vmx.pRealModeTSS)
    1595     {
    1596         pVCpu->hwaccm.s.vmx.RealMode.eflags = eflags;
     1594        &&  pVM->hm.s.vmx.pRealModeTSS)
     1595    {
     1596        pVCpu->hm.s.vmx.RealMode.eflags = eflags;
    15971597
    15981598        eflags.Bits.u1VM   = 1;
     
    16231623     * Set required bits to one and zero according to the MSR capabilities.
    16241624     */
    1625     val  = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
     1625    val  = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0;
    16261626
    16271627    /*
     
    16381638     * Mask away the bits that the CPU doesn't support.
    16391639     */
    1640     val &= pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
     1640    val &= pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;
    16411641    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val);
    16421642    AssertRC(rc);
     
    16461646     * Set required bits to one and zero according to the MSR capabilities.
    16471647     */
    1648     val  = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
     1648    val  = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0;
    16491649
    16501650    /*
     
    16641664        Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64));
    16651665#endif
    1666     val &= pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
     1666    val &= pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;
    16671667
    16681668    /*
     
    16751675     * Guest CPU context: ES, CS, SS, DS, FS, GS.
    16761676     */
    1677     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
    1678     {
    1679         if (pVM->hwaccm.s.vmx.pRealModeTSS)
     1677    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
     1678    {
     1679        if (pVM->hm.s.vmx.pRealModeTSS)
    16801680        {
    16811681            PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
    1682             if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode != enmGuestMode)
     1682            if (pVCpu->hm.s.vmx.enmLastSeenGuestMode != enmGuestMode)
    16831683            {
    16841684                /*
    16851685                 * Correct weird requirements for switching to protected mode.
    16861686                 */
    1687                 if (    pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
     1687                if (    pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
    16881688                    &&  enmGuestMode >= PGMMODE_PROTECTED)
    16891689                {
     
    17081708                    pCtx->ss.Attr.n.u2Dpl  = 0;
    17091709                }
    1710                 pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = enmGuestMode;
     1710                pVCpu->hm.s.vmx.enmLastSeenGuestMode = enmGuestMode;
    17111711            }
    17121712            else if (   CPUMIsGuestInRealModeEx(pCtx)
     
    17411741     * Guest CPU context: LDTR.
    17421742     */
    1743     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
     1743    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
    17441744    {
    17451745        if (pCtx->ldtr.Sel == 0)
     
    17641764     * Guest CPU context: TR.
    17651765     */
    1766     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
     1766    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
    17671767    {
    17681768        /*
     
    17711771         */
    17721772        if (    CPUMIsGuestInRealModeEx(pCtx)
    1773             &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     1773            &&  pVM->hm.s.vmx.pRealModeTSS)
    17741774        {
    17751775            RTGCPHYS GCPhys;
    17761776
    17771777            /* We convert it here every time as PCI regions could be reconfigured. */
    1778             rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pRealModeTSS, &GCPhys);
     1778            rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
    17791779            AssertRC(rc);
    17801780
    17811781            rc =  VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_TR,         0);
    1782             rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_TR_LIMIT,         HWACCM_VTX_TSS_SIZE);
     1782            rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_TR_LIMIT,         HM_VTX_TSS_SIZE);
    17831783            rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_TR_BASE,          GCPhys /* phys = virt in this mode */);
    17841784
     
    18171817     * Guest CPU context: GDTR.
    18181818     */
    1819     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
     1819    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
    18201820    {
    18211821        rc  = VMXWriteVMCS(VMX_VMCS32_GUEST_GDTR_LIMIT,       pCtx->gdtr.cbGdt);
     
    18271827     * Guest CPU context: IDTR.
    18281828     */
    1829     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
     1829    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
    18301830    {
    18311831        rc  = VMXWriteVMCS(VMX_VMCS32_GUEST_IDTR_LIMIT,       pCtx->idtr.cbIdt);
     
    18371837     * Sysenter MSRs.
    18381838     */
    1839     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_MSR)
     1839    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_MSR)
    18401840    {
    18411841        rc  = VMXWriteVMCS(VMX_VMCS32_GUEST_SYSENTER_CS,    pCtx->SysEnter.cs);
     
    18481848     * Guest CPU context: Control registers.
    18491849     */
    1850     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
     1850    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
    18511851    {
    18521852        val = pCtx->cr0;
     
    18671867        }
    18681868        /* Protected mode & paging are always enabled; we use them for emulating real and protected mode without paging too. */
    1869         if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
     1869        if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    18701870            val |= X86_CR0_PE | X86_CR0_PG;
    18711871
    1872         if (pVM->hwaccm.s.fNestedPaging)
     1872        if (pVM->hm.s.fNestedPaging)
    18731873        {
    18741874            if (CPUMIsGuestInPagedProtectedModeEx(pCtx))
    18751875            {
    18761876                /* Disable CR3 read/write monitoring as we don't need it for EPT. */
    1877                 pVCpu->hwaccm.s.vmx.proc_ctls &=  ~(  VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
     1877                pVCpu->hm.s.vmx.proc_ctls &=  ~(  VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    18781878                                                    | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT);
    18791879            }
     
    18811881            {
    18821882                /* Reenable CR3 read/write monitoring as our identity mapped page table is active. */
    1883                 pVCpu->hwaccm.s.vmx.proc_ctls |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
     1883                pVCpu->hm.s.vmx.proc_ctls |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    18841884                                                 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
    18851885            }
    1886             rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     1886            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    18871887            AssertRC(rc);
    18881888        }
     
    19151915            val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_MP;
    19161916
    1917         pVCpu->hwaccm.s.vmx.cr0_mask = val;
     1917        pVCpu->hm.s.vmx.cr0_mask = val;
    19181918
    19191919        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR0_MASK, val);
     
    19221922    }
    19231923
    1924     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
     1924    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
    19251925    {
    19261926        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW,   pCtx->cr4);
    19271927        Log2(("Guest CR4-shadow %08x\n", pCtx->cr4));
    19281928        /* Set the required bits in cr4 too (currently X86_CR4_VMXE). */
    1929         val = pCtx->cr4 | (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
    1930 
    1931         if (!pVM->hwaccm.s.fNestedPaging)
    1932         {
    1933             switch (pVCpu->hwaccm.s.enmShadowMode)
     1929        val = pCtx->cr4 | (uint32_t)pVM->hm.s.vmx.msr.vmx_cr4_fixed0;
     1930
     1931        if (!pVM->hm.s.fNestedPaging)
     1932        {
     1933            switch (pVCpu->hm.s.enmShadowMode)
    19341934            {
    19351935                case PGMMODE_REAL:          /* Real mode                 -> emulated using v86 mode */
     
    19591959        }
    19601960        else if (   !CPUMIsGuestInPagedProtectedModeEx(pCtx)
    1961                  && !pVM->hwaccm.s.vmx.fUnrestrictedGuest)
     1961                 && !pVM->hm.s.vmx.fUnrestrictedGuest)
    19621962        {
    19631963            /* We use 4 MB pages in our identity mapping page table for real and protected mode without paging. */
     
    19711971         */
    19721972        if (    CPUMIsGuestInRealModeEx(pCtx)
    1973             &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     1973            &&  pVM->hm.s.vmx.pRealModeTSS)
    19741974        {
    19751975            val &= ~X86_CR4_VME;
     
    19881988              | X86_CR4_PSE
    19891989              | X86_CR4_VMXE;
    1990         pVCpu->hwaccm.s.vmx.cr4_mask = val;
     1990        pVCpu->hm.s.vmx.cr4_mask = val;
    19911991
    19921992        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR4_MASK, val);
     
    19971997#if 0
    19981998    /* Enable single stepping if requested and CPU supports it. */
    1999     if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
     1999    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
    20002000        if (DBGFIsStepping(pVCpu))
    20012001        {
    2002             pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
    2003             rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     2002            pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
     2003            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    20042004            AssertRC(rc);
    20052005        }
    20062006#endif
    20072007
    2008     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
    2009     {
    2010         if (pVM->hwaccm.s.fNestedPaging)
     2008    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
     2009    {
     2010        if (pVM->hm.s.fNestedPaging)
    20112011        {
    20122012            Assert(PGMGetHyperCR3(pVCpu));
    2013             pVCpu->hwaccm.s.vmx.GCPhysEPTP = PGMGetHyperCR3(pVCpu);
    2014 
    2015             Assert(!(pVCpu->hwaccm.s.vmx.GCPhysEPTP & 0xfff));
     2013            pVCpu->hm.s.vmx.GCPhysEPTP = PGMGetHyperCR3(pVCpu);
     2014
     2015            Assert(!(pVCpu->hm.s.vmx.GCPhysEPTP & 0xfff));
    20162016            /** @todo Check the IA32_VMX_EPT_VPID_CAP MSR for other supported memory types. */
    2017             pVCpu->hwaccm.s.vmx.GCPhysEPTP |=   VMX_EPT_MEMTYPE_WB
     2017            pVCpu->hm.s.vmx.GCPhysEPTP |=   VMX_EPT_MEMTYPE_WB
    20182018                                             | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
    20192019
    2020             rc = VMXWriteVMCS64(VMX_VMCS_CTRL_EPTP_FULL, pVCpu->hwaccm.s.vmx.GCPhysEPTP);
     2020            rc = VMXWriteVMCS64(VMX_VMCS_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.GCPhysEPTP);
    20212021            AssertRC(rc);
    20222022
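
The EPTP written above packs more than the PML4 address: bits 2:0 select the EPT memory type (write-back, as VMX_EPT_MEMTYPE_WB above), bits 5:3 encode the page-walk length minus one, and the page-aligned physical address fills the upper bits. A hedged sketch with the numeric encodings spelled out:

    #include <stdint.h>

    /* Sketch of the EPT pointer encoding: memtype 6 = write-back,
     * a 4-level walk stored as (length - 1) in bits 5:3. */
    static uint64_t vmxMakeEptp(uint64_t HCPhysPml4)
    {
        uint64_t uEptp = HCPhysPml4 & ~UINT64_C(0xfff);   /* 4K-aligned PML4. */
        uEptp |= UINT64_C(6);                             /* Write-back memory type. */
        uEptp |= UINT64_C(4 - 1) << 3;                    /* 4-level page walk. */
        return uEptp;
    }
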
    20232023            if (    !CPUMIsGuestInPagedProtectedModeEx(pCtx)
    2024                 &&  !pVM->hwaccm.s.vmx.fUnrestrictedGuest)
     2024                &&  !pVM->hm.s.vmx.fUnrestrictedGuest)
    20252025            {
    20262026                RTGCPHYS GCPhys;
    20272027
    20282028                /* We convert it here every time as PCI regions could be reconfigured. */
    2029                 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
    2030                 AssertMsgRC(rc, ("pNonPagingModeEPTPageTable = %RGv\n", pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable));
     2029                rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
     2030                AssertMsgRC(rc, ("pNonPagingModeEPTPageTable = %RGv\n", pVM->hm.s.vmx.pNonPagingModeEPTPageTable));
    20312031
    20322032                /*
     
    20582058     * Guest CPU context: Debug registers.
    20592059     */
    2060     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
     2060    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
    20612061    {
    20622062        pCtx->dr[6] |= X86_DR6_INIT_VAL;                                          /* set all reserved bits to 1. */
     
    20942094            &&  !DBGFIsStepping(pVCpu))
    20952095        {
    2096             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxArmed);
     2096            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
    20972097
    20982098            /* Disable DRx move intercepts. */
    2099             pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    2100             rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     2099            pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
     2100            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    21012101            AssertRC(rc);
    21022102
     
    21232123        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    21242124#elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    2125         pVCpu->hwaccm.s.vmx.pfnStartVM  = VMXR0SwitcherStartVM64;
     2125        pVCpu->hm.s.vmx.pfnStartVM  = VMXR0SwitcherStartVM64;
    21262126#else
    21272127# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    2128         if (!pVM->hwaccm.s.fAllow64BitGuests)
     2128        if (!pVM->hm.s.fAllow64BitGuests)
    21292129            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    21302130# endif
    2131         pVCpu->hwaccm.s.vmx.pfnStartVM  = VMXR0StartVM64;
    2132 #endif
    2133         if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_MSR)
     2131        pVCpu->hm.s.vmx.pfnStartVM  = VMXR0StartVM64;
     2132#endif
     2133        if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_MSR)
    21342134        {
    21352135            /* Update these as wrmsr might have changed them. */
     
    21422142    else
    21432143    {
    2144         pVCpu->hwaccm.s.vmx.pfnStartVM  = VMXR0StartVM32;
     2144        pVCpu->hm.s.vmx.pfnStartVM  = VMXR0StartVM32;
    21452145    }
    21462146
     
    21522152     * during VM-entry and restored into the VM-exit store area during VM-exit.
    21532153     */
    2154     PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;
     2154    PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pGuestMSR;
    21552155    unsigned idxMsr = 0;
    21562156
     
    21962196    }
    21972197
    2198     if (   pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP
     2198    if (   pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP
    21992199        && (u32GstExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP))
    22002200    {
     
    22062206    }
    22072207
    2208     pVCpu->hwaccm.s.vmx.cCachedMSRs = idxMsr;
     2208    pVCpu->hm.s.vmx.cCachedMSRs = idxMsr;
    22092209
    22102210    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, idxMsr);
     
    22162216
    22172217    bool fOffsettedTsc;
    2218     if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
    2219     {
    2220         uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hwaccm.s.vmx.u64TSCOffset);
     2218    if (pVM->hm.s.vmx.fUsePreemptTimer)
     2219    {
     2220        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
    22212221
    22222222        /* Make sure the returned values have sane upper and lower boundaries. */
     
    22262226        cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
    22272227
    2228         cTicksToDeadline >>= pVM->hwaccm.s.vmx.cPreemptTimerShift;
     2228        cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
    22292229        uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
    22302230        rc = VMXWriteVMCS(VMX_VMCS32_GUEST_PREEMPTION_TIMER_VALUE, cPreemptionTickCount);
     
    22322232    }
    22332233    else
    2234         fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hwaccm.s.vmx.u64TSCOffset);
     2234        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
    22352235
    22362236    if (fOffsettedTsc)
    22372237    {
    22382238        uint64_t u64CurTSC = ASMReadTSC();
    2239         if (u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
     2239        if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
    22402240        {
    22412241            /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
    2242             rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, pVCpu->hwaccm.s.vmx.u64TSCOffset);
     2242            rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);
    22432243            AssertRC(rc);
    22442244
    2245             pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    2246             rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     2245            pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
     2246            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    22472247            AssertRC(rc);
    2248             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCOffset);
     2248            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCOffset);
    22492249        }
    22502250        else
     
     22522252            /* Fall back to RDTSC/RDTSCP emulation as we would otherwise pass decreasing TSC values to the guest. */
    22532253            LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC,
    2254                      pVCpu->hwaccm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset,
    2255                      TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hwaccm.s.vmx.u64TSCOffset,
     2254                     pVCpu->hm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset,
     2255                     TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hm.s.vmx.u64TSCOffset,
    22562256                     TMCpuTickGet(pVCpu)));
    2257             pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    2258             rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     2257            pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
     2258            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    22592259            AssertRC(rc);
    2260             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow);
     2260            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCInterceptOverFlow);
    22612261        }
    22622262    }
    22632263    else
    22642264    {
    2265         pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    2266         rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     2265        pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
     2266        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    22672267        AssertRC(rc);
    2268         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCIntercept);
     2268        STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCIntercept);
    22692269    }
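
The three branches above choose between hardware TSC offsetting and RDTSC/RDTSCP interception. The invariant is monotonicity: the guest-visible TSC (host TSC plus offset) must never fall below the last value the guest already observed, otherwise the code re-enables RDTSC exiting and emulates. Reduced to a predicate (parameter names stand in for the ASMReadTSC()/TMCpuTickGetLastSeen() results used above):

    #include <stdbool.h>
    #include <stdint.h>

    /* True if the hardware offset keeps guest time monotonic, so RDTSC
     * exiting can stay disabled. */
    static bool vmxCanUseTscOffsetting(uint64_t u64HostTsc, uint64_t u64TscOffset,
                                       uint64_t u64LastSeenGuestTsc)
    {
        return u64HostTsc + u64TscOffset >= u64LastSeenGuestTsc;
    }
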
    22702270
    22712271    /* Done with the major changes */
    2272     pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
     2272    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST;
    22732273
    22742274    /* Minimal guest state update (ESP, EIP, EFLAGS mostly) */
     
    23182318    VMXReadCachedVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW,     &valShadow);
    23192319    VMXReadCachedVMCS(VMX_VMCS64_GUEST_CR0,              &val);
    2320     val = (valShadow & pVCpu->hwaccm.s.vmx.cr0_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr0_mask);
     2320    val = (valShadow & pVCpu->hm.s.vmx.cr0_mask) | (val & ~pVCpu->hm.s.vmx.cr0_mask);
    23212321    CPUMSetGuestCR0(pVCpu, val);
    23222322
    23232323    VMXReadCachedVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW,     &valShadow);
    23242324    VMXReadCachedVMCS(VMX_VMCS64_GUEST_CR4,              &val);
    2325     val = (valShadow & pVCpu->hwaccm.s.vmx.cr4_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr4_mask);
     2325    val = (valShadow & pVCpu->hm.s.vmx.cr4_mask) | (val & ~pVCpu->hm.s.vmx.cr4_mask);
    23262326    CPUMSetGuestCR4(pVCpu, val);
    23272327
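
The two reads above reassemble the guest-visible CR0/CR4: bits intercepted by the hypervisor (selected by cr0_mask/cr4_mask) are taken from the read shadow the guest last wrote, the remaining bits from the live VMCS value. The merge reduces to one expression; a sketch with illustrative names:

#include <stdint.h>

/* Bits selected by uMask come from the read shadow, the rest from the
 * real guest value. */
static uint64_t crMergeShadow(uint64_t uShadow, uint64_t uGuest, uint64_t uMask)
{
    return (uShadow & uMask) | (uGuest & ~uMask);
}

So if, say, the hypervisor owns CR0.PG, the guest reads back the paging bit it last wrote even while the real control register keeps paging forced on.
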
     
    23302330     * the nested paging case where CR3 & CR4 can be changed by the guest.
    23312331     */
    2332     if (   pVM->hwaccm.s.fNestedPaging
     2332    if (   pVM->hm.s.fNestedPaging
    23332333        && CPUMIsGuestInPagedProtectedModeEx(pCtx)) /** @todo check if we will always catch mode switches and such... */
    23342334    {
    2335         PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
     2335        PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
    23362336
    23372337        /* Can be updated behind our back in the nested paging case. */
     
    23842384    /* Real mode emulation using v86 mode. */
    23852385    if (    CPUMIsGuestInRealModeEx(pCtx)
    2386         &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     2386        &&  pVM->hm.s.vmx.pRealModeTSS)
    23872387    {
    23882388        /* Hide our emulation flags */
    23892389        pCtx->eflags.Bits.u1VM   = 0;
    23902390
    23912391        /* Restore original IOPL setting as we always use 0. */
    2392         pCtx->eflags.Bits.u2IOPL = pVCpu->hwaccm.s.vmx.RealMode.eflags.Bits.u2IOPL;
     2392        pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL;
    23932393
    23942394        /* Force a TR resync every time in case we switch modes. */
    2395         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_TR;
     2395        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_TR;
    23962396    }
    23972397    else
     
    24052405     * Save the possibly changed MSRs that we automatically restore and save during a world switch.
    24062406     */
    2407     for (unsigned i = 0; i < pVCpu->hwaccm.s.vmx.cCachedMSRs; i++)
    2408     {
    2409         PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;
     2407    for (unsigned i = 0; i < pVCpu->hm.s.vmx.cCachedMSRs; i++)
     2408    {
     2409        PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pGuestMSR;
    24102410        pMsr += i;
    24112411
     
    24582458    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
    24592459    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    2460     pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
     2460    pVCpu->hm.s.TlbShootdown.cPages = 0;
    24612461    return;
    24622462}
     
    24732473    PHMGLOBLCPUINFO pCpu;
    24742474
    2475     Assert(pVM->hwaccm.s.fNestedPaging && pVM->hwaccm.s.vmx.fVPID);
    2476 
    2477     pCpu = HWACCMR0GetCurrentCpu();
     2475    Assert(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVPID);
     2476
     2477    pCpu = HMR0GetCurrentCpu();
    24782478
    24792479    /*
     
    24842484     */
    24852485    bool fNewASID = false;
    2486     if (   pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
    2487         || pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    2488     {
    2489         pVCpu->hwaccm.s.fForceTLBFlush = true;
     2486    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
     2487        || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     2488    {
     2489        pVCpu->hm.s.fForceTLBFlush = true;
    24902490        fNewASID = true;
    24912491    }
     
    24952495     */
    24962496    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    2497         pVCpu->hwaccm.s.fForceTLBFlush = true;
    2498 
    2499     pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
    2500 
    2501     if (pVCpu->hwaccm.s.fForceTLBFlush)
     2497        pVCpu->hm.s.fForceTLBFlush = true;
     2498
     2499    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
     2500
     2501    if (pVCpu->hm.s.fForceTLBFlush)
    25022502    {
    25032503        if (fNewASID)
    25042504        {
    25052505            ++pCpu->uCurrentASID;
    2506             if (pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID)
     2506            if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
    25072507            {
    25082508                pCpu->uCurrentASID = 1;       /* start at 1; host uses 0 */
     
    25112511            }
    25122512
    2513             pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
     2513            pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID;
    25142514            if (pCpu->fFlushASIDBeforeUse)
    25152515            {
    2516                 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
     2516                hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
    25172517#ifdef VBOX_WITH_STATISTICS
    2518                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushASID);
     2518                STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID);
    25192519#endif
    25202520            }
    25212521        }
    25222522        else
    25232523        {
    2524             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
     2524            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
    25252525                hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
    25262526            else
    2527                 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushEPT);
     2527                hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
    25282528
    25292529#ifdef VBOX_WITH_STATISTICS
     
    25322532             * as ASID flushes too, better than including them under StatFlushTLBWorldSwitch.
    25332533             */
    2534             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushASID);
    2535 #endif
    2536         }
    2537 
    2538         pVCpu->hwaccm.s.cTLBFlushes    = pCpu->cTLBFlushes;
    2539         pVCpu->hwaccm.s.fForceTLBFlush = false;
     2534            STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID);
     2535#endif
     2536        }
     2537
     2538        pVCpu->hm.s.cTLBFlushes    = pCpu->cTLBFlushes;
     2539        pVCpu->hm.s.fForceTLBFlush = false;
    25402540    }
    25412541    else
    25422542    {
    2543         AssertMsg(pVCpu->hwaccm.s.uCurrentASID && pCpu->uCurrentASID,
    2544                   ("hwaccm->uCurrentASID=%lu hwaccm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
    2545                    pVCpu->hwaccm.s.uCurrentASID, pVCpu->hwaccm.s.cTLBFlushes,
     2543        AssertMsg(pVCpu->hm.s.uCurrentASID && pCpu->uCurrentASID,
     2544                  ("hm->uCurrentASID=%lu hm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
     2545                   pVCpu->hm.s.uCurrentASID, pVCpu->hm.s.cTLBFlushes,
    25462546                   pCpu->uCurrentASID, pCpu->cTLBFlushes));
    25472547
    25482548        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
    2549          *        not be executed. See hwaccmQueueInvlPage() where it is commented
     2549         *        not be executed. See hmQueueInvlPage() where it is commented
    25502550         *        out. Support individual entry flushing someday. */
    25512551        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
    25522552        {
    2553             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
     2553            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
    25542554
    25552555            /*
     
    25572557             * as supported by the CPU.
    25582558             */
    2559             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
     2559            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
    25602560            {
    2561                 for (unsigned i = 0; i < pVCpu->hwaccm.s.TlbShootdown.cPages; i++)
    2562                     hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
     2561                for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
     2562                    hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
    25632563            }
    25642564            else
    2565                 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushEPT);
     2565                hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
    25662566        }
    25672567        else
    25682568        {
    25692569#ifdef VBOX_WITH_STATISTICS
    2570             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
    2571 #endif
    2572         }
    2573     }
    2574     pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
     2570            STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch);
     2571#endif
     2572        }
     2573    }
     2574    pVCpu->hm.s.TlbShootdown.cPages = 0;
    25752575    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    25762576
    2577     AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes,
    2578               ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    2579     AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID,
     2577    AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes,
     2578              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
     2579    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID,
    25802580              ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    2581     AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,
    2582               ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
     2581    AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID,
     2582              ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID));
    25832583
    25842584    /* Update VMCS with the VPID. */
    2585     int rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hwaccm.s.uCurrentASID);
     2585    int rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentASID);
    25862586    AssertRC(rc);
    25872587}
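
Worth spelling out the ASID discipline above: each host CPU hands out ASIDs monotonically, zero stays reserved for the host, and wrapping the ASID space bumps the flush generation so every vCPU that shows up with a stale generation gets a new tag and a flush. A condensed model, with simplified stand-ins for the per-host-CPU state rather than the real structures:

#include <stdint.h>
#include <stdbool.h>

typedef struct HOSTCPUSTATE
{
    uint32_t uCurrentASID;        /* last ASID handed out on this host CPU */
    uint32_t cTLBFlushes;         /* bumped whenever the ASID space wraps */
    bool     fFlushASIDBeforeUse; /* first use after a wrap must flush */
} HOSTCPUSTATE;

/* Hand out a fresh ASID; wrap to 1 (0 is the host's) and mark everything
 * tagged with recycled ASIDs as needing a flush. */
static uint32_t asidAssignNew(HOSTCPUSTATE *pCpu, uint32_t uMaxASID)
{
    if (++pCpu->uCurrentASID >= uMaxASID)
    {
        pCpu->uCurrentASID        = 1;
        pCpu->cTLBFlushes++;
        pCpu->fFlushASIDBeforeUse = true;
    }
    return pCpu->uCurrentASID;
}
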
     
    25992599    PHMGLOBLCPUINFO pCpu;
    26002600
    2601     Assert(pVM->hwaccm.s.fNestedPaging);
    2602     Assert(!pVM->hwaccm.s.vmx.fVPID);
    2603 
    2604     pCpu = HWACCMR0GetCurrentCpu();
     2601    Assert(pVM->hm.s.fNestedPaging);
     2602    Assert(!pVM->hm.s.vmx.fVPID);
     2603
     2604    pCpu = HMR0GetCurrentCpu();
    26052605
    26062606    /*
     
    26092609     * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
    26102610     */
    2611     if (   pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
    2612         || pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    2613     {
    2614         pVCpu->hwaccm.s.fForceTLBFlush = true;
     2611    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
     2612        || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     2613    {
     2614        pVCpu->hm.s.fForceTLBFlush = true;
    26152615    }
    26162616
     
    26192619     */
    26202620    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    2621         pVCpu->hwaccm.s.fForceTLBFlush = true;
    2622 
    2623     pVCpu->hwaccm.s.idLastCpu   = pCpu->idCpu;
    2624     pVCpu->hwaccm.s.cTLBFlushes = pCpu->cTLBFlushes;
    2625 
    2626     if (pVCpu->hwaccm.s.fForceTLBFlush)
    2627         hmR0VmxFlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushEPT);
     2621        pVCpu->hm.s.fForceTLBFlush = true;
     2622
     2623    pVCpu->hm.s.idLastCpu   = pCpu->idCpu;
     2624    pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes;
     2625
     2626    if (pVCpu->hm.s.fForceTLBFlush)
     2627        hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
    26282628    else
    26292629    {
    26302630        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
    2631          *        not be executed. See hwaccmQueueInvlPage() where it is commented
     2631         *        not be executed. See hmQueueInvlPage() where it is commented
    26322632         *        out. Support individual entry flushing someday. */
    26332633        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
     
    26362636             * We cannot flush individual entries without VPID support. Flush using EPT.
    26372637             */
    2638             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
    2639             hmR0VmxFlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushEPT);
    2640         }
    2641     }
    2642     pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
     2638            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
     2639            hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
     2640        }
     2641    }
     2642    pVCpu->hm.s.TlbShootdown.cPages = 0;
    26432643    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    26442644
    26452645#ifdef VBOX_WITH_STATISTICS
    2646     if (pVCpu->hwaccm.s.fForceTLBFlush)
    2647         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch);
     2646    if (pVCpu->hm.s.fForceTLBFlush)
     2647        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch);
    26482648    else
    2649         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
     2649        STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch);
    26502650#endif
    26512651}
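
With nested paging but no VPID the TLB carries no guest tags, so the whole routine above reduces to computing one boolean and doing a full EPT flush when it is set. A condensed model of that predicate:

#include <stdint.h>
#include <stdbool.h>

/* A full flush is forced when the vCPU migrated to another host CPU, the
 * host CPU's flush generation moved on, or an explicit flush was queued. */
static bool tlbNeedFullFlush(uint32_t idLastCpu, uint32_t idCurCpu,
                             uint32_t cVCpuFlushes, uint32_t cCpuFlushes,
                             bool fFlushQueued)
{
    return idLastCpu != idCurCpu
        || cVCpuFlushes != cCpuFlushes
        || fFlushQueued;
}
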
     
    26632663    PHMGLOBLCPUINFO pCpu;
    26642664
    2665     Assert(pVM->hwaccm.s.vmx.fVPID);
    2666     Assert(!pVM->hwaccm.s.fNestedPaging);
    2667 
    2668     pCpu = HWACCMR0GetCurrentCpu();
     2665    Assert(pVM->hm.s.vmx.fVPID);
     2666    Assert(!pVM->hm.s.fNestedPaging);
     2667
     2668    pCpu = HMR0GetCurrentCpu();
    26692669
    26702670    /*
     
    26742674     * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
    26752675     */
    2676     if (   pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
    2677         || pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
     2676    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
     2677        || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
    26782678    {
    26792679        /* Force a TLB flush on VM entry. */
    2680         pVCpu->hwaccm.s.fForceTLBFlush = true;
     2680        pVCpu->hm.s.fForceTLBFlush = true;
    26812681    }
    26822682
     
    26852685     */
    26862686    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    2687         pVCpu->hwaccm.s.fForceTLBFlush = true;
    2688 
    2689     pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
    2690 
    2691     if (pVCpu->hwaccm.s.fForceTLBFlush)
     2687        pVCpu->hm.s.fForceTLBFlush = true;
     2688
     2689    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
     2690
     2691    if (pVCpu->hm.s.fForceTLBFlush)
    26922692    {
    26932693        ++pCpu->uCurrentASID;
    2694         if (pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID)
     2694        if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
    26952695        {
    26962696            pCpu->uCurrentASID               = 1;       /* start at 1; host uses 0 */
     
    26992699        }
    27002700        else
    2701             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushASID);
    2702 
    2703         pVCpu->hwaccm.s.fForceTLBFlush = false;
    2704         pVCpu->hwaccm.s.cTLBFlushes    = pCpu->cTLBFlushes;
    2705         pVCpu->hwaccm.s.uCurrentASID   = pCpu->uCurrentASID;
     2701            STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID);
     2702
     2703        pVCpu->hm.s.fForceTLBFlush = false;
     2704        pVCpu->hm.s.cTLBFlushes    = pCpu->cTLBFlushes;
     2705        pVCpu->hm.s.uCurrentASID   = pCpu->uCurrentASID;
    27062706        if (pCpu->fFlushASIDBeforeUse)
    2707             hmR0VmxFlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
     2707            hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
    27082708    }
    27092709    else
    27102710    {
    2711         AssertMsg(pVCpu->hwaccm.s.uCurrentASID && pCpu->uCurrentASID,
    2712                   ("hwaccm->uCurrentASID=%lu hwaccm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
    2713                    pVCpu->hwaccm.s.uCurrentASID, pVCpu->hwaccm.s.cTLBFlushes,
     2711        AssertMsg(pVCpu->hm.s.uCurrentASID && pCpu->uCurrentASID,
     2712                  ("hm->uCurrentASID=%lu hm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
     2713                   pVCpu->hm.s.uCurrentASID, pVCpu->hm.s.cTLBFlushes,
    27142714                   pCpu->uCurrentASID, pCpu->cTLBFlushes));
    27152715
    27162716        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
    2717          *        not be executed. See hwaccmQueueInvlPage() where it is commented
     2717         *        not be executed. See hmQueueInvlPage() where it is commented
    27182718         *        out. Support individual entry flushing someday. */
    27192719        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
     
    27232723             * as supported by the CPU.
    27242724             */
    2725             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
     2725            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
    27262726            {
    2727                 for (unsigned i = 0; i < pVCpu->hwaccm.s.TlbShootdown.cPages; i++)
    2728                     hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
     2727                for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
     2728                    hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
    27292729            }
    27302730            else
    2731                 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
    2732         }
    2733     }
    2734     pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
     2731                hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
     2732        }
     2733    }
     2734    pVCpu->hm.s.TlbShootdown.cPages = 0;
    27352735    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    27362736
    2737     AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes,
    2738               ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    2739     AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID,
     2737    AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes,
     2738              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
     2739    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID,
    27402740              ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    2741     AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,
    2742               ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
    2743 
    2744     int rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hwaccm.s.uCurrentASID);
     2741    AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID,
     2742              ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID));
     2743
     2744    int rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentASID);
    27452745    AssertRC(rc);
    27462746
    27472747# ifdef VBOX_WITH_STATISTICS
    2748     if (pVCpu->hwaccm.s.fForceTLBFlush)
    2749         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch);
     2748    if (pVCpu->hm.s.fForceTLBFlush)
     2749        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch);
    27502750    else
    2751         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
     2751        STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch);
    27522752# endif
    27532753}
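
In the VPID-only variant above, queued page shootdowns can be handled per address when the CPU advertises individual-address INVVPID; otherwise the entire tagged context is dropped. A sketch of that choice, where the capability flag and flush callbacks are illustrative stand-ins:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef void FNFLUSHPAGE(uint64_t GCPtrPage);   /* per-address INVVPID */
typedef void FNFLUSHCTX(void);                  /* whole-context INVVPID */

/* Flush each queued page individually when supported, else invalidate the
 * whole tagged context in one go. */
static void vpidFlushShootdowns(bool fCanFlushIndivAddr,
                                const uint64_t *paPages, size_t cPages,
                                FNFLUSHPAGE *pfnFlushPage, FNFLUSHCTX *pfnFlushCtx)
{
    if (fCanFlushIndivAddr)
        for (size_t i = 0; i < cPages; i++)
            pfnFlushPage(paPages[i]);
    else
        pfnFlushCtx();
}
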
     
    27642764VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    27652765{
    2766     STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatEntry, x);
    2767     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hwaccm.s.StatExit1);
    2768     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hwaccm.s.StatExit2);
     2766    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
     2767    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
     2768    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
    27692769
    27702770    VBOXSTRICTRC rc = VINF_SUCCESS;
     
    27892789#endif
    27902790
    2791     Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    2792            || (pVCpu->hwaccm.s.vmx.pbVAPIC && pVM->hwaccm.s.vmx.pAPIC));
     2791    Assert(!(pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
     2792           || (pVCpu->hm.s.vmx.pbVAPIC && pVM->hm.s.vmx.pAPIC));
    27932793
    27942794    /*
     
    27962796     */
    27972797    if (    CPUMIsGuestInLongModeEx(pCtx)
    2798         || (   ((   pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    2799                  || pVM->hwaccm.s.fTRPPatchingAllowed)
    2800             &&  pVM->hwaccm.s.fHasIoApic)
     2798        || (   ((   pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
     2799                 || pVM->hm.s.fTRPPatchingAllowed)
     2800            &&  pVM->hm.s.fHasIoApic)
    28012801       )
    28022802    {
     
    28072807
    28082808    /* This is not ideal, but if we don't clear the event injection in the VMCS right here,
    2809      * we may end up injecting some stale event into a VM, including injecting an event that 
     2809     * we may end up injecting some stale event into a VM, including injecting an event that
    28102810     * originated before a VM reset *after* the VM has been reset. See @bugref{6220}.
    28112811     */
     
    28212821
    28222822        /* allowed zero */
    2823         if ((val2 & pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0)
     2823        if ((val2 & pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0)
    28242824            Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: zero\n"));
    28252825
    28262826        /* allowed one */
    2827         if ((val2 & ~pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1) != 0)
     2827        if ((val2 & ~pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1) != 0)
    28282828            Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: one\n"));
    28292829
     
    28352835         * Must be set according to the MSR, but can be cleared if nested paging is used.
    28362836         */
    2837         if (pVM->hwaccm.s.fNestedPaging)
     2837        if (pVM->hm.s.fNestedPaging)
    28382838        {
    28392839            val2 |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
     
    28432843
    28442844        /* allowed zero */
    2845         if ((val2 & pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0)
     2845        if ((val2 & pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0)
    28462846            Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: zero\n"));
    28472847
    28482848        /* allowed one */
    2849         if ((val2 & ~pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1) != 0)
     2849        if ((val2 & ~pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1) != 0)
    28502850            Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: one\n"));
    28512851
     
    28552855
    28562856        /* allowed zero */
    2857         if ((val2 & pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0)
     2857        if ((val2 & pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0)
    28582858            Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: zero\n"));
    28592859
    28602860        /* allowed one */
    2861         if ((val2 & ~pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1) != 0)
     2861        if ((val2 & ~pVM->hm.s.vmx.msr.vmx_entry.n.allowed1) != 0)
    28622862            Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: one\n"));
    28632863
     
    28672867
    28682868        /* allowed zero */
    2869         if ((val2 & pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0)
     2869        if ((val2 & pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0)
    28702870            Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: zero\n"));
    28712871
    28722872        /* allowed one */
    2873         if ((val2 & ~pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1) != 0)
     2873        if ((val2 & ~pVM->hm.s.vmx.msr.vmx_exit.n.allowed1) != 0)
    28742874            Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: one\n"));
    28752875    }
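
The strict checks above all follow the same VMX capability-MSR pattern: one mask gives the control bits that must be one ('disallowed zero'), the other the bits that may be one, and a control word is valid only if it satisfies both. As a sketch:

#include <stdint.h>
#include <stdbool.h>

/* uMustBeOne mirrors the 'disallowed0' mask, uMayBeOne the 'allowed1'
 * mask of a VMX capability MSR. */
static bool vmxCtrlIsValid(uint32_t uCtrl, uint32_t uMustBeOne, uint32_t uMayBeOne)
{
    if ((uCtrl & uMustBeOne) != uMustBeOne)     /* a required bit is clear */
        return false;
    if ((uCtrl & ~uMayBeOne) != 0)              /* a forbidden bit is set */
        return false;
    return true;
}
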
     
    28782878
    28792879#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    2880     pVCpu->hwaccm.s.vmx.VMCSCache.u64TimeEntry = RTTimeNanoTS();
     2880    pVCpu->hm.s.vmx.VMCSCache.u64TimeEntry = RTTimeNanoTS();
    28812881#endif
    28822882
     
    28852885     */
    28862886ResumeExecution:
    2887     if (!STAM_REL_PROFILE_ADV_IS_RUNNING(&pVCpu->hwaccm.s.StatEntry))
    2888         STAM_REL_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatExit2, &pVCpu->hwaccm.s.StatEntry, x);
    2889     AssertMsg(pVCpu->hwaccm.s.idEnteredCpu == RTMpCpuId(),
     2887    if (!STAM_REL_PROFILE_ADV_IS_RUNNING(&pVCpu->hm.s.StatEntry))
     2888        STAM_REL_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit2, &pVCpu->hm.s.StatEntry, x);
     2889    AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
    28902890              ("Expected %d, I'm %d; cResume=%d exitReason=%RGv exitQualification=%RGv\n",
    2891                (int)pVCpu->hwaccm.s.idEnteredCpu, (int)RTMpCpuId(), cResume, exitReason, exitQualification));
    2892     Assert(!HWACCMR0SuspendPending());
     2891               (int)pVCpu->hm.s.idEnteredCpu, (int)RTMpCpuId(), cResume, exitReason, exitQualification));
     2892    Assert(!HMR0SuspendPending());
    28932893    /* Not allowed to switch modes without reloading the host state (32->64 switcher)!! */
    28942894    Assert(fWasInLongMode == CPUMIsGuestInLongModeEx(pCtx));
     
    28972897     * Safety precaution; looping for too long here can have a very bad effect on the host.
    28982898     */
    2899     if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
    2900     {
    2901         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMaxResume);
     2899    if (RT_UNLIKELY(++cResume > pVM->hm.s.cMaxResumeLoops))
     2900    {
     2901        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
    29022902        rc = VINF_EM_RAW_INTERRUPT;
    29032903        goto end;
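
The cap above is what keeps the inner resume loop from starving the host: once the counter passes cMaxResumeLoops the code returns VINF_EM_RAW_INTERRUPT so ring-3 gets a turn. Schematically:

#include <stdint.h>
#include <stdbool.h>

/* Returns true when the run loop should bounce back out to ring-3. */
static bool resumeLoopExhausted(uint32_t *pcResume, uint32_t cMaxResumeLoops)
{
    return ++*pcResume > cMaxResumeLoops;
}
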
     
    29472947     * Check for pending actions that force us to go back to ring-3.
    29482948     */
    2949     if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
    2950         ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST))
     2949    if (    VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
     2950        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST))
    29512951    {
    29522952        /* Check if a sync operation is pending. */
     
    29672967#endif
    29682968        {
    2969             if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK)
    2970                 ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK))
     2969            if (    VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK)
     2970                ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    29712971            {
    2972                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchToR3);
     2972                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchToR3);
    29732973                rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
    29742974                goto end;
     
    30133013    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
    30143014    {
    3015         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
     3015        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending);
    30163016        rc = VINF_EM_RAW_INTERRUPT;
    30173017        goto end;
     
    30453045        AssertRC(rc2);
    30463046        /* The TPR can be found at offset 0x80 in the APIC mmio page. */
    3047         pVCpu->hwaccm.s.vmx.pbVAPIC[0x80] = u8LastTPR;
     3047        pVCpu->hm.s.vmx.pbVAPIC[0x80] = u8LastTPR;
    30483048
    30493049        /*
     
    30593059        AssertRC(VBOXSTRICTRC_VAL(rc));
    30603060
    3061         if (pVM->hwaccm.s.fTPRPatchingActive)
     3061        if (pVM->hm.s.fTPRPatchingActive)
    30623062        {
    30633063            Assert(!CPUMIsGuestInLongModeEx(pCtx));
     
    30833083
    30843084#ifdef LOG_ENABLED
    3085     if (    pVM->hwaccm.s.fNestedPaging
    3086         ||  pVM->hwaccm.s.vmx.fVPID)
    3087     {
    3088         PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
    3089         if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
    3090         {
    3091             LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu,
     3085    if (    pVM->hm.s.fNestedPaging
     3086        ||  pVM->hm.s.vmx.fVPID)
     3087    {
     3088        PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
     3089        if (pVCpu->hm.s.idLastCpu != pCpu->idCpu)
     3090        {
     3091            LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu,
    30923092                     pCpu->idCpu));
    30933093        }
    3094         else if (pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    3095         {
    3096             LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes,
     3094        else if (pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     3095        {
     3096            LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTLBFlushes,
    30973097                     pCpu->cTLBFlushes));
    30983098        }
     
    31193119     * Save the host state first.
    31203120     */
    3121     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
     3121    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT)
    31223122    {
    31233123        rc  = VMXR0SaveHostState(pVM, pVCpu);
     
    31323132     * Load the guest state.
    31333133     */
    3134     if (!pVCpu->hwaccm.s.fContextUseFlags)
     3134    if (!pVCpu->hm.s.fContextUseFlags)
    31353135    {
    31363136        VMXR0LoadMinimalGuestState(pVM, pVCpu, pCtx);
    3137         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatLoadMinimal);
     3137        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
    31383138    }
    31393139    else
     
    31453145            goto end;
    31463146        }
    3147         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatLoadFull);
     3147        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
    31483148    }
    31493149
     
    31633163
    31643164    /* Set TLB flush state as checked until we return from the world switch. */
    3165     ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, true);
     3165    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
    31663166    /* Deal with tagged TLB setup and invalidation. */
    3167     pVM->hwaccm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu);
     3167    pVM->hm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu);
    31683168
    31693169    /*
     
    31803180
    31813181    /* All done! Let's start VM execution. */
    3182     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatEntry, &pVCpu->hwaccm.s.StatInGC, x);
     3182    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
    31833183    Assert(idCpuCheck == RTMpCpuId());
    31843184
    31853185#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    3186     pVCpu->hwaccm.s.vmx.VMCSCache.cResume = cResume;
    3187     pVCpu->hwaccm.s.vmx.VMCSCache.u64TimeSwitch = RTTimeNanoTS();
     3186    pVCpu->hm.s.vmx.VMCSCache.cResume = cResume;
     3187    pVCpu->hm.s.vmx.VMCSCache.u64TimeSwitch = RTTimeNanoTS();
    31883188#endif
    31893189
     
    31913191     * Save the current TPR value in the LSTAR MSR so our patches can access it.
    31923192     */
    3193     if (pVM->hwaccm.s.fTPRPatchingActive)
    3194     {
    3195         Assert(pVM->hwaccm.s.fTPRPatchingActive);
     3193    if (pVM->hm.s.fTPRPatchingActive)
     3194    {
     3195        Assert(pVM->hm.s.fTPRPatchingActive);
    31963196        u64OldLSTAR = ASMRdMsr(MSR_K8_LSTAR);
    31973197        ASMWrMsr(MSR_K8_LSTAR, u8LastTPR);
     
    32053205     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
    32063206     */
    3207     if (    (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    3208         && !(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
    3209     {
    3210         pVCpu->hwaccm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
     3207    if (    (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     3208        && !(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
     3209    {
     3210        pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
    32113211        uint64_t u64GuestTSCAux = 0;
    32123212        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux);
     
    32173217
    32183218#ifdef VBOX_WITH_KERNEL_USING_XMM
    3219     rc = hwaccmR0VMXStartVMWrapXMM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hwaccm.s.vmx.pfnStartVM);
     3219    rc = hmR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
    32203220#else
    3221     rc = pVCpu->hwaccm.s.vmx.pfnStartVM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu);
    3222 #endif
    3223     ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, false);
    3224     ASMAtomicIncU32(&pVCpu->hwaccm.s.cWorldSwitchExits);
     3221    rc = pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
     3222#endif
     3223    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);
     3224    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);
    32253225
    32263226    /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */
    3227     if (!(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
     3227    if (!(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
    32283228    {
    32293229#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    32303230        /* Restore host's TSC_AUX. */
    3231         if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    3232             ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hwaccm.s.u64HostTSCAux);
     3231        if (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     3232            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux);
    32333233#endif
    32343234
    32353235        TMCpuTickSetLastSeen(pVCpu,
    3236                              ASMReadTSC() + pVCpu->hwaccm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
     3236                             ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
    32373237    }
    32383238
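
Since RDTSCP is deliberately left unintercepted while TSC offsetting is active, MSR_K8_TSC_AUX has to carry the guest value across the world switch and be put back for the host afterwards, which is what the code above brackets around the guest run. A self-contained sketch of that bracket, modelling the MSR as a plain variable:

#include <stdint.h>

static uint64_t g_uMsrTscAux;   /* models the physical TSC_AUX MSR */

static uint64_t msrRead(void)             { return g_uMsrTscAux; }
static void     msrWrite(uint64_t uValue) { g_uMsrTscAux = uValue; }

/* The guest must see its own TSC_AUX via unintercepted RDTSCP; the host
 * must get its value back after the exit. */
static void runGuestWithGuestTscAux(uint64_t uGuestTscAux, void (*pfnRunGuest)(void))
{
    uint64_t uHostTscAux = msrRead();   /* save host value */
    msrWrite(uGuestTscAux);             /* load guest value before entry */
    pfnRunGuest();                      /* guest RDTSCP reads the MSR directly */
    msrWrite(uHostTscAux);              /* restore host value after the exit */
}
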
     
    32443244     * Restore the host LSTAR MSR if the guest could have changed it.
    32453245     */
    3246     if (pVM->hwaccm.s.fTPRPatchingActive)
    3247     {
    3248         Assert(pVM->hwaccm.s.fTPRPatchingActive);
    3249         pVCpu->hwaccm.s.vmx.pbVAPIC[0x80] = pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
     3246    if (pVM->hm.s.fTPRPatchingActive)
     3247    {
     3248        Assert(pVM->hm.s.fTPRPatchingActive);
     3249        pVCpu->hm.s.vmx.pbVAPIC[0x80] = pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
    32503250        ASMWrMsr(MSR_K8_LSTAR, u64OldLSTAR);
    32513251    }
    32523252
    3253     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatInGC, &pVCpu->hwaccm.s.StatExit1, x);
     3253    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
    32543254    ASMSetFlags(uOldEFlags);
    32553255#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     
    32573257#endif
    32583258
    3259     AssertMsg(!pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries, ("pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries=%d\n",
    3260                                                                    pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries));
     3259    AssertMsg(!pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries, ("pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries=%d\n",
     3260                                                                   pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries));
    32613261
    32623262    /* In case we execute a goto ResumeExecution later on. */
    3263     pVCpu->hwaccm.s.fResumeVM  = true;
    3264     pVCpu->hwaccm.s.fForceTLBFlush = false;
     3263    pVCpu->hm.s.fResumeVM  = true;
     3264    pVCpu->hm.s.fForceTLBFlush = false;
    32653265
    32663266    /*
     
    32813281    /* Investigate why there was a VM-exit. */
    32823282    rc2  = VMXReadCachedVMCS(VMX_VMCS32_RO_EXIT_REASON, &exitReason);
    3283     STAM_COUNTER_INC(&pVCpu->hwaccm.s.paStatExitReasonR0[exitReason & MASK_EXITREASON_STAT]);
     3283    STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[exitReason & MASK_EXITREASON_STAT]);
    32843284
    32853285    exitReason &= 0xffff;   /* bit 0-15 contain the exit code. */
     
    33113311    rc2 = VMXReadCachedVMCS(VMX_VMCS32_RO_IDT_INFO,            &val);
    33123312    AssertRC(rc2);
    3313     pVCpu->hwaccm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);
    3314     if (    VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hwaccm.s.Event.intInfo)
     3313    pVCpu->hm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);
     3314    if (    VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hm.s.Event.intInfo)
    33153315        /* Ignore 'int xx' as they'll be restarted anyway. */
    3316         &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hwaccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW
     3316        &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW
    33173317        /* Ignore software exceptions (such as int3) as they'll reoccur when we restart the instruction anyway. */
    3318         &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hwaccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)
    3319     {
    3320         Assert(!pVCpu->hwaccm.s.Event.fPending);
    3321         pVCpu->hwaccm.s.Event.fPending = true;
     3318        &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)
     3319    {
     3320        Assert(!pVCpu->hm.s.Event.fPending);
     3321        pVCpu->hm.s.Event.fPending = true;
    33223322        /* Error code present? */
    3323         if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hwaccm.s.Event.intInfo))
     3323        if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.intInfo))
    33243324        {
    33253325            rc2 = VMXReadCachedVMCS(VMX_VMCS32_RO_IDT_ERRCODE, &val);
    33263326            AssertRC(rc2);
    3327             pVCpu->hwaccm.s.Event.errCode  = val;
     3327            pVCpu->hm.s.Event.errCode  = val;
    33283328            Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv pending error=%RX64\n",
    3329                  pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));
     3329                 pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));
    33303330        }
    33313331        else
    33323332        {
    3333             Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->hwaccm.s.Event.intInfo,
     3333            Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->hm.s.Event.intInfo,
    33343334                 (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
    3335             pVCpu->hwaccm.s.Event.errCode  = 0;
     3335            pVCpu->hm.s.Event.errCode  = 0;
    33363336        }
    33373337    }
    33383338#ifdef VBOX_STRICT
    3339     else if (   VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hwaccm.s.Event.intInfo)
     3339    else if (   VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hm.s.Event.intInfo)
    33403340                /* Ignore software exceptions (such as int3) as they'll reoccur when we restart the instruction anyway. */
    3341              && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hwaccm.s.Event.intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)
     3341             && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)
    33423342    {
    33433343        Log(("Ignore pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n",
    3344              pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
     3344             pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
    33453345    }
    33463346
    33473347    if (exitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE)
    3348         HWACCMDumpRegs(pVM, pVCpu, pCtx);
     3348        HMDumpRegs(pVM, pVCpu, pCtx);
    33493349#endif
    33503350
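
If the exit interrupted event delivery, the IDT-vectoring info above is captured into pVCpu->hm.s.Event for re-injection on the next entry; software interrupts and software exceptions are skipped because restarting the instruction regenerates them. A condensed model of that filter, with illustrative types:

#include <stdint.h>
#include <stdbool.h>

typedef enum { EVT_EXT_INT, EVT_NMI, EVT_HW_XCPT, EVT_SW_INT, EVT_SW_XCPT } EVTTYPE;

typedef struct PENDINGEVENT
{
    bool     fPending;
    uint32_t uIntInfo;
    uint32_t uErrCode;
} PENDINGEVENT;

/* Remember a valid vectoring event unless it was software initiated. */
static void eventCapture(PENDINGEVENT *pEvt, bool fValid, EVTTYPE enmType,
                         uint32_t uIntInfo, bool fHasErrCode, uint32_t uErrCode)
{
    if (fValid && enmType != EVT_SW_INT && enmType != EVT_SW_XCPT)
    {
        pEvt->fPending = true;
        pEvt->uIntInfo = uIntInfo;
        pEvt->uErrCode = fHasErrCode ? uErrCode : 0;
    }
}
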
     
    33593359     */
    33603360    if (    fSetupTPRCaching
    3361         &&  u8LastTPR != pVCpu->hwaccm.s.vmx.pbVAPIC[0x80])
    3362     {
    3363         rc2 = PDMApicSetTPR(pVCpu, pVCpu->hwaccm.s.vmx.pbVAPIC[0x80]);
     3361        &&  u8LastTPR != pVCpu->hm.s.vmx.pbVAPIC[0x80])
     3362    {
     3363        rc2 = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVAPIC[0x80]);
    33643364        AssertRC(rc2);
    33653365    }
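
With TPR caching the guest updates its task priority by writing offset 0x80 of the virtual-APIC page without exiting, so after each exit the possibly changed byte has to be pushed back into the emulated APIC, as above. A sketch, with a stand-in for PDMApicSetTPR:

#include <stdint.h>

typedef void FNSETTPR(uint8_t uTpr);   /* stand-in for PDMApicSetTPR */

#define VAPIC_OFF_TPR 0x80             /* TPR offset within the APIC page */

/* Propagate the TPR only if the guest changed it while it ran. */
static void tprSyncAfterExit(const uint8_t *pbVApicPage, uint8_t uTprOnEntry,
                             FNSETTPR *pfnSetTpr)
{
    uint8_t uTprNow = pbVApicPage[VAPIC_OFF_TPR];
    if (uTprNow != uTprOnEntry)
        pfnSetTpr(uTprNow);
}
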
     
    33693369                      exitReason, (uint64_t)exitQualification, pCtx->cs.Sel, pCtx->rip, (uint64_t)intInfo);
    33703370#endif
    3371     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatExit1, &pVCpu->hwaccm.s.StatExit2, x);
     3371    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
    33723372
    33733373    /* Some cases don't need a complete resync of the guest CPU state; handle them here. */
     
    33923392            break;
    33933393        }
    3394         STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3394        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub3, y3);
    33953395        switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo))
    33963396        {
     
    34233423                    Assert(CPUMIsGuestFPUStateActive(pVCpu));
    34243424
    3425                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowNM);
     3425                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
    34263426
    34273427                    /* Continue execution. */
    3428                     pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    3429 
    3430                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3428                    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     3429
     3430                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    34313431                    goto ResumeExecution;
    34323432                }
    34333433
    34343434                Log(("Forward #NM fault to the guest\n"));
    3435                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNM);
     3435                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
    34363436                rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
    34373437                                         cbInstr, 0);
    34383438                AssertRC(rc2);
    3439                 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3439                STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    34403440                goto ResumeExecution;
    34413441            }
     
    34443444            {
    34453445#ifdef VBOX_ALWAYS_TRAP_PF
    3446                 if (pVM->hwaccm.s.fNestedPaging)
     3446                if (pVM->hm.s.fNestedPaging)
    34473447                {
    34483448                    /*
     
    34543454                    Assert(CPUMIsGuestInPagedProtectedModeEx(pCtx));
    34553455
    3456                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestPF);
     3456                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
    34573457
    34583458                    /* Now we must update CR2. */
     
    34623462                    AssertRC(rc2);
    34633463
    3464                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3464                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    34653465                    goto ResumeExecution;
    34663466                }
    34673467#else
    3468                 Assert(!pVM->hwaccm.s.fNestedPaging);
    3469 #endif
    3470 
    3471 #ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
     3468                Assert(!pVM->hm.s.fNestedPaging);
     3469#endif
     3470
     3471#ifdef VBOX_HM_WITH_GUEST_PATCHING
    34723472                /* Shortcut for APIC TPR reads and writes; 32 bits guests only */
    3473                 if (    pVM->hwaccm.s.fTRPPatchingAllowed
    3474                     &&  pVM->hwaccm.s.pGuestPatchMem
     3473                if (    pVM->hm.s.fTRPPatchingAllowed
     3474                    &&  pVM->hm.s.pGuestPatchMem
    34753475                    &&  (exitQualification & 0xfff) == 0x080
    34763476                    &&  !(errCode & X86_TRAP_PF_P)  /* not present */
    34773477                    &&  CPUMGetGuestCPL(pVCpu) == 0
    34783478                    &&  !CPUMIsGuestInLongModeEx(pCtx)
    3479                     &&  pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))
     3479                    &&  pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
    34803480                {
    34813481                    RTGCPHYS GCPhysApicBase, GCPhys;
     
    34883488                    {
    34893489                        /* Only attempt to patch the instruction once. */
    3490                         PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
     3490                        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    34913491                        if (!pPatch)
    34923492                        {
    3493                             rc = VINF_EM_HWACCM_PATCH_TPR_INSTR;
     3493                            rc = VINF_EM_HM_PATCH_TPR_INSTR;
    34943494                            break;
    34953495                        }
     
    35083508                    &&  !(errCode & X86_TRAP_PF_P)  /* not present */
    35093509                    &&  fSetupTPRCaching
    3510                     &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
     3510                    &&  (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
    35113511                {
    35123512                    RTGCPHYS GCPhysApicBase, GCPhys;
     
    35193519                    {
    35203520                        Log(("Enable VT-x virtual APIC access filtering\n"));
    3521                         rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hwaccm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);
     3521                        rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);
    35223522                        AssertRC(rc2);
    35233523                    }
     
    35313531                {   /* We've successfully synced our shadow pages, so let's just continue execution. */
    35323532                    Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, exitQualification ,errCode));
    3533                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPF);
     3533                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
    35343534
    35353535                    TRPMResetTrap(pVCpu);
    3536                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3536                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    35373537                    goto ResumeExecution;
    35383538                }
     
    35443544                    Log2(("Forward page fault to the guest\n"));
    35453545
    3546                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestPF);
     3546                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
    35473547                    /* The error code might have been changed. */
    35483548                    errCode = TRPMGetErrorCode(pVCpu);
     
    35563556                    AssertRC(rc2);
    35573557
    3558                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3558                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    35593559                    goto ResumeExecution;
    35603560                }
     
    35643564#endif
    35653565                /* Need to go back to the recompiler to emulate the instruction. */
    3566                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPFEM);
     3566                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
    35673567                TRPMResetTrap(pVCpu);
    35683568                break;
     
    35713571            case X86_XCPT_MF: /* Floating point exception. */
    35723572            {
    3573                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestMF);
     3573                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
    35743574                if (!(pCtx->cr0 & X86_CR0_NE))
    35753575                {
     
    35843584                AssertRC(rc2);
    35853585
    3586                 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3586                STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    35873587                goto ResumeExecution;
    35883588            }
     
    36023602                 * 63:15    Reserved (0)
    36033603                 */
    3604                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDB);
     3604                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
    36053605
    36063606                /* Note that we don't support guest and host-initiated debugging at the same time. */
     
    36363636                    AssertRC(rc2);
    36373637
    3638                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3638                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    36393639                    goto ResumeExecution;
    36403640                }
     
    36463646            case X86_XCPT_BP:   /* Breakpoint. */
    36473647            {
    3648                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestBP);
     3648                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
    36493649                rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    36503650                if (rc == VINF_EM_RAW_GUEST_TRAP)
     
    36543654                                             cbInstr, errCode);
    36553655                    AssertRC(rc2);
    3656                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3656                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    36573657                    goto ResumeExecution;
    36583658                }
    36593659                if (rc == VINF_SUCCESS)
    36603660                {
    3661                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3661                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    36623662                    goto ResumeExecution;
    36633663                }
     
    36693669            {
    36703670                uint32_t     cbOp;
    3671                 PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
    3672 
    3673                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestGP);
     3671                PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
     3672
     3673                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
    36743674#ifdef VBOX_STRICT
    36753675                if (    !CPUMIsGuestInRealModeEx(pCtx)
    3676                     ||  !pVM->hwaccm.s.vmx.pRealModeTSS)
     3676                    ||  !pVM->hm.s.vmx.pRealModeTSS)
    36773677                {
    36783678                    Log(("Trap %x at %04X:%RGv errorCode=%RGv\n", vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, errCode));
     
    36803680                                             cbInstr, errCode);
    36813681                    AssertRC(rc2);
    3682                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3682                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    36833683                    goto ResumeExecution;
    36843684                }
     
    36993699                        case OP_CLI:
    37003700                            pCtx->eflags.Bits.u1IF = 0;
    3701                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCli);
     3701                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
    37023702                            break;
    37033703
     
    37093709                                               VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
    37103710                            AssertRC(rc2);
    3711                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitSti);
     3711                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
    37123712                            break;
    37133713
     
    37163716                            rc = VINF_EM_HALT;
    37173717                            pCtx->rip += pDis->cbInstr;
    3718                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitHlt);
     3718                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
    37193719                            break;
    37203720
     
    37583758                            pCtx->esp &= uMask;
    37593759
    3760                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPopf);
     3760                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
    37613761                            break;
    37623762                        }
     
    38013801                            pCtx->esp -= cbParm;
    38023802                            pCtx->esp &= uMask;
    3803                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPushf);
     3803                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
    38043804                            break;
    38053805                        }
     
    38393839                            LogFlow(("iret to %04x:%x\n", pCtx->cs.Sel, pCtx->ip));
    38403840                            fUpdateRIP = false;
    3841                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIret);
     3841                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
    38423842                            break;
    38433843                        }
     
    38553855                            AssertRC(VBOXSTRICTRC_VAL(rc));
    38563856                            fUpdateRIP = false;
    3857                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInt);
     3857                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
    38583858                            break;
    38593859                        }
     
    38733873                                AssertRC(VBOXSTRICTRC_VAL(rc));
    38743874                                fUpdateRIP = false;
    3875                                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInt);
     3875                                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
    38763876                            }
    38773877                            break;
     
    38903890                            AssertRC(VBOXSTRICTRC_VAL(rc));
    38913891                            fUpdateRIP = false;
    3892                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInt);
     3892                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
    38933893                            break;
    38943894                        }
     
    39093909                         * whole context to be done with it.
    39103910                         */
    3911                         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
     3911                        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL;
    39123912
    39133913                        /* Only resume if successful. */
    3914                         STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3914                        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    39153915                        goto ResumeExecution;
    39163916                    }
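The CLI/STI arms of the real-mode #GP emulation above are deliberately asymmetric: CLI only clears IF in the guest context, while STI also has to arm the one-instruction interrupt shadow by writing the BLOCK_STI bit into the VMCS guest-interruptibility state. A minimal sketch of that asymmetry, using standalone illustrative types rather than the VBox context structures:

    #include <stdbool.h>

    /* Illustrative only: the state touched by the CLI/STI emulation. */
    typedef struct GUESTIRQSTATE
    {
        bool fIF;          /* virtual EFLAGS.IF */
        bool fStiShadow;   /* one-instruction interrupt shadow (VMCS interruptibility state) */
    } GUESTIRQSTATE;

    static void EmulateCli(GUESTIRQSTATE *pState)
    {
        pState->fIF = false;          /* CLI: a pure context update. */
    }

    static void EmulateSti(GUESTIRQSTATE *pState)
    {
        pState->fIF = true;
        pState->fStiShadow = true;    /* STI: interrupts stay blocked for one more instruction. */
    }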
     
    39333933                switch (vector)
    39343934                {
    3935                     case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE); break;
    3936                     case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD); break;
    3937                     case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS); break;
    3938                     case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP); break;
    3939                     case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestXF); break;
     3935                    case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); break;
     3936                    case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); break;
     3937                    case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); break;
     3938                    case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); break;
     3939                    case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF); break;
    39403940                }
    39413941
     
    39453945                AssertRC(rc2);
    39463946
    3947                 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3947                STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    39483948                goto ResumeExecution;
    39493949            }
    39503950#endif
    39513951            default:
    3952                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestXcpUnk);
     3952                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
    39533953                if (    CPUMIsGuestInRealModeEx(pCtx)
    3954                     &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     3954                    &&  pVM->hm.s.vmx.pRealModeTSS)
    39553955                {
    39563956                    Log(("Real Mode Trap %x at %04x:%04X error code %x\n", vector, pCtx->cs.Sel, pCtx->eip, errCode));
     
    39663966                    }
    39673967
    3968                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3968                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    39693969                    goto ResumeExecution;
    39703970                }
     
    39823982        }
    39833983
    3984         STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3984        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    39853985        break;
    39863986    }
     
    39943994        RTGCPHYS GCPhys;
    39953995
    3996         Assert(pVM->hwaccm.s.fNestedPaging);
     3996        Assert(pVM->hm.s.fNestedPaging);
    39973997
    39983998        rc2 = VMXReadVMCS64(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &GCPhys);
     
    40174017                &&  GCPhys > 0x1000000          /* to skip VGA frame buffer accesses */
    40184018                &&  fSetupTPRCaching
    4019                 &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
     4019                &&  (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
    40204020            {
    40214021                RTGCPHYS GCPhysApicBase;
     
    40254025                {
    40264026                    Log(("Enable VT-x virtual APIC access filtering\n"));
    4027                     rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hwaccm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);
     4027                    rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);
    40284028                    AssertRC(rc2);
    40294029                }
     
    40494049            /* We've successfully synced our shadow pages, so let's just continue execution. */
    40504050            Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, exitQualification , errCode));
    4051             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitReasonNPF);
     4051            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNPF);
    40524052
    40534053            TRPMResetTrap(pVCpu);
     
    40684068        RTGCPHYS GCPhys;
    40694069
    4070         Assert(pVM->hwaccm.s.fNestedPaging);
     4070        Assert(pVM->hm.s.fNestedPaging);
    40714071
    40724072        rc2 = VMXReadVMCS64(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &GCPhys);
     
    40784078            &&  GCPhys > 0x1000000              /* to skip VGA frame buffer accesses */
    40794079            &&  fSetupTPRCaching
    4080             &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
     4080            &&  (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
    40814081        {
    40824082            RTGCPHYS GCPhysApicBase;
     
    40864086            {
    40874087                Log(("Enable VT-x virtual APIC access filtering\n"));
    4088                 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hwaccm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);
     4088                rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);
    40894089                AssertRC(rc2);
    40904090            }
     
    41164116        LogFlow(("VMX_EXIT_IRQ_WINDOW %RGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip,
    41174117                 VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF));
    4118         pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
    4119         rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     4118        pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
     4119        rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    41204120        AssertRC(rc2);
    4121         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIrqWindow);
     4121        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIrqWindow);
    41224122        goto ResumeExecution;   /* we check for pending guest interrupts there */
    41234123
    41244124    case VMX_EXIT_WBINVD:               /* 54 Guest software attempted to execute WBINVD. (conditional) */
    41254125    case VMX_EXIT_INVD:                 /* 13 Guest software attempted to execute INVD. (unconditional) */
    4126         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInvd);
     4126        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
    41274127        /* Skip instruction and continue directly. */
    41284128        pCtx->rip += cbInstr;
     
    41334133    {
    41344134        Log2(("VMX: Cpuid %x\n", pCtx->eax));
    4135         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCpuid);
     4135        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
    41364136        rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    41374137        if (rc == VINF_SUCCESS)
     
    41504150    {
    41514151        Log2(("VMX: Rdpmc %x\n", pCtx->ecx));
    4152         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdpmc);
     4152        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
    41534153        rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    41544154        if (rc == VINF_SUCCESS)
     
    41664166    {
    41674167        Log2(("VMX: Rdtsc\n"));
    4168         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtsc);
     4168        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
    41694169        rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    41704170        if (rc == VINF_SUCCESS)
     
    41824182    {
    41834183        Log2(("VMX: Rdtscp\n"));
    4184         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtscp);
     4184        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
    41854185        rc = EMInterpretRdtscp(pVM, pVCpu, pCtx);
    41864186        if (rc == VINF_SUCCESS)
     
    41984198    {
    41994199        Log2(("VMX: invlpg\n"));
    4200         Assert(!pVM->hwaccm.s.fNestedPaging);
    4201 
    4202         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInvlpg);
     4200        Assert(!pVM->hm.s.fNestedPaging);
     4201
     4202        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
    42034203        rc = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pCtx), exitQualification);
    42044204        if (rc == VINF_SUCCESS)
     
    42164216        Log2(("VMX: monitor\n"));
    42174217
    4218         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMonitor);
     4218        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
    42194219        rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    42204220        if (rc == VINF_SUCCESS)
     
    42304230    case VMX_EXIT_WRMSR:                /* 32 WRMSR. Guest software attempted to execute WRMSR. */
    42314231        /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */
    4232         if (    pVM->hwaccm.s.fTPRPatchingActive
     4232        if (    pVM->hm.s.fTPRPatchingActive
    42334233            &&  pCtx->ecx == MSR_K8_LSTAR)
    42344234        {
     
    42494249            goto ResumeExecution;
    42504250        }
    4251         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_MSR;
     4251        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_MSR;
    42524252        /* no break */
    42534253    case VMX_EXIT_RDMSR:                /* 31 RDMSR. Guest software attempted to execute RDMSR. */
    42544254    {
    4255         STAM_COUNTER_INC((exitReason == VMX_EXIT_RDMSR) ? &pVCpu->hwaccm.s.StatExitRdmsr : &pVCpu->hwaccm.s.StatExitWrmsr);
     4255        STAM_COUNTER_INC((exitReason == VMX_EXIT_RDMSR) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr);
    42564256
    42574257        /*
     
    42744274    case VMX_EXIT_CRX_MOVE:             /* 28 Control-register accesses. */
    42754275    {
    4276         STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatExit2Sub2, y2);
     4276        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub2, y2);
    42774277
    42784278        switch (VMX_EXIT_QUALIFICATION_CRX_ACCESS(exitQualification))
     
    42814281            {
    42824282                Log2(("VMX: %RGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)));
    4283                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
     4283                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
    42844284                rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    42854285                                         VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification),
     
    42884288                {
    42894289                    case 0:
    4290                         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3;
     4290                        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_CR3;
    42914291                        break;
    42924292                    case 2:
    42934293                        break;
    42944294                    case 3:
    4295                         Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));
    4296                         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
     4295                        Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));
     4296                        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
    42974297                        break;
    42984298                    case 4:
    4299                         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
     4299                        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
    43004300                        break;
    43014301                    case 8:
    43024302                        /* CR8 contains the APIC TPR */
    4303                         Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1
     4303                        Assert(!(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1
    43044304                                 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
    43054305                        break;
     
    43154315            {
    43164316                Log2(("VMX: mov x, crx\n"));
    4317                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
    4318 
    4319                 Assert(   !pVM->hwaccm.s.fNestedPaging
     4317                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
     4318
     4319                Assert(   !pVM->hm.s.fNestedPaging
    43204320                       || !CPUMIsGuestInPagedProtectedModeEx(pCtx)
    43214321                       || VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != DISCREG_CR3);
     
    43234323                /* CR8 reads only cause an exit when the TPR shadow feature isn't present. */
    43244324                Assert(   VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8
    4325                        || !(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
     4325                       || !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
    43264326
    43274327                rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
     
    43344334            {
    43354335                Log2(("VMX: clts\n"));
    4336                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCLTS);
     4336                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCLTS);
    43374337                rc = EMInterpretCLTS(pVM, pVCpu);
    4338                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     4338                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    43394339                break;
    43404340            }
     
    43434343            {
    43444344                Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)));
    4345                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitLMSW);
     4345                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLMSW);
    43464346                rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
    4347                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     4347                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    43484348                break;
    43494349            }
     
    43574357        {
    43584358            /* Only resume if successful. */
    4359             STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub2, y2);
     4359            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub2, y2);
    43604360            goto ResumeExecution;
    43614361        }
    43624362        Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
    4363         STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub2, y2);
     4363        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub2, y2);
    43644364        break;
    43654365    }
     
    43714371        {
    43724372            /* Disable DRx move intercepts. */
    4373             pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    4374             rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     4373            pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
     4374            rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    43754375            AssertRC(rc2);
    43764376
     
    43904390
    43914391#ifdef VBOX_WITH_STATISTICS
    4392             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxContextSwitch);
     4392            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
    43934393            if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
    4394                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxWrite);
     4394                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    43954395            else
    4396                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxRead);
     4396                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    43974397#endif
    43984398
     
    44064406            Log2(("VMX: mov DRx%d, genreg%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
    44074407                  VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)));
    4408             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxWrite);
     4408            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    44094409            rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    44104410                                     VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
    44114411                                     VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification));
    4412             pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     4412            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    44134413            Log2(("DR7=%08x\n", pCtx->dr[7]));
    44144414        }
     
    44164416        {
    44174417            Log2(("VMX: mov x, DRx\n"));
    4418             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxRead);
     4418            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    44194419            rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    44204420                                    VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification),
     
    44374437    case VMX_EXIT_PORT_IO:              /* 30 I/O instruction. */
    44384438    {
    4439         STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4439        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub1, y1);
    44404440        uint32_t uPort;
    44414441        uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(exitQualification);
     
    44514451        {
    44524452            rc = fIOWrite ? VINF_IOM_R3_IOPORT_WRITE : VINF_IOM_R3_IOPORT_READ;
    4453             STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4453            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
    44544454            break;
    44554455        }
     
    44594459        {
    44604460            /* ins/outs */
    4461             PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
     4461            PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
    44624462
    44634463            /* Disassemble manually to deal with segment prefixes. */
     
    44704470                {
    44714471                    Log2(("IOMInterpretOUTSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, uPort, cbSize));
    4472                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOStringWrite);
     4472                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
    44734473                    rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), uPort, pDis->fPrefix, (DISCPUMODE)pDis->uAddrMode, cbSize);
    44744474                }
     
    44764476                {
    44774477                    Log2(("IOMInterpretINSEx  %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, uPort, cbSize));
    4478                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOStringRead);
     4478                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
    44794479                    rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), uPort, pDis->fPrefix, (DISCPUMODE)pDis->uAddrMode, cbSize);
    44804480                }
     
    44924492            if (fIOWrite)
    44934493            {
    4494                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOWrite);
     4494                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
    44954495                rc = IOMIOPortWrite(pVM, uPort, pCtx->eax & uAndVal, cbSize);
    44964496                if (rc == VINF_IOM_R3_IOPORT_WRITE)
    4497                     HWACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);
     4497                    HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);
    44984498            }
    44994499            else
     
    45014501                uint32_t u32Val = 0;
    45024502
    4503                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIORead);
     4503                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
    45044504                rc = IOMIOPortRead(pVM, uPort, &u32Val, cbSize);
    45054505                if (IOM_SUCCESS(rc))
     
    45104510                else
    45114511                if (rc == VINF_IOM_R3_IOPORT_READ)
    4512                     HWACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);
     4512                    HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);
    45134513            }
    45144514        }
     
    45274527                if (pCtx->dr[7] & X86_DR7_ENABLED_MASK)
    45284528                {
    4529                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxIOCheck);
     4529                    STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIOCheck);
    45304530                    for (unsigned i = 0; i < 4; i++)
    45314531                    {
     
    45754575                            AssertRC(rc2);
    45764576
    4577                             STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4577                            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
    45784578                            goto ResumeExecution;
    45794579                        }
    45804580                    }
    45814581                }
    4582                 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4582                STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
    45834583                goto ResumeExecution;
    45844584            }
    4585             STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4585            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
    45864586            break;
    45874587        }
     
    46004600        }
    46014601#endif
    4602         STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4602        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
    46034603        break;
    46044604    }
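The DR7 scan in the port-I/O case above elides the per-register test. As a rough standalone illustration of the architectural check involved (this is not the elided VBox loop body), an I/O breakpoint fires when the register's L/G enable bits are set in DR7, its R/W field encodes I/O access (an encoding that requires CR4.DE), and the port falls inside the breakpoint's length range:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative sketch: does debug register i describe an enabled
     * I/O breakpoint covering uPort? */
    static bool IsIoBreakpointHit(uint64_t uDr7, uint64_t uDrAddr, unsigned i, uint32_t uPort)
    {
        if (!(uDr7 & (UINT64_C(3) << (i * 2))))             /* L<i>/G<i> enable bits. */
            return false;
        if (((uDr7 >> (16 + i * 4)) & 3) != 2)              /* R/W<i> == 10b selects I/O; needs CR4.DE. */
            return false;
        static const uint8_t s_acbLen[4] = { 1, 2, 8, 4 };  /* LEN<i> encoding. */
        uint8_t cb = s_acbLen[(uDr7 >> (18 + i * 4)) & 3];
        return uPort >= uDrAddr && uPort < uDrAddr + cb;
    }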
     
    46864686        Log(("VMX_EXIT_TASK_SWITCH: exit=%RX64\n", exitQualification));
    46874687        if (    (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(exitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
    4688             &&  pVCpu->hwaccm.s.Event.fPending)
     4688            &&  pVCpu->hm.s.Event.fPending)
    46894689        {
    46904690            /* Caused by an injected interrupt. */
    4691             pVCpu->hwaccm.s.Event.fPending = false;
    4692 
    4693             Log(("VMX_EXIT_TASK_SWITCH: reassert trap %d\n", VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hwaccm.s.Event.intInfo)));
    4694             Assert(!VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hwaccm.s.Event.intInfo));
    4695             rc2 = TRPMAssertTrap(pVCpu, VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hwaccm.s.Event.intInfo), TRPM_HARDWARE_INT);
     4691            pVCpu->hm.s.Event.fPending = false;
     4692
     4693            Log(("VMX_EXIT_TASK_SWITCH: reassert trap %d\n", VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hm.s.Event.intInfo)));
     4694            Assert(!VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.intInfo));
     4695            rc2 = TRPMAssertTrap(pVCpu, VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hm.s.Event.intInfo), TRPM_HARDWARE_INT);
    46964696            AssertRC(rc2);
    46974697        }
     
    47024702    case VMX_EXIT_HLT:                  /* 12 Guest software attempted to execute HLT. */
    47034703        /* Check if external interrupts are pending; if so, don't switch back. */
    4704         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitHlt);
     4704        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
    47054705        pCtx->rip++;    /* skip hlt */
    47064706        if (EMShouldContinueAfterHalt(pVCpu, pCtx))
     
    47124712    case VMX_EXIT_MWAIT:                /* 36 Guest software executed MWAIT. */
    47134713        Log2(("VMX: mwait\n"));
    4714         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMwait);
     4714        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
    47154715        rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    47164716        if (    rc == VINF_EM_HALT
     
    47374737    case VMX_EXIT_MTF:                  /* 37 Exit due to Monitor Trap Flag. */
    47384738        LogFlow(("VMX_EXIT_MTF at %RGv\n", (RTGCPTR)pCtx->rip));
    4739         pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
    4740         rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     4739        pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
     4740        rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    47414741        AssertRC(rc2);
    4742         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMTF);
     4742        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMTF);
    47434743#if 0
    47444744        DBGFDoneStepping(pVCpu);
     
    48724872        &&  !VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo))
    48734873    {
    4874         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatPendingHostIrq);
     4874        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
    48754875        /* On the next entry we'll only sync the host context. */
    4876         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
     4876        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
    48774877    }
    48784878    else
     
    48814881        /** @todo we can do better than this */
    48824882        /* Not in the VINF_PGM_CHANGE_MODE though! */
    4883         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
     4883        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL;
    48844884    }
    48854885
     
    48904890    {
    48914891        /* Try to extract more information about what might have gone wrong here. */
    4892         VMXGetActivateVMCS(&pVCpu->hwaccm.s.vmx.lasterror.u64VMCSPhys);
    4893         pVCpu->hwaccm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hwaccm.s.vmx.pvVMCS;
    4894         pVCpu->hwaccm.s.vmx.lasterror.idEnteredCpu   = pVCpu->hwaccm.s.idEnteredCpu;
    4895         pVCpu->hwaccm.s.vmx.lasterror.idCurrentCpu   = RTMpCpuId();
     4892        VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys);
     4893        pVCpu->hm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVMCS;
     4894        pVCpu->hm.s.vmx.lasterror.idEnteredCpu   = pVCpu->hm.s.idEnteredCpu;
     4895        pVCpu->hm.s.vmx.lasterror.idCurrentCpu   = RTMpCpuId();
    48964896    }
    48974897
     
    49054905#endif
    49064906
    4907     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2, x);
    4908     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
    4909     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
     4907    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
     4908    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
     4909    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
    49104910    Log2(("X"));
    49114911    return VBOXSTRICTRC_TODO(rc);
     
    49234923VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
    49244924{
    4925     Assert(pVM->hwaccm.s.vmx.fSupported);
     4925    Assert(pVM->hm.s.vmx.fSupported);
    49264926    NOREF(pCpu);
    49274927
     
    49344934
    49354935    /* Activate the VMCS. */
    4936     int rc = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     4936    int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    49374937    if (RT_FAILURE(rc))
    49384938        return rc;
    49394939
    4940     pVCpu->hwaccm.s.fResumeVM = false;
     4940    pVCpu->hm.s.fResumeVM = false;
    49414941    return VINF_SUCCESS;
    49424942}
     
    49534953VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    49544954{
    4955     Assert(pVM->hwaccm.s.vmx.fSupported);
     4955    Assert(pVM->hm.s.vmx.fSupported);
    49564956
    49574957#ifdef DEBUG
     
    49594959    {
    49604960        CPUMR0LoadHostDebugState(pVM, pVCpu);
    4961         Assert(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
     4961        Assert(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
    49624962    }
    49634963    else
     
    49724972
    49734973        /* Enable DRx move intercepts again. */
    4974         pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    4975         int rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     4974        pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
     4975        int rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    49764976        AssertRC(rc);
    49774977
    49784978        /* Resync the debug registers the next time. */
    4979         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     4979        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    49804980    }
    49814981    else
    4982         Assert(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
     4982        Assert(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
    49834983
    49844984    /*
     
    49864986     * VMCS data back to memory.
    49874987     */
    4988     int rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     4988    int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    49894989    AssertRC(rc);
    49904990
     
    50065006
    50075007    LogFlow(("hmR0VmxFlushEPT %d\n", enmFlush));
    5008     Assert(pVM->hwaccm.s.fNestedPaging);
    5009     descriptor[0] = pVCpu->hwaccm.s.vmx.GCPhysEPTP;
     5008    Assert(pVM->hm.s.fNestedPaging);
     5009    descriptor[0] = pVCpu->hm.s.vmx.GCPhysEPTP;
    50105010    descriptor[1] = 0; /* MBZ. Intel spec. 33.3 VMX Instructions */
    50115011    int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
    5012     AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %x %RGv failed with %d\n", enmFlush, pVCpu->hwaccm.s.vmx.GCPhysEPTP, rc));
     5012    AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %x %RGv failed with %d\n", enmFlush, pVCpu->hm.s.vmx.GCPhysEPTP, rc));
    50135013}
    50145014
     
    50295029    uint64_t descriptor[2];
    50305030
    5031     Assert(pVM->hwaccm.s.vmx.fVPID);
     5031    Assert(pVM->hm.s.vmx.fVPID);
    50325032    if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
    50335033    {
     
    50385038    {
    50395039        AssertPtr(pVCpu);
    5040         AssertMsg(pVCpu->hwaccm.s.uCurrentASID != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hwaccm.s.uCurrentASID));
    5041         AssertMsg(pVCpu->hwaccm.s.uCurrentASID <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hwaccm.s.uCurrentASID));
    5042         descriptor[0] = pVCpu->hwaccm.s.uCurrentASID;
     5040        AssertMsg(pVCpu->hm.s.uCurrentASID != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentASID));
     5041        AssertMsg(pVCpu->hm.s.uCurrentASID <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentASID));
     5042        descriptor[0] = pVCpu->hm.s.uCurrentASID;
    50435043        descriptor[1] = GCPtr;
    50445044    }
    50455045    int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
    50465046    AssertMsg(rc == VINF_SUCCESS,
    5047               ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hwaccm.s.uCurrentASID : 0, GCPtr, rc));
     5047              ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentASID : 0, GCPtr, rc));
    50485048}
    50495049
     
    50735073         * function may be called in a loop with individual addresses.
    50745074         */
    5075         if (pVM->hwaccm.s.vmx.fVPID)
     5075        if (pVM->hm.s.vmx.fVPID)
    50765076        {
    50775077            /* If we can flush just this page do it, otherwise flush as little as possible. */
    5078             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
     5078            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
    50795079                hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
    50805080            else
    50815081                VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    50825082        }
    5083         else if (pVM->hwaccm.s.fNestedPaging)
     5083        else if (pVM->hm.s.fNestedPaging)
    50845084            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    50855085    }
     
    51475147                Log(("Current stack %08x\n", &rc2));
    51485148
    5149                 pVCpu->hwaccm.s.vmx.lasterror.ulInstrError = instrError;
    5150                 pVCpu->hwaccm.s.vmx.lasterror.ulExitReason = exitReason;
     5149                pVCpu->hm.s.vmx.lasterror.ulInstrError = instrError;
     5150                pVCpu->hm.s.vmx.lasterror.ulExitReason = exitReason;
    51515151
    51525152#ifdef VBOX_STRICT
     
    51835183                {
    51845184                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5185                     HWACCMR0DumpDescriptor(pDesc, val, "CS: ");
     5185                    HMR0DumpDescriptor(pDesc, val, "CS: ");
    51865186                }
    51875187
     
    51915191                {
    51925192                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5193                     HWACCMR0DumpDescriptor(pDesc, val, "DS: ");
     5193                    HMR0DumpDescriptor(pDesc, val, "DS: ");
    51945194                }
    51955195
     
    51995199                {
    52005200                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5201                     HWACCMR0DumpDescriptor(pDesc, val, "ES: ");
     5201                    HMR0DumpDescriptor(pDesc, val, "ES: ");
    52025202                }
    52035203
     
    52075207                {
    52085208                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5209                     HWACCMR0DumpDescriptor(pDesc, val, "FS: ");
     5209                    HMR0DumpDescriptor(pDesc, val, "FS: ");
    52105210                }
    52115211
     
    52155215                {
    52165216                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5217                     HWACCMR0DumpDescriptor(pDesc, val, "GS: ");
     5217                    HMR0DumpDescriptor(pDesc, val, "GS: ");
    52185218                }
    52195219
     
    52235223                {
    52245224                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5225                     HWACCMR0DumpDescriptor(pDesc, val, "SS: ");
     5225                    HMR0DumpDescriptor(pDesc, val, "SS: ");
    52265226                }
    52275227
     
    52315231                {
    52325232                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5233                     HWACCMR0DumpDescriptor(pDesc, val, "TR: ");
     5233                    HMR0DumpDescriptor(pDesc, val, "TR: ");
    52345234                }
    52355235
     
    52925292    int             rc;
    52935293
    5294     pCpu = HWACCMR0GetCurrentCpu();
     5294    pCpu = HMR0GetCurrentCpu();
    52955295    HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    52965296
     
    52985298    pCache->uPos = 1;
    52995299    pCache->interPD = PGMGetInterPaeCR3(pVM);
    5300     pCache->pSwitcher = (uint64_t)pVM->hwaccm.s.pfnHost32ToGuest64R0;
     5300    pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
    53015301#endif
    53025302
     
    53135313    aParam[0] = (uint32_t)(HCPhysCpuPage);                                  /* Param 1: VMXON physical address - Lo. */
    53145314    aParam[1] = (uint32_t)(HCPhysCpuPage >> 32);                            /* Param 1: VMXON physical address - Hi. */
    5315     aParam[2] = (uint32_t)(pVCpu->hwaccm.s.vmx.HCPhysVMCS);                 /* Param 2: VMCS physical address - Lo. */
    5316     aParam[3] = (uint32_t)(pVCpu->hwaccm.s.vmx.HCPhysVMCS >> 32);           /* Param 2: VMCS physical address - Hi. */
    5317     aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache);
     5315    aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVMCS);                 /* Param 2: VMCS physical address - Lo. */
     5316    aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVMCS >> 32);           /* Param 2: VMCS physical address - Hi. */
     5317    aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
    53185318    aParam[5] = 0;
    53195319
    53205320#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    5321     pCtx->dr[4] = pVM->hwaccm.s.vmx.pScratchPhys + 16 + 8;
    5322     *(uint32_t *)(pVM->hwaccm.s.vmx.pScratch + 16 + 8) = 1;
    5323 #endif
    5324     rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnVMXGCStartVM64, 6, &aParam[0]);
     5321    pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
     5322    *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
     5323#endif
     5324    rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnVMXGCStartVM64, 6, &aParam[0]);
    53255325
    53265326#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    5327     Assert(*(uint32_t *)(pVM->hwaccm.s.vmx.pScratch + 16 + 8) == 5);
     5327    Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
    53285328    Assert(pCtx->dr[4] == 10);
    5329     *(uint32_t *)(pVM->hwaccm.s.vmx.pScratch + 16 + 8) = 0xff;
     5329    *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
    53305330#endif
    53315331
    53325332#ifdef DEBUG
    53335333    AssertMsg(pCache->TestIn.HCPhysCpuPage== HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
    5334     AssertMsg(pCache->TestIn.HCPhysVMCS   == pVCpu->hwaccm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
    5335                                                                               pVCpu->hwaccm.s.vmx.HCPhysVMCS));
     5334    AssertMsg(pCache->TestIn.HCPhysVMCS   == pVCpu->hm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
     5335                                                                              pVCpu->hm.s.vmx.HCPhysVMCS));
    53365336    AssertMsg(pCache->TestIn.HCPhysVMCS   == pCache->TestOut.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
    53375337                                                                          pCache->TestOut.HCPhysVMCS));
    53385338    AssertMsg(pCache->TestIn.pCache       == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
    53395339                                                                      pCache->TestOut.pCache));
    5340     AssertMsg(pCache->TestIn.pCache       == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache),
    5341               ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache)));
     5340    AssertMsg(pCache->TestIn.pCache       == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
     5341              ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
    53425342    AssertMsg(pCache->TestIn.pCtx         == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
    53435343                                                                    pCache->TestOut.pCtx));
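A reading aid for the aParam[] setup above: the 32-bit-host switcher path cannot take 64-bit arguments directly, so each 64-bit physical address is passed as a lo/hi pair of 32-bit parameters. A minimal sketch of that split (the helper name is made up for illustration):

    #include <stdint.h>

    /* Hypothetical helper mirroring the lo/hi split used for aParam[0..3]. */
    static unsigned AddU64Param(uint32_t *pau32, unsigned iParam, uint64_t u64)
    {
        pau32[iParam++] = (uint32_t)u64;          /* low 32 bits  (e.g. aParam[0], aParam[2]). */
        pau32[iParam++] = (uint32_t)(u64 >> 32);  /* high 32 bits (e.g. aParam[1], aParam[3]). */
        return iParam;
    }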
     
    54665466    RTHCUINTREG     uOldEFlags;
    54675467
    5468     AssertReturn(pVM->hwaccm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
     5468    AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
    54695469    Assert(pfnHandler);
    5470     Assert(pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Write.aField));
    5471     Assert(pVCpu->hwaccm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Read.aField));
     5470    Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
     5471    Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
    54725472
    54735473#ifdef VBOX_STRICT
    5474     for (unsigned i=0;i<pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries;i++)
    5475         Assert(hmR0VmxIsValidWriteField(pVCpu->hwaccm.s.vmx.VMCSCache.Write.aField[i]));
    5476 
    5477     for (unsigned i=0;i<pVCpu->hwaccm.s.vmx.VMCSCache.Read.cValidEntries;i++)
    5478         Assert(hmR0VmxIsValidReadField(pVCpu->hwaccm.s.vmx.VMCSCache.Read.aField[i]));
     5474    for (unsigned i=0;i<pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries;i++)
     5475        Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
     5476
     5477    for (unsigned i=0;i<pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries;i++)
     5478        Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
    54795479#endif
    54805480
     
    54875487#endif
    54885488
    5489     pCpu = HWACCMR0GetCurrentCpu();
     5489    pCpu = HMR0GetCurrentCpu();
    54905490    HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    54915491
    54925492    /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
    5493     VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     5493    VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    54945494
    54955495    /* Leave VMX Root Mode. */
     
    55035503        CPUMPushHyper(pVCpu, paParam[i]);
    55045504
    5505     STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     5505    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
    55065506
    55075507    /* Call switcher. */
    5508     rc = pVM->hwaccm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
    5509     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     5508    rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
     5509    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
    55105510
    55115511    /* Make sure the VMX instructions don't cause #UD faults. */
     
    55215521    }
    55225522
    5523     rc2 = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     5523    rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    55245524    AssertRC(rc2);
    55255525    Assert(!(ASMGetFlags() & X86_EFL_IF));
     
    56095609VMMR0DECL(int) VMXWriteCachedVMCSEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
    56105610{
    5611     PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
     5611    PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
    56125612
    56135613    AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.h

    r43307 r43387  
    2424#include <VBox/vmm/stam.h>
    2525#include <VBox/dis.h>
    26 #include <VBox/vmm/hwaccm.h>
     26#include <VBox/vmm/hm.h>
    2727#include <VBox/vmm/pgm.h>
    28 #include <VBox/vmm/hwacc_vmx.h>
     28#include <VBox/vmm/hm_vmx.h>
    2929
    3030RT_C_DECLS_BEGIN
     
    220220        else                                                                                    \
    221221        if (    CPUMIsGuestInRealModeEx(pCtx)                                                   \
    222             &&  !pVM->hwaccm.s.vmx.fUnrestrictedGuest)                                          \
     222            &&  !pVM->hm.s.vmx.fUnrestrictedGuest)                                          \
    223223        {                                                                                       \
    224224            /* Must override this or else VT-x will fail with invalid guest state errors. */    \
     
    291291{
    292292    Assert(idxCache <= VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX);
    293     *pVal = pVCpu->hwaccm.s.vmx.VMCSCache.Read.aFieldVal[idxCache];
     293    *pVal = pVCpu->hm.s.vmx.VMCSCache.Read.aFieldVal[idxCache];
    294294    return VINF_SUCCESS;
    295295}
  • trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp

    r42222 r43387  
    2828#include <VBox/vmm/vmm.h>
    2929#include <VBox/vmm/patm.h>
    30 #include <VBox/vmm/hwaccm.h>
     30#include <VBox/vmm/hm.h>
    3131
    3232#include <VBox/log.h>
     
    374374    PDMDEV_ASSERT_DEVINS(pDevIns);
    375375    LogFlow(("pdmR0DevHlp_GetVM: caller='%p'/%d\n", pDevIns, pDevIns->iInstance));
    376     return HWACCMCanEmulateIoBlock(VMMGetCpu(pDevIns->Internal.s.pVMR0));
     376    return HMCanEmulateIoBlock(VMMGetCpu(pDevIns->Internal.s.pVMR0));
    377377}
    378378
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r43379 r43387  
    3737#include <VBox/vmm/gmm.h>
    3838#include <VBox/intnet.h>
    39 #include <VBox/vmm/hwaccm.h>
     39#include <VBox/vmm/hm.h>
    4040#include <VBox/param.h>
    4141#include <VBox/err.h>
     
    117117
    118118    /*
    119      * Initialize the VMM, GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
     119     * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
    120120     */
    121121    int rc = vmmInitFormatTypes();
     
    128128            if (RT_SUCCESS(rc))
    129129            {
    130                 rc = HWACCMR0Init();
     130                rc = HMR0Init();
    131131                if (RT_SUCCESS(rc))
    132132                {
     
    188188                    else
    189189                        LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
    190                     HWACCMR0Term();
     190                    HMR0Term();
    191191                }
    192192                else
    193                     LogRel(("ModuleInit: HWACCMR0Init -> %Rrc\n", rc));
     193                    LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
    194194                GMMR0Term();
    195195            }
     
    231231
    232232    /*
    233      * PGM (Darwin), HWACCM and PciRaw global cleanup.
     233     * PGM (Darwin), HM and PciRaw global cleanup.
    234234     */
    235235#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     
    240240#endif
    241241    PGMDeregisterStringFormatTypes();
    242     HWACCMR0Term();
     242    HMR0Term();
    243243#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
    244244    vmmR0TripleFaultHackTerm();
     
    342342    {
    343343        /*
    344          * Init HWACCM, CPUM and PGM (Darwin only).
    345          */
    346         rc = HWACCMR0InitVM(pVM);
     344         * Init HM, CPUM and PGM (Darwin only).
     345         */
     346        rc = HMR0InitVM(pVM);
    347347        if (RT_SUCCESS(rc))
    348348        {
     
    370370            PciRawR0TermVM(pVM);
    371371#endif
    372             HWACCMR0TermVM(pVM);
     372            HMR0TermVM(pVM);
    373373        }
    374374    }
     
    410410        PGMR0DynMapTermVM(pVM);
    411411#endif
    412         HWACCMR0TermVM(pVM);
     412        HMR0TermVM(pVM);
    413413    }
    414414
     
    603603            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
    604604            break;
    605         case VINF_EM_HWACCM_PATCH_TPR_INSTR:
     605        case VINF_EM_HM_PATCH_TPR_INSTR:
    606606            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
    607607            break;
     
    662662            /* Some safety precautions first. */
    663663#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    664             if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled /* hwaccm */
     664            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled /* hm */
    665665                          && pVM->cCpus == 1               /* !smp */
    666666                          && PGMGetHyperCR3(pVCpu)))
     
    683683                /* We might need to disable VT-x if the active switcher turns off paging. */
    684684                bool fVTxDisabled;
    685                 int rc = HWACCMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
     685                int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
    686686                if (RT_SUCCESS(rc))
    687687                {
     
    705705
    706706                    /* Re-enable VT-x if previously turned off. */
    707                     HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);
     707                    HMR0LeaveSwitcher(pVM, fVTxDisabled);
    708708
    709709                    if (    rc == VINF_EM_RAW_INTERRUPT
     
    770770#endif
    771771            int rc;
    772             if (!HWACCMR0SuspendPending())
     772            if (!HMR0SuspendPending())
    773773            {
    774                 rc = HWACCMR0Enter(pVM, pVCpu);
     774                rc = HMR0Enter(pVM, pVCpu);
    775775                if (RT_SUCCESS(rc))
    776776                {
    777                     rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
    778                     int rc2 = HWACCMR0Leave(pVM, pVCpu);
     777                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
     778                    int rc2 = HMR0Leave(pVM, pVCpu);
    779779                    AssertRC(rc2);
    780780                }
     
    962962
    963963        /*
    964          * Attempt to enable hwacc mode and check the current setting.
     964         * Attempt to enable hm mode and check the current setting.
    965965         */
    966966        case VMMR0_DO_HWACC_ENABLE:
    967             return HWACCMR0EnableAllCpus(pVM);
     967            return HMR0EnableAllCpus(pVM);
    968968
    969969        /*
     
    971971         */
    972972        case VMMR0_DO_HWACC_SETUP_VM:
    973             return HWACCMR0SetupVM(pVM);
     973            return HMR0SetupVM(pVM);
    974974
    975975        /*
     
    981981            bool fVTxDisabled;
    982982
    983             /* Safety precaution as HWACCM can disable the switcher. */
     983            /* Safety precaution as HM can disable the switcher. */
    984984            Assert(!pVM->vmm.s.fSwitcherDisabled);
    985985            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
     
    999999
    10001000            /* We might need to disable VT-x if the active switcher turns off paging. */
    1001             rc = HWACCMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
     1001            rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
    10021002            if (RT_FAILURE(rc))
    10031003                return rc;
     
    10061006
    10071007            /* Re-enable VT-x if previously turned off. */
    1008             HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);
     1008            HMR0LeaveSwitcher(pVM, fVTxDisabled);
    10091009
    10101010            /** @todo dispatch interrupts? */
     
    12841284            if (idCpu == NIL_VMCPUID)
    12851285                return VERR_INVALID_CPU_ID;
    1286             return HWACCMR0TestSwitcher3264(pVM);
     1286            return HMR0TestSwitcher3264(pVM);
    12871287#endif
    12881288        default:
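Taken together, the VMMR0.cpp hunks rename the ring-0 module lifecycle entry points. A compile-oriented sketch of the new pairing as ModuleInit/ModuleTerm use it above, with every other subsystem and error path elided:

    #include <VBox/vmm/hm.h>
    #include <VBox/err.h>

    /* Sketch of the renamed module lifecycle, per the hunks above. */
    static int SketchModuleInit(void)
    {
        int rc = HMR0Init();      /* was HWACCMR0Init(). */
        if (RT_SUCCESS(rc))
        {
            /* ... GVMM/GMM/PGM/intnet initialization elided ... */
            return VINF_SUCCESS;
        }
        return rc;
    }

    static void SketchModuleTerm(void)
    {
        /* ... other teardown elided ... */
        HMR0Term();               /* was HWACCMR0Term(). */
    }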